def __init__(self, pencilfolder): """Initializes CoefficientCreator based on a Pencil-code folder. Parameters ---------- pencilfolder : str Pencil-code that provides the grid and will be used to store the coefficients. pc_start needs to be run on the folder beforehand. """ self.datadir = os.path.join(pencilfolder, 'data') try: self.grid = pc.read_grid(self.datadir, trim=True, quiet=True) except Exception as e: print("Cannot read grid. Have you run 'pc_run start' already?") raise e self.datatype = self.grid.x.dtype self.h5file = os.path.join(self.datadir, 'emftensors.h5')
def readField(simdir, varfile):
    var = pc.read_var(datadir=simdir, varfile=varfile, magic='bb',
                      quiet=True, trimall=True)
    grid = pc.read_grid(datadir=simdir, quiet=True)
    bb = var.bb
    p = pClass()
    p.dx = var.dx
    p.dy = var.dy
    p.dz = var.dz
    p.Ox = var.x[0]
    p.Oy = var.y[0]
    p.Oz = var.z[0]
    p.Lx = grid.Lx
    p.Ly = grid.Ly
    p.Lz = grid.Lz
    p.nx = var.bb.shape[1]
    p.ny = var.bb.shape[2]
    p.nz = var.bb.shape[3]
    return bb, p, var.t
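
# Example usage of readField (a sketch; assumes a data directory with VAR0
# written and the vector potential present, so that magic='bb' can derive
# the magnetic field):
#
#     bb, p, t = readField('./mysim/data', 'VAR0')
#     print(bb.shape)   # (3, nz, ny, nx) after trimming the ghost zones
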
def aver2vtk(varfile="xyaverages.dat", datadir="data/", destination="xyaverages", quiet=1): """ Convert average data from PencilCode format to vtk. call signature:: aver2vtk(varfile = 'xyaverages.dat', datadir = 'data/', destination = 'xyaverages', quiet = 1): Read the average file specified in *varfile* and convert the data into vtk format. Write the result in *destination*. Keyword arguments: *varfile*: Name of the average file. This also specifies which dimensions the averages are taken. *datadir*: Directory where the data is stored. *destination*: Destination file. """ # read the grid dimensions grid = pc.read_grid(datadir=datadir, trim=True, quiet=True) # read the specified average file if varfile[0:2] == "xy": aver = pc.read_xyaver() line_len = int(np.round(grid.Lz / grid.dz)) l0 = grid.z[(len(grid.z) - line_len) / 2] dl = grid.dz elif varfile[0:2] == "xz": aver = pc.read_xzaver() line_len = int(np.round(grid.Ly / grid.dy)) l0 = grid.y[(len(grid.y) - line_len) / 2] dl = grid.dy elif varfile[0:2] == "yz": aver = pc.read_yzaver() line_len = int(np.round(grid.Lx / grid.dx)) l0 = grid.x[(len(grid.x) - line_len) / 2] dl = grid.dx else: print("aver2vtk: ERROR: cannot determine average file\n") print("aver2vtk: The name of the file has to be either xyaver.dat, xzaver.dat or yzaver.dat\n") return -1 keys = list(aver.__dict__.keys()) t = aver.t keys.remove("t") # open the destination file fd = open(destination + ".vtk", "wb") fd.write("# vtk DataFile Version 2.0\n") fd.write(varfile[0:2] + "averages\n") fd.write("BINARY\n") fd.write("DATASET STRUCTURED_POINTS\n") fd.write("DIMENSIONS {0:9} {1:9} {2:9}\n".format(len(t), line_len, 1)) fd.write("ORIGIN {0:8.12} {1:8.12} {2:8.12}\n".format(float(t[0]), l0, 0.0)) fd.write("SPACING {0:8.12} {1:8.12} {2:8.12}\n".format(t[1] - t[0], dl, 1.0)) fd.write("POINT_DATA {0:9}\n".format(len(t) * line_len)) # run through all variables for var in keys: fd.write("SCALARS " + var + " float\n") fd.write("LOOKUP_TABLE default\n") for j in range(line_len): for i in range(len(t)): fd.write(struct.pack(">f", aver.__dict__[var][i, j])) fd.close()
def pc2vtk_vid(ti=0, tf=1, datadir="data/", proc=-1,
               variables=["rho", "uu", "bb"], magic=[], b_ext=False,
               destination="animation", quiet=True):
    """
    Convert data from PencilCode format to vtk.

    call signature::

      pc2vtk(ti = 0, tf = 1, datadir = 'data/', proc = -1,
             variables = ['rho','uu','bb'], magic = [],
             destination = 'animation')

    Read the var files between *ti* and *tf* and convert their content
    into vtk format. Write the result in *destination*.

    Keyword arguments:

      *ti*:
        Initial time.

      *tf*:
        Final time.

      *datadir*:
        Directory where the data is stored.

      *proc*:
        Processor which should be read. Set to -1 for all processors.

      *variables* = [ 'rho' , 'lnrho' , 'uu' , 'bb', 'b_mag', 'jj', 'j_mag',
                      'aa', 'ab', 'TT', 'lnTT', 'cc', 'lncc', 'ss', 'vort' ]
        Variables which should be written.

      *magic*: [ 'vort' , 'bb' ]
        Additional variables which should be written.

      *b_ext*:
        Add the external magnetic field.

      *destination*:
        Destination files without '.vtk' extension.

      *quiet*:
        Keep quiet when reading the var files.
    """
    # this should correct for the case the user types only one variable
    if len(variables) > 0:
        if len(variables[0]) == 1:
            variables = [variables]
    # this should correct for the case the user types only one magic variable
    if len(magic) > 0:
        if len(magic[0]) == 1:
            magic = [magic]

    # make sure magic is set when writing 'vort', 'bb' or 'jj'
    if 'vort' in variables:
        magic.append('vort')
    if ('bb' in variables) or ('b_mag' in variables):
        magic.append('bb')
    if ('jj' in variables) or ('j_mag' in variables):
        magic.append('jj')

    for n in range(ti, tf + 1):
        varfile = "VAR" + str(n)
        # read the pc variables and set the dimensions
        var = pc.read_var(varfile=varfile, datadir=datadir, proc=proc,
                          magic=magic, trimall=True, quiet=quiet)
        grid = pc.read_grid(datadir=datadir, proc=proc, trim=True, quiet=True)
        params = pc.read_param(param2=True, quiet=True)
        B_ext = np.array(params.b_ext)
        # add the external magnetic field
        if b_ext:
            var.bb[0, ...] += B_ext[0]
            var.bb[1, ...] += B_ext[1]
            var.bb[2, ...] += B_ext[2]

        dimx = len(grid.x)
        dimy = len(grid.y)
        dimz = len(grid.z)
        dim = dimx * dimy * dimz
        dx = (np.max(grid.x) - np.min(grid.x)) / (dimx - 1)
        dy = (np.max(grid.y) - np.min(grid.y)) / (dimy - 1)
        dz = (np.max(grid.z) - np.min(grid.z)) / (dimz - 1)

        #fd = open(destination + "{0:1.0f}".format(var.t*1e5) + '.vtk', 'wb')
        fd = open(destination + str(n) + ".vtk", "wb")
        fd.write("# vtk DataFile Version 2.0\n".encode('utf-8'))
        fd.write("density + magnetic field\n".encode('utf-8'))
        fd.write("BINARY\n".encode('utf-8'))
        fd.write("DATASET STRUCTURED_POINTS\n".encode('utf-8'))
        fd.write("DIMENSIONS {0:9} {1:9} {2:9}\n".format(
            dimx, dimy, dimz).encode('utf-8'))
        fd.write("ORIGIN {0:8.12} {1:8.12} {2:8.12}\n".format(
            grid.x[0], grid.y[0], grid.z[0]).encode('utf-8'))
        fd.write("SPACING {0:8.12} {1:8.12} {2:8.12}\n".format(
            dx, dy, dz).encode('utf-8'))
        fd.write("POINT_DATA {0:9}\n".format(dim).encode('utf-8'))

        # helper: write one scalar field in big-endian binary
        def write_scalar(name, data):
            print("writing " + name)
            fd.write(("SCALARS " + name + " float\n").encode('utf-8'))
            fd.write("LOOKUP_TABLE default\n".encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", data[k, j, i]))

        # helper: write one vector field in big-endian binary
        def write_vector(name, data):
            print("writing " + name)
            fd.write(("VECTORS " + name + " float\n").encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", data[0, k, j, i]))
                        fd.write(struct.pack(">f", data[1, k, j, i]))
                        fd.write(struct.pack(">f", data[2, k, j, i]))

        # write the requested fields in the canonical order
        vector_name = {'uu': 'vfield', 'bb': 'bfield', 'jj': 'jfield',
                       'aa': 'afield', 'vort': 'vorticity'}
        for v in ['rho', 'lnrho', 'uu', 'bb', 'b_mag', 'jj', 'j_mag',
                  'aa', 'ab', 'TT', 'lnTT', 'cc', 'lncc', 'ss', 'vort']:
            if v not in variables:
                continue
            try:
                if v == 'b_mag':
                    write_scalar('b_mag', np.sqrt(pc.dot2(var.bb)))
                elif v == 'j_mag':
                    write_scalar('j_mag', np.sqrt(pc.dot2(var.jj)))
                elif v == 'ab':
                    write_scalar('ab', pc.dot(var.aa, var.bb))
                elif v in vector_name:
                    write_vector(vector_name[v], getattr(var, v))
                else:
                    write_scalar(v, getattr(var, v))
            except AttributeError:
                # silently skip fields that are not present in the var file
                pass

        del var
        fd.close()
# $Id: pvid2D.py,v 1.1.1.1 2009-12-16 17:37:16 dintrans Exp $
import numpy as N
import pylab as P
import pencil as pc
from os import system

system('cat video.in')
field = input('which field? ')

f, t = pc.read_slices(field=field, proc=0, extension='xz')
ux, t = pc.read_slices(field='uu1', proc=0, extension='xz')
uz, t = pc.read_slices(field='uu3', proc=0, extension='xz')

dim = pc.read_dim()
grid = pc.read_grid(trim=True)
param = pc.read_param(quiet=True)
nt = len(t)
f = f.reshape(nt, dim.nz, dim.nx)
ux = ux.reshape(nt, dim.nz, dim.nx)
uz = uz.reshape(nt, dim.nz, dim.nx)

P.ion()
frame = param.xyz0[0], param.xyz1[0], param.xyz0[2], param.xyz1[2]
# draw 1000 random seed points for the velocity arrows
qs1 = N.random.randint(0, dim.nx, 1000)
qs2 = N.random.randint(0, dim.nz, 1000)
xx, zz = P.meshgrid(grid.x, grid.z)
im = P.imshow(f[0, ...], extent=frame, origin='lower', aspect='auto')
a = ux[0, qs2, qs1]**2 + uz[0, qs2, qs1]**2
norm = N.sqrt(a.max())
def power2vtk(powerfiles=['power_mag.dat'], datadir='data/',
              destination='power', thickness=1):
    """
    Convert power spectra from PencilCode format to vtk.

    call signature::

      power2vtk(powerfiles = ['power_mag.dat'], datadir = 'data/',
                destination = 'power.vtk', thickness = 1):

    Read the power spectra stored in the power*.dat files and convert
    them into vtk format. Write the result in *destination*.

    Keyword arguments:

      *powerfiles*:
        The files containing the power spectra.

      *datadir*:
        Directory where the data is stored.

      *destination*:
        Destination file.

      *thickness*:
        Dimension in z-direction. Setting it to 2 will create an n*m*2
        dimensional array of data. This is useful in Paraview for
        visualizing the spectrum in 3 dimensions. Note that this will
        simply double the amount of data.
    """
    # this should correct for the case the user types only one variable
    if (len(powerfiles) > 0):
        if (len(powerfiles[0]) == 1):
            powerfiles = [powerfiles]

    # read the grid dimensions
    grid = pc.read_grid(datadir=datadir, trim=True, quiet=True)

    # leave k0 at 1 for now, will fix this later
    k0 = 1.
    # leave dk at 1 for now, will fix this later
    dk = 1.

    # open the destination file
    fd = open(destination + '.vtk', 'wb')

    # read the first power spectrum
    t, power = pc.read_power(datadir + powerfiles[0])

    fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
    fd.write('power spectra\n'.encode('utf-8'))
    fd.write('BINARY\n'.encode('utf-8'))
    fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
    if (thickness == 1):
        fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
            len(t), power.shape[1], 1).encode('utf-8'))
    else:
        fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
            len(t), power.shape[1], 2).encode('utf-8'))
    fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
        float(t[0]), k0, 0.).encode('utf-8'))
    fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
        t[1] - t[0], dk, 1.).encode('utf-8'))
    if (thickness == 1):
        fd.write('POINT_DATA {0:9}\n'.format(
            power.shape[0] * power.shape[1]).encode('utf-8'))
    else:
        fd.write('POINT_DATA {0:9}\n'.format(
            power.shape[0] * power.shape[1] * 2).encode('utf-8'))

    for powfile in powerfiles:
        # read the power spectrum
        t, power = pc.read_power(datadir + powfile)
        fd.write(('SCALARS ' + powfile[:-4] + ' float\n').encode('utf-8'))
        fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
        if (thickness == 1):
            for j in range(power.shape[1]):
                for i in range(len(t)):
                    fd.write(struct.pack(">f", power[i, j]))
        else:
            # duplicate the plane to give the spectrum a finite thickness
            for k in [1, 2]:
                for j in range(power.shape[1]):
                    for i in range(len(t)):
                        fd.write(struct.pack(">f", power[i, j]))
    fd.close()
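
# Example usage of power2vtk (a sketch; assumes the run wrote power
# spectra such as 'data/power_mag.dat' and 'data/power_kin.dat'):
#
#     power2vtk(powerfiles=['power_mag.dat', 'power_kin.dat'],
#               destination='power', thickness=2)
#     # -> 'power.vtk' with one time-wavenumber plane per spectrum
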
def slices2vtk(variables=['rho'], extensions=['xy', 'xy2', 'xz', 'yz'],
               datadir='data/', destination='slices', proc=-1,
               format='native'):
    """
    Convert slices from PencilCode format to vtk.

    call signature::

      slices2vtk(variables = ['rho'], extensions = ['xy', 'xy2', 'xz', 'yz'],
                 datadir = 'data/', destination = 'slices', proc = -1,
                 format = 'native'):

    Read slice files specified by *variables* and convert them into vtk
    format for the specified extensions. Write the result in *destination*.
    NB: You need to have called src/read_videofiles.x before using this
    script.

    Keyword arguments:

      *variables*:
        All allowed fields which can be written as slice files, e.g. b2,
        uu1, lnrho, ... See the pencil code manual for more
        (chapter: "List of parameters for `video.in'").

      *extensions*:
        List of slice positions.

      *datadir*:
        Directory where the data is stored.

      *destination*:
        Destination files.

      *proc*:
        Processor which should be read. Set to -1 for all processors.

      *format*:
        Endian, one of little, big, or native (default)
    """
    # this should correct for the case the user types only one variable
    if (len(variables) > 0):
        if (len(variables[0]) == 1):
            variables = [variables]
    # this should correct for the case the user types only one extension
    if (len(extensions) > 0):
        if (len(extensions[0]) == 1):
            extensions = [extensions]

    # read the grid dimensions
    grid = pc.read_grid(datadir=datadir, proc=proc, trim=True, quiet=True)

    # read the user given parameters for the slice positions
    params = pc.read_param(param2=True, quiet=True)

    # run through all specified variables
    for field in variables:
        # run through all specified extensions
        for ext in extensions:
            print("read " + field + ' ' + ext)
            slices, t = pc.read_slices(field=field, datadir=datadir,
                                       proc=proc, extension=ext,
                                       format=format)

            dim_p = slices.shape[2]
            dim_q = slices.shape[1]
            if ext[0] == 'x':
                d_p = (np.max(grid.x) - np.min(grid.x)) / (dim_p)
            else:
                d_p = (np.max(grid.y) - np.min(grid.y)) / (dim_p)
            if ext[1] == 'y':
                d_q = (np.max(grid.y) - np.min(grid.y)) / (dim_q)
            else:
                d_q = (np.max(grid.z) - np.min(grid.z)) / (dim_q)

            if params.ix != -1:
                x0 = grid.x[params.ix]
            elif params.slice_position == 'm':
                x0 = grid.x[int(len(grid.x) / 2)]
            if params.iy != -1:
                y0 = grid.y[params.iy]
            elif params.slice_position == 'm':
                y0 = grid.y[int(len(grid.y) / 2)]
            if params.iz != -1:
                z0 = grid.z[params.iz]
            elif params.slice_position == 'm':
                z0 = grid.z[int(len(grid.z) / 2)]

            for i in range(slices.shape[0]):
                # open the destination file for writing
                fd = open(destination + '_' + field + '_' + ext + '_' +
                          str(i) + '.vtk', 'wb')

                # write the header
                fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
                fd.write((field + '_' + ext + '\n').encode('utf-8'))
                fd.write('BINARY\n'.encode('utf-8'))
                fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
                if ext[0:2] == 'xy':
                    x0 = grid.x[0]
                    y0 = grid.y[0]
                    fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                        dim_p, dim_q, 1).encode('utf-8'))
                    fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        x0, y0, z0).encode('utf-8'))
                    fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        grid.dx, grid.dy, 1.).encode('utf-8'))
                elif ext[0:2] == 'xz':
                    x0 = grid.x[0]
                    z0 = grid.z[0]
                    fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                        dim_p, 1, dim_q).encode('utf-8'))
                    fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        x0, y0, z0).encode('utf-8'))
                    # spacing along the slice directions x and z
                    fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        grid.dx, 1., grid.dz).encode('utf-8'))
                elif ext[0:2] == 'yz':
                    y0 = grid.y[0]
                    z0 = grid.z[0]
                    fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                        1, dim_p, dim_q).encode('utf-8'))
                    fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        x0, y0, z0).encode('utf-8'))
                    # spacing along the slice directions y and z
                    fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        1., grid.dy, grid.dz).encode('utf-8'))
                fd.write('POINT_DATA {0:9}\n'.format(
                    dim_p * dim_q).encode('utf-8'))
                fd.write(('SCALARS ' + field + '_' + ext +
                          ' float\n').encode('utf-8'))
                fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
                for j in range(dim_q):
                    for k in range(dim_p):
                        fd.write(struct.pack(">f", slices[i, j, k]))
                fd.close()
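
# Example usage of slices2vtk (a sketch; assumes src/read_videofiles.x has
# been run so the assembled slice files exist in 'data/'):
#
#     slices2vtk(variables=['b2', 'uu1'], extensions=['xy', 'xz'],
#                datadir='data/', destination='slices')
#     # -> one vtk file per variable, extension and time step
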
def find_tracers(self, trace_field='bb', h_min=2e-3, h_max=2e4, len_max=500,
                 tol=1e-2, iter_max=1e3, interpolation='trilinear',
                 trace_sub=1, int_q=[''], varfile='VAR0', ti=-1, tf=-1,
                 integration='simple', data_dir='./data', n_proc=1):
    """
    Trace streamlines from the VAR files and integrate quantity 'int_q'
    along them.

    call signature::

      find_tracers(self, trace_field='bb', h_min=2e-3, h_max=2e4,
                   len_max=500, tol=1e-2, iter_max=1e3,
                   interpolation='trilinear', trace_sub=1, int_q=[''],
                   varfile='VAR0', ti=-1, tf=-1, integration='simple',
                   data_dir='data/', n_proc=1)

    Trace streamlines of the vector field 'trace_field' from z = z0 to
    z = z1 and integrate quantities 'int_q' along the lines. Creates a
    2d mapping as in 'streamlines.f90'.

    Keyword arguments:

    *trace_field*: Vector field used for the streamline tracing.
    *h_min*: Minimum step length for an underflow to occur.
    *h_max*: Parameter for the initial step length.
    *len_max*: Maximum length of the streamline. Integration will stop if
      len >= len_max.
    *tol*: Tolerance for each integration step. Reduces the step length
      if error >= tol.
    *iter_max*: Maximum number of iterations.
    *interpolation*: Interpolation of the vector field.
      'mean': takes the mean of the adjacent grid points.
      'trilinear': weights the adjacent grid points according to their
      distance.
    *trace_sub*: Number of sub-grid cells for the seeds.
    *int_q*: Quantities to be integrated along the streamlines.
    *varfile*: Varfile to be read.
    *integration*: Integration method.
      'simple': low order method.
      'RK6': Runge-Kutta 6th order.
    *ti*: Initial VAR file index for tracer time sequences.
      Overrides 'varfile'.
    *tf*: Final VAR file index for tracer time sequences.
      Overrides 'varfile'.
    *data_dir*: Directory where the data is stored.
    *n_proc*: Number of cores for multi core computation.
    """
    # Return the tracers for the specified starting locations.
    def __sub_tracers(queue, var, field, t_idx, i_proc, n_proc):
        xx = np.zeros([(self.x0.shape[0] + n_proc - 1 - i_proc)//n_proc,
                       self.x0.shape[1], 3])
        xx[:, :, 0] = self.x0[i_proc:self.x0.shape[0]:n_proc, :, t_idx].copy()
        xx[:, :, 1] = self.y0[i_proc:self.x0.shape[0]:n_proc, :, t_idx].copy()
        xx[:, :, 2] = self.z1[i_proc:self.x0.shape[0]:n_proc, :, t_idx].copy()
        # Initialize the local arrays for this core.
        sub_x1 = np.zeros(xx[:, :, 0].shape)
        sub_y1 = np.zeros(xx[:, :, 0].shape)
        sub_z1 = np.zeros(xx[:, :, 0].shape)
        sub_l = np.zeros(xx[:, :, 0].shape)
        sub_curly_A = np.zeros(xx[:, :, 0].shape)
        sub_ee = np.zeros(xx[:, :, 0].shape)
        sub_mapping = np.zeros([xx[:, :, 0].shape[0], xx[:, :, 0].shape[1], 3])
        for ix in range(i_proc, self.x0.shape[0], n_proc):
            for iy in range(self.x0.shape[1]):
                stream = Stream(field, self.params,
                                interpolation=interpolation,
                                h_min=h_min, h_max=h_max, len_max=len_max,
                                tol=tol, iter_max=iter_max,
                                xx=xx[int(ix/n_proc), iy, :])
                sub_x1[int(ix/n_proc), iy] = stream.tracers[stream.stream_len-1, 0]
                sub_y1[int(ix/n_proc), iy] = stream.tracers[stream.stream_len-1, 1]
                sub_z1[int(ix/n_proc), iy] = stream.tracers[stream.stream_len-1, 2]
                sub_l[int(ix/n_proc), iy] = stream.len
                if any(np.array(self.params.int_q) == 'curly_A'):
                    for l in range(stream.stream_len-1):
                        aaInt = vec_int((stream.tracers[l+1] + stream.tracers[l])/2,
                                        var, aa,
                                        interpolation=self.params.interpolation)
                        sub_curly_A[int(ix/n_proc), iy] += \
                            np.dot(aaInt, (stream.tracers[l+1] - stream.tracers[l]))
                if any(np.array(self.params.int_q) == 'ee'):
                    for l in range(stream.stream_len-1):
                        eeInt = vec_int((stream.tracers[l+1] + stream.tracers[l])/2,
                                        var, ee,
                                        interpolation=self.params.interpolation)
                        sub_ee[int(ix/n_proc), iy] += \
                            np.dot(eeInt, (stream.tracers[l+1] - stream.tracers[l]))

                # Create the color mapping.
                if (sub_z1[int(ix/n_proc), iy] >
                        self.params.Oz + self.params.Lz - self.params.dz*4):
                    if (self.x0[ix, iy, t_idx] - sub_x1[int(ix/n_proc), iy]) > 0:
                        if (self.y0[ix, iy, t_idx] - sub_y1[int(ix/n_proc), iy]) > 0:
                            sub_mapping[int(ix/n_proc), iy, :] = [0, 1, 0]
                        else:
                            sub_mapping[int(ix/n_proc), iy, :] = [1, 1, 0]
                    else:
                        if (self.y0[ix, iy, t_idx] - sub_y1[int(ix/n_proc), iy]) > 0:
                            sub_mapping[int(ix/n_proc), iy, :] = [0, 0, 1]
                        else:
                            sub_mapping[int(ix/n_proc), iy, :] = [1, 0, 0]
                else:
                    sub_mapping[int(ix/n_proc), iy, :] = [1, 1, 1]
        queue.put((i_proc, sub_x1, sub_y1, sub_z1, sub_l, sub_mapping,
                   sub_curly_A, sub_ee))

    # Write the tracing parameters.
    self.params.trace_field = trace_field
    self.params.h_min = h_min
    self.params.h_max = h_max
    self.params.len_max = len_max
    self.params.tol = tol
    self.params.interpolation = interpolation
    self.params.trace_sub = trace_sub
    self.params.int_q = int_q
    self.params.varfile = varfile
    self.params.ti = ti
    self.params.tf = tf
    self.params.integration = integration
    self.params.data_dir = data_dir
    self.params.n_proc = n_proc

    # Multi core setup.
    if not np.isscalar(n_proc) or (n_proc % 1 != 0):
        print("error: invalid processor number")
        return -1
    queue = mp.Queue()

    # Convert int_q string into list.
    if not isinstance(int_q, list):
        int_q = [int_q]

    # Read the data.
    magic = []
    if trace_field == 'bb':
        magic.append('bb')
    if trace_field == 'jj':
        magic.append('jj')
    if trace_field == 'vort':
        magic.append('vort')
    if any(np.array(int_q) == 'ee'):
        magic.append('bb')
        magic.append('jj')
    dim = pc.read_dim(datadir=data_dir)

    # Check if user wants a tracer time series.
    if (ti % 1 == 0) and (tf % 1 == 0) and (ti >= 0) and (tf >= ti):
        series = True
        nTimes = tf - ti + 1
    else:
        series = False
        nTimes = 1

    # Initialize the arrays.
    self.x0 = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), nTimes])
    self.y0 = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), nTimes])
    self.x1 = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), nTimes])
    self.y1 = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), nTimes])
    self.z1 = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), nTimes])
    self.l = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), nTimes])
    if any(np.array(int_q) == 'curly_A'):
        self.curly_A = np.zeros([int(trace_sub*dim.nx),
                                 int(trace_sub*dim.ny), nTimes])
    if any(np.array(int_q) == 'ee'):
        self.ee = np.zeros([int(trace_sub*dim.nx),
                            int(trace_sub*dim.ny), nTimes])
    self.mapping = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny),
                             nTimes, 3])
    self.t = np.zeros(nTimes)

    for t_idx in range(ti, tf+1):
        if series:
            varfile = 'VAR' + str(t_idx)

        # Read the data.
        var = pc.read_var(varfile=varfile, datadir=data_dir, magic=magic,
                          quiet=True, trimall=True)
        grid = pc.read_grid(datadir=data_dir, quiet=True, trim=True)
        param2 = pc.read_param(datadir=data_dir, param2=True, quiet=True)
        self.t[t_idx] = var.t

        # Extract the requested vector trace_field.
        field = getattr(var, trace_field)
        if any(np.array(int_q) == 'curly_A'):
            aa = var.aa
        if any(np.array(int_q) == 'ee'):
            ee = var.jj*param2.eta - pc.cross(var.uu, var.bb)

        # Get the simulation parameters.
        self.params.dx = var.dx
        self.params.dy = var.dy
        self.params.dz = var.dz
        self.params.Ox = var.x[0]
        self.params.Oy = var.y[0]
        self.params.Oz = var.z[0]
        self.params.Lx = grid.Lx
        self.params.Ly = grid.Ly
        self.params.Lz = grid.Lz
        self.params.nx = dim.nx
        self.params.ny = dim.ny
        self.params.nz = dim.nz

        # Initialize the tracers.
        for ix in range(int(trace_sub*dim.nx)):
            for iy in range(int(trace_sub*dim.ny)):
                self.x0[ix, iy, t_idx] = grid.x[0] + grid.dx/trace_sub*ix
                self.x1[ix, iy, t_idx] = self.x0[ix, iy, t_idx].copy()
                self.y0[ix, iy, t_idx] = grid.y[0] + grid.dy/trace_sub*iy
                self.y1[ix, iy, t_idx] = self.y0[ix, iy, t_idx].copy()
                self.z1[ix, iy, t_idx] = grid.z[0]

        proc = []
        sub_data = []
        for i_proc in range(n_proc):
            proc.append(mp.Process(target=__sub_tracers,
                                   args=(queue, var, field, t_idx,
                                         i_proc, n_proc)))
        for i_proc in range(n_proc):
            proc[i_proc].start()
        for i_proc in range(n_proc):
            sub_data.append(queue.get())
        for i_proc in range(n_proc):
            proc[i_proc].join()
        for i_proc in range(n_proc):
            # Extract the data from the single cores. Mind the order.
            sub_proc = sub_data[i_proc][0]
            self.x1[sub_proc::n_proc, :, t_idx] = sub_data[i_proc][1]
            self.y1[sub_proc::n_proc, :, t_idx] = sub_data[i_proc][2]
            self.z1[sub_proc::n_proc, :, t_idx] = sub_data[i_proc][3]
            self.l[sub_proc::n_proc, :, t_idx] = sub_data[i_proc][4]
            self.mapping[sub_proc::n_proc, :, t_idx, :] = sub_data[i_proc][5]
            if any(np.array(int_q) == 'curly_A'):
                self.curly_A[sub_proc::n_proc, :, t_idx] = sub_data[i_proc][6]
            if any(np.array(int_q) == 'ee'):
                self.ee[sub_proc::n_proc, :, t_idx] = sub_data[i_proc][7]
        for i_proc in range(n_proc):
            proc[i_proc].terminate()
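
# Example usage of find_tracers (a sketch; Tracers is the class this
# method belongs to, cf. its use in find_fixed below; assumes VAR0 exists):
#
#     tr = Tracers()
#     tr.find_tracers(trace_field='bb', varfile='VAR0', trace_sub=2,
#                     data_dir='./data', n_proc=4)
#     # tr.x1, tr.y1, tr.z1 hold the footpoint mapping, tr.mapping the
#     # color coding of the mapped seed points
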
def calc_tensors(datatopdir, lskip_zeros=False, datadir='data/', rank=0,
                 size=1, comm=None, proc=[0], l_mpi=True, iuxmxy=0,
                 irhomxy=7, iTTmxy=6, first_alpha=9, l_correction=False,
                 t_correction=0., yindex=[]):
    nt = None
    alltmp = 100000
    dim = pc.read_dim()
    gc.garbage
    if len(yindex) == 0:
        iy = np.arange(dim.ny)
    else:
        iy = yindex
    os.chdir(datatopdir)  # return to working directory
    av = []
    if l_mpi:
        from mpi4py import MPI
        if proc.size < dim.nprocz:
            print('rank {}: proc.size {} < dim.nprocz {}'.format(
                rank, proc.size, dim.nprocz))
            yproc = proc[0] // dim.nprocz
            aav, time = pc.read_zaver(datadir, proc=yproc)
            tmp = time.size
        else:
            print('rank {}: proc.size {} >= dim.nprocz {}'.format(
                rank, proc.size, dim.nprocz))
            for iproc in range(0, proc.size, dim.nprocz):
                if iproc == 0:
                    aav, time = pc.read_zaver(datadir,
                                              proc=proc[iproc] // dim.nprocz)
                    tmp = time.size
                else:
                    aav, time = pc.read_zaver(datadir,
                                              proc=proc[iproc] // dim.nprocz)
                    tmp = min(time.size, tmp)
    else:
        av, time = pc.read_zaver(datadir)
    gc.garbage
    if l_mpi:
        print('rank {}: tmp {}'.format(rank, tmp))
        if rank != 0:
            comm.send(tmp, dest=0, tag=rank)
        else:
            for irank in range(1, size):
                tmp = comm.recv(source=irank, tag=irank)
                alltmp = min(alltmp, tmp)
        nt = comm.bcast(alltmp, root=0)
        print('rank {}: nt {}'.format(rank, nt))
        if proc.size < dim.nprocz:
            yndx = iy - yproc*(dim.nygrid // dim.nprocy)
            print('rank {}: yndx[0] {}'.format(rank, yndx[0]))
            av = aav[:nt, :, yndx, :]
        else:
            av = aav[:nt]
            for iproc in range(dim.nprocz, proc.size, dim.nprocz):
                aav, time = pc.read_zaver(datadir, tindex=(0, nt, 1),
                                          proc=proc[iproc] // dim.nprocz)
                av = np.concatenate((av, aav), axis=2)
            aav = []
    print('rank {}: loaded av'.format(rank))

    # Where the testfield was calculated under the old, incorrect
    # specification, apply the correction.
    gc.garbage
    if l_correction:
        itcorr = np.where(time < t_correction)[0]
        av[itcorr, first_alpha+2] *= -dim.nprocz/(dim.nprocz - 2.)
        for j in range(0, 3):
            av[itcorr, first_alpha+5+j] *= -dim.nprocz/(dim.nprocz - 2.)
        av[itcorr, first_alpha+11] *= -dim.nprocz/(dim.nprocz - 2.)
        for j in range(0, 3):
            av[itcorr, first_alpha+14+j] *= -dim.nprocz/(dim.nprocz - 2.)
        av[itcorr, first_alpha+20] *= -dim.nprocz/(dim.nprocz - 2.)
        for j in range(0, 3):
            av[itcorr, first_alpha+23+j] *= -dim.nprocz/(dim.nprocz - 2.)

    # Factor by which to rescale code time to years.
    trescale = 0.62/2.7e-6/(365.*86400.)  # 0.007281508
    time *= trescale
    grid = pc.read_grid(datadir, trim=True, quiet=True)
    r, theta = np.meshgrid(grid.x, grid.y[iy])
    gc.garbage

    # Exclude zeros and the next point if resetting of the test fields
    # is used.
    if lskip_zeros:
        izer0 = np.where(av[:, first_alpha,
                            av.shape[2]//2, av.shape[3]//2] == 0)[0]
        izer1 = np.where(av[:, first_alpha,
                            av.shape[2]//2, av.shape[3]//2] == 0)[0] + 1
        if izer0.size > 0:
            imask = np.delete(np.where(time[:nt]), [izer0, izer1])
        else:
            imask = np.where(time[:nt])[0]
    else:
        imask = np.arange(time[:nt].size)

    if rank == 0:
        print('rank {}: calculating alp'.format(rank))
    alp = np.zeros([3, 3, imask.size, av.shape[2], av.shape[3]])
    eta = np.zeros([3, 3, 3, imask.size, av.shape[2], av.shape[3]])
    urmst = np.zeros([3, 3, av.shape[2], av.shape[3]])
    etat0 = np.zeros([3, 3, 3, av.shape[2], av.shape[3]])
    #eta0 = np.zeros([3, 3, 3, imask.size, av.shape[2], av.shape[3]])
    Hp = np.zeros([av.shape[2], av.shape[3]])

    # Compute the rms velocity normalisation.
    if rank == 0:
        print('rank {}: calculating urms'.format(rank))
    urms = np.sqrt(np.mean(
        av[imask, iuxmxy + 3, :, :] - av[imask, iuxmxy + 0, :, :]**2 +
        av[imask, iuxmxy + 4, :, :] - av[imask, iuxmxy + 1, :, :]**2 +
        av[imask, iuxmxy + 5, :, :] - av[imask, iuxmxy + 2, :, :]**2,
        axis=0))

    # Compute the turbulent diffusion normalisation.
    cv, gm, alp_MLT = 0.6, 5./3, 5./3
    pp = np.mean(av[imask, iTTmxy, :, :]*av[imask, irhomxy, :, :]*cv*(gm - 1),
                 axis=0)
    if rank == 0:
        print('rank {}: completed pressure'.format(rank))
    for i in range(0, av.shape[2]):
        Hp[i, :] = -1./np.gradient(np.log(pp[i, :]), grid.dx)
    grid, pp = [], []
    for i in range(0, 3):
        for j in range(0, 3):
            alp[i, j, :, :, :] = av[imask, first_alpha + 3*j + i, :, :]
            urmst[i, j, :, :] = urms/3.
            for k in range(0, 3):
                etat0[i, j, k, :, :] = urms*alp_MLT*Hp/3.

    #for i in range(0, imask.size):
    #    eta0[i, :, :, :, :, :] = etat0

    if rank == 0:
        print('rank {}: calculating eta'.format(rank))
    for j in range(0, 3):
        for k in range(0, 3):
            # Sign difference with Schrinner + r correction.
            eta[j, k, 1, :, :, :] = -av[imask,
                                        first_alpha + 18 + 3*k + j, :, :]*r
            eta[j, k, 0, :, :, :] = -av[imask, first_alpha + 9 + 3*k + j, :, :]
    nnt, ny, nx = imask.size, av.shape[2], av.shape[3]
    av = []
    irr, ith, iph = 0, 1, 2

    # Create the output tensors.
    if rank == 0:
        print('rank {}: setting alp'.format(rank))
    alpha = np.zeros([3, 3, nnt, ny, nx])
    beta = np.zeros([3, 3, nnt, ny, nx])
    gamma = np.zeros([3, nnt, ny, nx])
    delta = np.zeros([3, nnt, ny, nx])
    kappa = np.zeros([3, 3, 3, nnt, ny, nx])

    # Alpha tensor.
    if rank == 0:
        print('rank {}: calculating alpha'.format(rank))
    alpha[irr, irr, :, :, :] = (alp[irr, irr, :, :, :] -
                                eta[irr, ith, ith, :, :, :]/r)
    alpha[irr, ith, :, :, :] = 0.5*(alp[irr, ith, :, :, :] +
                                    eta[irr, irr, ith, :, :, :]/r +
                                    alp[ith, irr, :, :, :] -
                                    eta[ith, ith, ith, :, :, :]/r)
    alpha[irr, iph, :, :, :] = 0.5*(alp[iph, irr, :, :, :] +
                                    alp[irr, iph, :, :, :] -
                                    eta[iph, ith, ith, :, :, :]/r)
    alpha[ith, irr, :, :, :] = alpha[irr, ith, :, :, :]
    alpha[ith, ith, :, :, :] = (alp[ith, ith, :, :, :] +
                                eta[ith, irr, ith, :, :, :]/r)
    alpha[ith, iph, :, :, :] = 0.5*(alp[iph, ith, :, :, :] +
                                    alp[ith, iph, :, :, :] +
                                    eta[iph, irr, ith, :, :, :]/r)
    alpha[iph, irr, :, :, :] = alpha[irr, iph, :, :, :]
    alpha[iph, ith, :, :, :] = alpha[ith, iph, :, :, :]
    alpha[iph, iph, :, :, :] = alp[iph, iph, :, :, :]

    # Gamma vector.
    gamma[irr, :, :, :] = -0.5*(alp[ith, iph, :, :, :] -
                                alp[iph, ith, :, :, :] -
                                eta[iph, irr, ith, :, :, :]/r)
    gamma[ith, :, :, :] = -0.5*(alp[iph, irr, :, :, :] -
                                alp[irr, iph, :, :, :] -
                                eta[iph, ith, ith, :, :, :]/r)
    gamma[iph, :, :, :] = -0.5*(alp[irr, ith, :, :, :] -
                                alp[ith, irr, :, :, :] +
                                eta[irr, irr, ith, :, :, :]/r +
                                eta[ith, ith, ith, :, :, :]/r)

    if rank == 0:
        print('rank {}: calculating beta'.format(rank))
    alp = []

    # Beta tensor.
    beta[irr, irr, :, :, :] = -0.5*eta[irr, iph, ith, :, :, :]
    beta[irr, ith, :, :, :] = 0.25*(eta[irr, iph, irr, :, :, :] -
                                    eta[ith, iph, ith, :, :, :])
    beta[irr, iph, :, :, :] = 0.25*(eta[irr, irr, ith, :, :, :] -
                                    eta[iph, iph, ith, :, :, :] -
                                    eta[irr, ith, irr, :, :, :])
    beta[ith, ith, :, :, :] = 0.5*eta[ith, iph, irr, :, :, :]
    beta[ith, iph, :, :, :] = 0.25*(eta[ith, irr, ith, :, :, :] +
                                    eta[iph, iph, irr, :, :, :] -
                                    eta[ith, ith, irr, :, :, :])
    beta[iph, iph, :, :, :] = 0.5*(eta[iph, irr, ith, :, :, :] -
                                   eta[iph, ith, irr, :, :, :])
    beta[ith, irr, :, :, :] = beta[irr, ith, :, :, :]
    beta[iph, irr, :, :, :] = beta[irr, iph, :, :, :]
    beta[iph, ith, :, :, :] = beta[ith, iph, :, :, :]

    # Delta vector.
    delta[irr, :, :, :] = 0.25*(eta[ith, ith, irr, :, :, :] -
                                eta[ith, irr, ith, :, :, :] +
                                eta[iph, iph, irr, :, :, :])
    delta[ith, :, :, :] = 0.25*(eta[irr, irr, ith, :, :, :] -
                                eta[irr, ith, irr, :, :, :] +
                                eta[iph, iph, ith, :, :, :])
    delta[iph, :, :, :] = -0.25*(eta[irr, iph, irr, :, :, :] +
                                 eta[ith, iph, ith, :, :, :])

    # Kappa tensor.
    if rank == 0:
        print('rank {}: calculating kappa'.format(rank))
    for i in range(0, 3):
        kappa[i, irr, irr, :, :, :] = -eta[i, irr, irr, :, :, :]
        kappa[i, irr, ith, :, :, :] = -0.5*(eta[i, ith, irr, :, :, :] +
                                            eta[i, irr, ith, :, :, :])
        kappa[i, irr, iph, :, :, :] = -0.5*eta[i, iph, irr, :, :, :]
        kappa[i, ith, irr, :, :, :] = kappa[i, irr, ith, :, :, :]
        kappa[i, ith, ith, :, :, :] = -eta[i, ith, ith, :, :, :]
        kappa[i, ith, iph, :, :, :] = -0.5*eta[i, iph, ith, :, :, :]
        kappa[i, iph, irr, :, :, :] = kappa[i, irr, iph, :, :, :]
        kappa[i, iph, ith, :, :, :] = kappa[i, ith, iph, :, :, :]
        #for it in range(0, nnt):
        #    kappa[i, iph, iph, it, :, :] = 1e-9*etat0[i, 0, 0, :, :]
    eta = []
    return alpha, beta, gamma, delta, kappa, \
        time[imask], urmst, etat0
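
# Example usage of calc_tensors (a sketch with hypothetical values; assumes
# a testfield run in './mysim' with z-averages written and, for this
# single-core example, l_mpi=False):
#
#     alpha, beta, gamma, delta, kappa, t, urmst, etat0 = \
#         calc_tensors('./mysim', lskip_zeros=True, l_mpi=False)
#     print(alpha.shape)   # (3, 3, nt, ny, nx)
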
def tracers(traceField='bb', hMin=2e-3, hMax=2e4, lMax=500, tol=1e-2,
            interpolation='weighted', trace_sub=1, intQ=[''],
            varfile='VAR0', ti=-1, tf=-1, integration='simple',
            datadir='data/', destination='tracers.dat', nproc=1):
    """
    Trace streamlines from the VAR files and integrate quantity 'intQ'
    along them.

    call signature::

      tracers(field = 'bb', hMin = 2e-3, hMax = 2e2, lMax = 500,
              tol = 2e-3, interpolation = 'weighted', trace_sub = 1,
              intQ = '', varfile = 'VAR0', ti = -1, tf = -1,
              datadir = 'data', destination = 'tracers.dat', nproc = 1)

    Trace streamlines of the vector field 'traceField' from z = z0 to
    z = z1 and integrate quantities 'intQ' along the lines. Creates a
    2d mapping as in 'streamlines.f90'.

    Keyword arguments:

    *traceField*: Vector field used for the streamline tracing.
    *hMin*: Minimum step length for an underflow to occur.
    *hMax*: Parameter for the initial step length.
    *lMax*: Maximum length of the streamline. Integration will stop if
      l >= lMax.
    *tol*: Tolerance for each integration step. Reduces the step length
      if error >= tol.
    *interpolation*: Interpolation of the vector field.
      'mean': takes the mean of the adjacent grid points.
      'weighted': weights the adjacent grid points according to their
      distance.
    *trace_sub*: Number of sub-grid cells for the seeds.
    *intQ*: Quantities to be integrated along the streamlines.
    *varfile*: Varfile to be read.
    *integration*: Integration method.
      'simple': low order method.
      'RK6': Runge-Kutta 6th order.
    *ti*: Initial VAR file index for tracer time sequences.
      Overrides 'varfile'.
    *tf*: Final VAR file index for tracer time sequences.
      Overrides 'varfile'.
    *datadir*: Directory where the data is stored.
    *destination*: Destination file.
    *nproc*: Number of cores for multi core computation.
    """
    # returns the tracers for the specified starting locations
    def subTracers(q, vv, p, tracers0, iproc, hMin=2e-3, hMax=2e4,
                   lMax=500, tol=1e-2, interpolation='weighted',
                   integration='simple', intQ=['']):
        tracers = tracers0
        mapping = np.zeros((tracers.shape[0], tracers.shape[1], 3))

        for ix in range(tracers.shape[0]):
            for iy in range(tracers.shape[1]):
                xx = tracers[ix, iy, 2:5].copy()
                s = pc.stream(vv, p, interpolation=interpolation,
                              integration=integration, hMin=hMin,
                              hMax=hMax, lMax=lMax, tol=tol, xx=xx)
                tracers[ix, iy, 2:5] = s.tracers[s.sl - 1]
                tracers[ix, iy, 5] = s.l
                if (any(intQ == 'curlyA')):
                    for l in range(s.sl - 1):
                        aaInt = pc.vecInt((s.tracers[l + 1] + s.tracers[l]) / 2,
                                          aa, p, interpolation)
                        tracers[ix, iy, 6] += np.dot(
                            aaInt, (s.tracers[l + 1] - s.tracers[l]))

                # create the color mapping
                if (tracers[ix, iy, 4] > grid.z[-2]):
                    if (tracers[ix, iy, 0] - tracers[ix, iy, 2]) > 0:
                        if (tracers[ix, iy, 1] - tracers[ix, iy, 3]) > 0:
                            mapping[ix, iy, :] = [0, 1, 0]
                        else:
                            mapping[ix, iy, :] = [1, 1, 0]
                    else:
                        if (tracers[ix, iy, 1] - tracers[ix, iy, 3]) > 0:
                            mapping[ix, iy, :] = [0, 0, 1]
                        else:
                            mapping[ix, iy, :] = [1, 0, 0]
                else:
                    mapping[ix, iy, :] = [1, 1, 1]
        q.put((tracers, mapping, iproc))

    # multi core setup
    if (not np.isscalar(nproc)) or (nproc % 1 != 0):
        print("error: invalid processor number")
        return -1
    queue = mp.Queue()

    # read the data
    # make sure to read the var files with the correct magic
    if (traceField == 'bb'):
        magic = 'bb'
    if (traceField == 'jj'):
        magic = 'jj'
    if (traceField == 'vort'):
        magic = 'vort'

    # convert intQ string into list
    if (not isinstance(intQ, list)):
        intQ = [intQ]
    intQ = np.array(intQ)

    grid = pc.read_grid(datadir=datadir, trim=True, quiet=True)
    dim = pc.read_dim(datadir=datadir)
    tol2 = tol**2

    # check if user wants a tracer time series
    if ((ti % 1 == 0) and (tf % 1 == 0) and (ti >= 0) and (tf >= ti)):
        series = True
        n_times = tf - ti + 1
    else:
        series = False
        n_times = 1

    tracers = np.zeros([int(trace_sub * dim.nx), int(trace_sub * dim.ny),
                        n_times, 6 + len(intQ)])
    mapping = np.zeros([int(trace_sub * dim.nx), int(trace_sub * dim.ny),
                        n_times, 3])
    t = np.zeros(n_times)

    for tIdx in range(n_times):
        if series:
            varfile = 'VAR' + str(tIdx)

        # read the data
        var = pc.read_var(varfile=varfile, datadir=datadir, magic=magic,
                          quiet=True, trimall=True)
        grid = pc.read_grid(datadir=datadir, quiet=True, trim=True)
        t[tIdx] = var.t

        # extract the requested vector traceField
        vv = getattr(var, traceField)
        if (any(intQ == 'curlyA')):
            aa = var.aa

        # initialize the parameters
        p = pc.pClass()
        p.dx = var.dx
        p.dy = var.dy
        p.dz = var.dz
        p.Ox = var.x[0]
        p.Oy = var.y[0]
        p.Oz = var.z[0]
        p.Lx = grid.Lx
        p.Ly = grid.Ly
        p.Lz = grid.Lz
        p.nx = dim.nx
        p.ny = dim.ny
        p.nz = dim.nz

        # initialize the tracers; the seeds are spaced by grid.dx/trace_sub
        for ix in range(int(trace_sub * dim.nx)):
            for iy in range(int(trace_sub * dim.ny)):
                tracers[ix, iy, tIdx, 0] = grid.x[0] + grid.dx / trace_sub * ix
                tracers[ix, iy, tIdx, 2] = tracers[ix, iy, tIdx, 0]
                tracers[ix, iy, tIdx, 1] = grid.y[0] + grid.dy / trace_sub * iy
                tracers[ix, iy, tIdx, 3] = tracers[ix, iy, tIdx, 1]
                tracers[ix, iy, tIdx, 4] = grid.z[0]

        # declare vectors
        xMid = np.zeros(3)
        xSingle = np.zeros(3)
        xHalf = np.zeros(3)
        xDouble = np.zeros(3)

        tmp = []
        subTracersLambda = lambda queue, vv, p, tracers, iproc: \
            subTracers(queue, vv, p, tracers, iproc, hMin=hMin, hMax=hMax,
                       lMax=lMax, tol=tol, interpolation=interpolation,
                       integration=integration, intQ=intQ)
        proc = []
        for iproc in range(nproc):
            proc.append(mp.Process(target=subTracersLambda,
                                   args=(queue, vv, p,
                                         tracers[iproc::nproc, :, tIdx, :],
                                         iproc)))
        for iproc in range(nproc):
            proc[iproc].start()
        for iproc in range(nproc):
            tmp.append(queue.get())
        for iproc in range(nproc):
            proc[iproc].join()
        for iproc in range(nproc):
            tracers[tmp[iproc][2]::nproc, :, tIdx, :], \
                mapping[tmp[iproc][2]::nproc, :, tIdx, :] = \
                (tmp[iproc][0], tmp[iproc][1])
        for iproc in range(nproc):
            proc[iproc].terminate()

    tracers = np.copy(tracers.swapaxes(0, 3), order='C')
    if (destination != ''):
        f = open(datadir + destination, 'wb')
        f.write(np.array(trace_sub, dtype='float32'))
        # write tracers into file
        for tIdx in range(n_times):
            f.write(t[tIdx].astype('float32'))
            f.write(tracers[:, :, tIdx, :].astype('float32'))
        f.close()
    tracers = tracers.swapaxes(0, 3)
    tracers = tracers.swapaxes(0, 1)
    mapping = mapping.swapaxes(0, 1)

    return tracers, mapping, t
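
# Example usage of tracers (a sketch; assumes VAR0 exists and contains the
# vector potential so that magic='bb' can derive the field):
#
#     tr, mapping, t = tracers(traceField='bb', varfile='VAR0',
#                              trace_sub=2, datadir='data/',
#                              destination='tracers.dat', nproc=2)
#     # tr holds seed positions, end positions and streamline lengths
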
    coordsystem = 99
    ydim = unit_length
    zdim = unit_length
elif (par.coord_system == 'cylindric'):
    coordsystem = 200
    ydim = 1.
    zdim = unit_length
elif (par.coord_system == 'spherical'):
    coordsystem = 100
    ydim = 1.
    zdim = 1.
else:
    print("the world is flat and we never got here")
    #break

grid = pc.read_grid(trim=True, datadir=datadir)
dim = pc.read_dim(datadir=datadir)
iformat = 1
grid_style = 0
gridinfo = 0

if (dim.nx > 1):
    incl_x = 1
    dx = np.gradient(grid.x)
else:
    incl_x = 0
    dx = np.repeat(grid.dx, dim.nx)
if (dim.ny > 1):
    incl_y = 1
    dy = np.gradient(grid.y)
else:
def find_fixed(self, data_dir='data/', destination='fixed_points.hf5',
               varfile='VAR0', ti=-1, tf=-1, trace_field='bb', h_min=2e-3,
               h_max=2e4, len_max=500, tol=1e-2, interpolation='trilinear',
               trace_sub=1, integration='simple', int_q=[''], n_proc=1):
    """
    Find the fixed points.

    call signature::

      find_fixed(data_dir='data/', destination='fixed_points.hf5',
                 varfile='VAR0', ti=-1, tf=-1, trace_field='bb',
                 h_min=2e-3, h_max=2e4, len_max=500, tol=1e-2,
                 interpolation='trilinear', trace_sub=1,
                 integration='simple', int_q=[''], n_proc=1):

    Finds the fixed points. Returns the fixed point positions.

    Keyword arguments:

    *data_dir*: Data directory.
    *destination*: Name of the fixed points file.
    *varfile*: Varfile to be read.
    *ti*: Initial VAR file index for tracer time sequences.
    *tf*: Final VAR file index for tracer time sequences.
    *trace_field*: Vector field used for the streamline tracing.
    *h_min*: Minimum step length for an underflow to occur.
    *h_max*: Parameter for the initial step length.
    *len_max*: Maximum length of the streamline. Integration will stop if
      l >= len_max.
    *tol*: Tolerance for each integration step. Reduces the step length
      if error >= tol.
    *interpolation*: Interpolation of the vector field.
      'mean': takes the mean of the adjacent grid points.
      'trilinear': weights the adjacent grid points according to their
      distance.
    *trace_sub*: Number of sub-grid cells for the seeds for the initial
      mapping.
    *integration*: Integration method.
      'simple': low order method.
      'RK6': Runge-Kutta 6th order.
    *int_q*: Quantities to be integrated along the streamlines.
    *n_proc*: Number of cores for multi core computation.
    """
    # Return the fixed points for a subset of the domain for the initial
    # fixed points.
    def __sub_fixed_init(queue, ix0, iy0, field, tracers, var, i_proc):
        diff = np.zeros((4, 2))
        fixed = []
        fixed_sign = []
        fidx = 0
        poincare_array = np.zeros(
            (tracers.x0[i_proc::self.params.n_proc].shape[0],
             tracers.x0.shape[1]))

        for ix in ix0[i_proc::self.params.n_proc]:
            for iy in iy0:
                # Compute the Poincare index around this cell (!= 0 for a
                # potential fixed point).
                diff[0, :] = np.array(
                    [tracers.x1[ix, iy, 0] - tracers.x0[ix, iy, 0],
                     tracers.y1[ix, iy, 0] - tracers.y0[ix, iy, 0]])
                diff[1, :] = np.array(
                    [tracers.x1[ix+1, iy, 0] - tracers.x0[ix+1, iy, 0],
                     tracers.y1[ix+1, iy, 0] - tracers.y0[ix+1, iy, 0]])
                diff[2, :] = np.array(
                    [tracers.x1[ix+1, iy+1, 0] - tracers.x0[ix+1, iy+1, 0],
                     tracers.y1[ix+1, iy+1, 0] - tracers.y0[ix+1, iy+1, 0]])
                diff[3, :] = np.array(
                    [tracers.x1[ix, iy+1, 0] - tracers.x0[ix, iy+1, 0],
                     tracers.y1[ix, iy+1, 0] - tracers.y0[ix, iy+1, 0]])
                if sum(np.sum(diff**2, axis=1) != 0):
                    diff = np.swapaxes(
                        np.swapaxes(diff, 0, 1) /
                        np.sqrt(np.sum(diff**2, axis=1)), 0, 1)
                poincare = __poincare_index(field,
                                            tracers.x0[ix:ix+2, iy, 0],
                                            tracers.y0[ix, iy:iy+2, 0], diff)
                poincare_array[ix//n_proc, iy] = poincare

                # Use 5 instead of 2*pi to account for rounding errors.
                if abs(poincare) > 5:
                    # Subsample to get the starting point for the iteration.
                    nt = 4
                    xmin = tracers.x0[ix, iy, 0]
                    ymin = tracers.y0[ix, iy, 0]
                    xmax = tracers.x0[ix+1, iy, 0]
                    ymax = tracers.y0[ix, iy+1, 0]
                    xx = np.zeros((nt**2, 3))
                    tracers_part = np.zeros((nt**2, 5))
                    i1 = 0
                    for j1 in range(nt):
                        for k1 in range(nt):
                            xx[i1, 0] = xmin + j1/(nt-1.)*(xmax - xmin)
                            xx[i1, 1] = ymin + k1/(nt-1.)*(ymax - ymin)
                            xx[i1, 2] = self.params.Oz
                            i1 += 1
                    for it1 in range(nt**2):
                        stream = Stream(field, self.params,
                                        h_min=self.params.h_min,
                                        h_max=self.params.h_max,
                                        len_max=self.params.len_max,
                                        tol=self.params.tol,
                                        interpolation=self.params.interpolation,
                                        integration=self.params.integration,
                                        xx=xx[it1, :])
                        tracers_part[it1, 0:2] = xx[it1, 0:2]
                        tracers_part[it1, 2:] = \
                            stream.tracers[stream.stream_len-1, :]
                    min2 = 1e6
                    minx = xmin
                    miny = ymin
                    i1 = 0
                    for j1 in range(nt):
                        for k1 in range(nt):
                            # Walk the subsample grid in the same order in
                            # which it was filled above.
                            diff2 = (tracers_part[i1, 2] -
                                     tracers_part[i1, 0])**2 + \
                                    (tracers_part[i1, 3] -
                                     tracers_part[i1, 1])**2
                            if diff2 < min2:
                                min2 = diff2
                                minx = xmin + j1/(nt-1.)*(xmax - xmin)
                                miny = ymin + k1/(nt-1.)*(ymax - ymin)
                            i1 += 1

                    # Get the fixed point from this starting position using
                    # Newton's method.
                    point = np.array([minx, miny])
                    fixed_point = __null_point(point, var)

                    # Check if the fixed point lies inside the cell.
                    if ((fixed_point[0] < tracers.x0[ix, iy, 0]) or
                            (fixed_point[0] > tracers.x0[ix+1, iy, 0]) or
                            (fixed_point[1] < tracers.y0[ix, iy, 0]) or
                            (fixed_point[1] > tracers.y0[ix, iy+1, 0])):
                        print("warning: fixed point lies outside the cell")
                    else:
                        fixed.append(fixed_point)
                        fixed_sign.append(np.sign(poincare))
                        fidx += np.sign(poincare)
        queue.put((i_proc, fixed, fixed_sign, fidx, poincare_array))

    # Find the Poincare index of this grid cell.
    def __poincare_index(field, sx, sy, diff):
        poincare = 0
        poincare += __edge(field, [sx[0], sx[1]], [sy[0], sy[0]],
                           diff[0, :], diff[1, :], 0)
        poincare += __edge(field, [sx[1], sx[1]], [sy[0], sy[1]],
                           diff[1, :], diff[2, :], 0)
        poincare += __edge(field, [sx[1], sx[0]], [sy[1], sy[1]],
                           diff[2, :], diff[3, :], 0)
        poincare += __edge(field, [sx[0], sx[0]], [sy[1], sy[0]],
                           diff[3, :], diff[0, :], 0)
        return poincare

    # Compute the rotation along one edge.
    def __edge(field, sx, sy, diff1, diff2, rec):
        phiMin = np.pi/8.
        dtot = m.atan2(diff1[0]*diff2[1] - diff2[0]*diff1[1],
                       diff1[0]*diff2[0] + diff1[1]*diff2[1])
        if (abs(dtot) > phiMin) and (rec < 4):
            xm = 0.5*(sx[0] + sx[1])
            ym = 0.5*(sy[0] + sy[1])

            # Trace the intermediate field line.
            stream = Stream(field, self.params, h_min=self.params.h_min,
                            h_max=self.params.h_max,
                            len_max=self.params.len_max,
                            tol=self.params.tol,
                            interpolation=self.params.interpolation,
                            integration=self.params.integration,
                            xx=np.array([xm, ym, self.params.Oz]))
            stream_x0 = stream.tracers[0, 0]
            stream_y0 = stream.tracers[0, 1]
            stream_x1 = stream.tracers[stream.stream_len-1, 0]
            stream_y1 = stream.tracers[stream.stream_len-1, 1]
            stream_z1 = stream.tracers[stream.stream_len-1, 2]

            # Discard any streamline which does not converge or hits the
            # boundary.
            if ((stream.len >= len_max) or
                    (stream_z1 < self.params.Oz + self.params.Lz -
                     self.params.dz)):
                dtot = 0.
            else:
                diffm = np.array([stream_x1 - stream_x0,
                                  stream_y1 - stream_y0])
                if sum(diffm**2) != 0:
                    diffm = diffm/np.sqrt(sum(diffm**2))
                dtot = __edge(field, [sx[0], xm], [sy[0], ym],
                              diff1, diffm, rec+1) + \
                       __edge(field, [xm, sx[1]], [ym, sy[1]],
                              diffm, diff2, rec+1)
        return dtot

    # Find the null point of the mapping, i.e. the fixed point, using
    # Newton's method.
    def __null_point(point, var):
        dl = min(var.dx, var.dy)/100.
        it = 0
        # Tracers used to find the fixed point.
        tracers_null = np.zeros((5, 4))
        while True:
            # Trace the field lines at the original point and for the
            # Jacobian (second order seems to be enough).
            xx = np.zeros((5, 3))
            xx[0, :] = np.array([point[0], point[1], self.params.Oz])
            xx[1, :] = np.array([point[0]-dl, point[1], self.params.Oz])
            xx[2, :] = np.array([point[0]+dl, point[1], self.params.Oz])
            xx[3, :] = np.array([point[0], point[1]-dl, self.params.Oz])
            xx[4, :] = np.array([point[0], point[1]+dl, self.params.Oz])
            for it1 in range(5):
                stream = Stream(field, self.params,
                                h_min=self.params.h_min,
                                h_max=self.params.h_max,
                                len_max=self.params.len_max,
                                tol=self.params.tol,
                                interpolation=self.params.interpolation,
                                integration=self.params.integration,
                                xx=xx[it1, :])
                tracers_null[it1, :2] = xx[it1, :2]
                tracers_null[it1, 2:] = \
                    stream.tracers[stream.stream_len-1, 0:2]

            # Check the function convergence.
            ff = np.zeros(2)
            ff[0] = tracers_null[0, 2] - tracers_null[0, 0]
            ff[1] = tracers_null[0, 3] - tracers_null[0, 1]
            if sum(abs(ff)) <= 1e-3*min(self.params.dx, self.params.dy):
                fixed_point = np.array([point[0], point[1]])
                break

            # Compute the Jacobian.
            fjac = np.zeros((2, 2))
            fjac[0, 0] = ((tracers_null[2, 2] - tracers_null[2, 0]) -
                          (tracers_null[1, 2] - tracers_null[1, 0]))/2./dl
            fjac[0, 1] = ((tracers_null[4, 2] - tracers_null[4, 0]) -
                          (tracers_null[3, 2] - tracers_null[3, 0]))/2./dl
            fjac[1, 0] = ((tracers_null[2, 3] - tracers_null[2, 1]) -
                          (tracers_null[1, 3] - tracers_null[1, 1]))/2./dl
            fjac[1, 1] = ((tracers_null[4, 3] - tracers_null[4, 1]) -
                          (tracers_null[3, 3] - tracers_null[3, 1]))/2./dl

            # Invert the Jacobian.
            fjin = np.zeros((2, 2))
            det = fjac[0, 0]*fjac[1, 1] - fjac[0, 1]*fjac[1, 0]
            if abs(det) < dl:
                fixed_point = point
                break
            fjin[0, 0] = fjac[1, 1]
            fjin[1, 1] = fjac[0, 0]
            fjin[0, 1] = -fjac[0, 1]
            fjin[1, 0] = -fjac[1, 0]
            fjin = fjin/det
            dpoint = np.zeros(2)
            dpoint[0] = -fjin[0, 0]*ff[0] - fjin[0, 1]*ff[1]
            dpoint[1] = -fjin[1, 0]*ff[0] - fjin[1, 1]*ff[1]
            point += dpoint

            # Check the root convergence.
            if sum(abs(dpoint)) < 1e-3*min(self.params.dx, self.params.dy):
                fixed_point = point
                break
            if it > 20:
                fixed_point = point
                print("warning: Newton did not converge")
                break
            it += 1
        return fixed_point

    # Find the fixed point using Newton's method, starting at the previous
    # fixed point.
    def __sub_fixed_series(queue, t_idx, field, var, i_proc):
        fixed = []
        fixed_sign = []
        for i, point in enumerate(
                self.fixed_points[t_idx-1][i_proc::self.params.n_proc]):
            fixed_tentative = __null_point(point, var)
            # Check if the fixed point lies outside the domain.
            if fixed_tentative[0] >= self.params.Ox and \
                    fixed_tentative[1] >= self.params.Oy and \
                    fixed_tentative[0] <= self.params.Ox + self.params.Lx and \
                    fixed_tentative[1] <= self.params.Oy + self.params.Ly:
                fixed.append(fixed_tentative)
                fixed_sign.append(self.fixed_sign[t_idx-1][i_proc + i*n_proc])
        queue.put((i_proc, fixed, fixed_sign))

    # Discard fixed points which are too close to each other.
    def __discard_close_fixed_points(fixed, fixed_sign, var):
        fixed_new = []
        fixed_new.append(fixed[0])
        fixed_sign_new = []
        fixed_sign_new.append(fixed_sign[0])

        dx = fixed[:, 0] - np.reshape(fixed[:, 0], (fixed.shape[0], 1))
        dy = fixed[:, 1] - np.reshape(fixed[:, 1], (fixed.shape[0], 1))
        mask = (abs(dx) > var.dx/2) + (abs(dy) > var.dy/2)

        for idx in range(1, fixed.shape[0]):
            if all(mask[idx, :idx]):
                fixed_new.append(fixed[idx])
                fixed_sign_new.append(fixed_sign[idx])
        return np.array(fixed_new), np.array(fixed_sign_new)

    # Convert int_q string into list.
    if not isinstance(int_q, list):
        int_q = [int_q]
    self.params.int_q = int_q
    if any(np.array(self.params.int_q) == 'curly_A'):
        self.curly_A = []
    if any(np.array(self.params.int_q) == 'ee'):
        self.ee = []

    # Multi core setup.
    if not np.isscalar(n_proc) or (n_proc % 1 != 0):
        print("error: invalid processor number")
        return -1
    queue = mp.Queue()

    # Write the tracing parameters.
    self.params = TracersParameterClass()
    self.params.trace_field = trace_field
    self.params.h_min = h_min
    self.params.h_max = h_max
    self.params.len_max = len_max
    self.params.tol = tol
    self.params.interpolation = interpolation
    self.params.trace_sub = trace_sub
    self.params.int_q = int_q
    self.params.varfile = varfile
    self.params.ti = ti
    self.params.tf = tf
    self.params.integration = integration
    self.params.data_dir = data_dir
    self.params.destination = destination
    self.params.n_proc = n_proc

    # Make sure to read the var files with the correct magic.
    magic = []
    if trace_field == 'bb':
        magic.append('bb')
    if trace_field == 'jj':
        magic.append('jj')
    if trace_field == 'vort':
        magic.append('vort')
    if any(np.array(int_q) == 'ee'):
        magic.append('bb')
        magic.append('jj')
    dim = pc.read_dim(datadir=data_dir)

    # Check if user wants a tracer time series.
    if (ti % 1 == 0) and (tf % 1 == 0) and (ti >= 0) and (tf >= ti):
        series = True
        varfile = 'VAR' + str(ti)
        n_times = tf - ti + 1
    else:
        series = False
        n_times = 1
    self.t = np.zeros(n_times)

    # Read the initial field.
    var = pc.read_var(varfile=varfile, datadir=data_dir, magic=magic,
                      quiet=True, trimall=True)
    self.t[0] = var.t
    grid = pc.read_grid(datadir=data_dir, quiet=True, trim=True)
    field = getattr(var, trace_field)
    param2 = pc.read_param(datadir=data_dir, param2=True, quiet=True)
    if any(np.array(int_q) == 'ee'):
        ee = var.jj*param2.eta - pc.cross(var.uu, var.bb)

    # Get the simulation parameters.
    self.params.dx = var.dx
    self.params.dy = var.dy
    self.params.dz = var.dz
    self.params.Ox = var.x[0]
    self.params.Oy = var.y[0]
    self.params.Oz = var.z[0]
    self.params.Lx = grid.Lx
    self.params.Ly = grid.Ly
    self.params.Lz = grid.Lz
    self.params.nx = dim.nx
    self.params.ny = dim.ny
    self.params.nz = dim.nz

    # Create the initial mapping.
    tracers = Tracers()
    tracers.find_tracers(trace_field='bb', h_min=h_min, h_max=h_max,
                         len_max=len_max, tol=tol,
                         interpolation=interpolation, trace_sub=trace_sub,
                         varfile=varfile, integration=integration,
                         data_dir=data_dir, int_q=int_q, n_proc=n_proc)
    self.tracers = tracers

    # Set some default values.
    self.t = np.zeros((tf-ti+1)*series + (1-series))
    self.fidx = np.zeros((tf-ti+1)*series + (1-series))
    self.poincare = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny)])
    ix0 = range(0, int(self.params.nx*trace_sub)-1)
    iy0 = range(0, int(self.params.ny*trace_sub)-1)

    # Start the parallelized fixed point finding for the initial time.
    proc = []
    sub_data = []
    fixed = []
    fixed_sign = []
    for i_proc in range(n_proc):
        proc.append(mp.Process(target=__sub_fixed_init,
                               args=(queue, ix0, iy0, field, tracers, var,
                                     i_proc)))
    for i_proc in range(n_proc):
        proc[i_proc].start()
    for i_proc in range(n_proc):
        sub_data.append(queue.get())
    for i_proc in range(n_proc):
        proc[i_proc].join()
    for i_proc in range(n_proc):
        # Extract the data from the single cores. Mind the order.
        sub_proc = sub_data[i_proc][0]
        fixed.extend(sub_data[i_proc][1])
        fixed_sign.extend(sub_data[i_proc][2])
        self.fidx[0] += sub_data[i_proc][3]
        self.poincare[sub_proc::n_proc, :] = sub_data[i_proc][4]
    for i_proc in range(n_proc):
        proc[i_proc].terminate()

    # Discard fixed points which lie too close to each other.
    fixed, fixed_sign = __discard_close_fixed_points(
        np.array(fixed), np.array(fixed_sign), var)
    self.fixed_points.append(np.array(fixed))
    self.fixed_sign.append(np.array(fixed_sign))

    # Find the fixed points for the remaining times.
    for t_idx in range(1, n_times):
        # Read the data.
        varfile = 'VAR' + str(t_idx + ti)
        var = pc.read_var(varfile=varfile, datadir=data_dir, magic=magic,
                          quiet=True, trimall=True)
        field = getattr(var, trace_field)
        self.t[t_idx] = var.t

        # Find the new fixed points.
        proc = []
        sub_data = []
        fixed = []
        fixed_sign = []
        for i_proc in range(n_proc):
            proc.append(mp.Process(target=__sub_fixed_series,
                                   args=(queue, t_idx, field, var, i_proc)))
        for i_proc in range(n_proc):
            proc[i_proc].start()
        for i_proc in range(n_proc):
            sub_data.append(queue.get())
        for i_proc in range(n_proc):
            proc[i_proc].join()
        for i_proc in range(n_proc):
            # Extract the data from the single cores. Mind the order.
            sub_proc = sub_data[i_proc][0]
            fixed.extend(sub_data[i_proc][1])
            fixed_sign.extend(sub_data[i_proc][2])
        for i_proc in range(n_proc):
            proc[i_proc].terminate()

        # Discard fixed points which lie too close to each other.
        fixed, fixed_sign = __discard_close_fixed_points(
            np.array(fixed), np.array(fixed_sign), var)
        self.fixed_points.append(np.array(fixed))
        self.fixed_sign.append(np.array(fixed_sign))
        self.fidx[t_idx] = np.sum(fixed_sign)

    # Compute the traced quantities.
    if any(np.array(self.params.int_q) == 'curly_A') or \
            any(np.array(self.params.int_q) == 'ee'):
        for t_idx in range(0, n_times):
            if any(np.array(self.params.int_q) == 'curly_A'):
                self.curly_A.append([])
            if any(np.array(self.params.int_q) == 'ee'):
                self.ee.append([])
            for fixed in self.fixed_points[t_idx]:
                # Trace the stream line.
                xx = np.array([fixed[0], fixed[1], self.params.Oz])
                stream = Stream(field, self.params,
                                h_min=self.params.h_min,
                                h_max=self.params.h_max,
                                len_max=self.params.len_max,
                                tol=self.params.tol,
                                interpolation=self.params.interpolation,
                                integration=self.params.integration,
                                xx=xx)
                # Do the field line integration.
                if any(np.array(self.params.int_q) == 'curly_A'):
                    curly_A = 0
                    for l in range(stream.stream_len - 1):
                        aaInt = vec_int(
                            (stream.tracers[l+1] + stream.tracers[l])/2,
                            var, var.aa,
                            interpolation=self.params.interpolation)
                        curly_A += np.dot(aaInt, (stream.tracers[l+1] -
                                                  stream.tracers[l]))
                    self.curly_A[-1].append(curly_A)
                if any(np.array(self.params.int_q) == 'ee'):
                    ee_p = 0
                    for l in range(stream.stream_len - 1):
                        eeInt = vec_int(
                            (stream.tracers[l+1] + stream.tracers[l])/2,
                            var, ee,
                            interpolation=self.params.interpolation)
                        ee_p += np.dot(eeInt, (stream.tracers[l+1] -
                                               stream.tracers[l]))
                    self.ee[-1].append(ee_p)
            if any(np.array(self.params.int_q) == 'curly_A'):
                self.curly_A[-1] = np.array(self.curly_A[-1])
            if any(np.array(self.params.int_q) == 'ee'):
                self.ee[-1] = np.array(self.ee[-1])
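
# Example usage of find_fixed (a sketch; 'FixedPoints' is a hypothetical
# name for the class this method belongs to; assumes VAR0..VAR5 exist):
#
#     fp = FixedPoints()
#     fp.find_fixed(data_dir='data/', trace_field='bb', ti=0, tf=5,
#                   trace_sub=2, n_proc=4)
#     print(fp.fixed_points[0])   # fixed point positions at the first time
#     print(fp.fidx)              # sum of Poincare indices per time
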
#!/usr/bin/env python

import pencil as pc

P = pc.P
dim = pc.read_dim()
index = pc.read_index()
param = pc.read_param(quiet=True)
grid = pc.read_grid(trim=True, param=param, quiet=True)
P.ion()
P.figure(figsize=(6, 6), dpi=64)
frame = grid.x.min(), grid.x.max(), grid.y.min(), grid.y.max()
P.subplots_adjust(bottom=0, top=1, left=0, right=1)
x0 = grid.x.mean()
P.axvline(x0 + 0.5, color='black', linestyle='--')
P.axvline(x0 - 0.5, color='black', linestyle='--')
P.axhline(0.5, color='black', linestyle='--')
P.axhline(-0.5, color='black', linestyle='--')
for ivar in range(0, 8):
    print("read VAR%d" % ivar)
    var = pc.read_var(ivar=ivar, run2D=param.lwrite_2d, param=param, dim=dim,
                      index=index, quiet=True, trimall=True)
    f = var.lnrho[dim.nz // 2, ...]
def pc2vtk(varfile = 'var.dat', datadir = 'data/', proc = -1, variables = ['rho','uu','bb'], magic = [], b_ext = False, destination = 'work', quiet = True): """ Convert data from PencilCode format to vtk. call signature:: pc2vtk(varfile = 'var.dat', datadir = 'data/', proc = -1, variables = ['rho','uu','bb'], magic = [], destination = 'work') Read *varfile* and convert its content into vtk format. Write the result in *destination*. Keyword arguments: *varfile*: The original varfile. *datadir*: Directory where the data is stored. *proc*: Processor which should be read. Set to -1 for all processors. *variables* = [ 'rho' , 'lnrho' , 'uu' , 'bb', 'b_mag', 'jj', 'j_mag', 'aa', 'ab', 'TT', 'lnTT', 'cc', 'lncc', 'ss', 'vort' ] Variables which should be written. *magic*: [ 'vort' , 'bb' ] Additional variables which should be written. *b_ext*: Add the external magnetic field. *destination*: Destination file. *quiet*: Keep quiet when reading the var files. """ # this should correct for the case the user types only one variable if (len(magic) > 0): if (len(magic[0]) == 1): magic = [magic] # make sure magic is set when writing 'vort' or 'bb' try: index = variables.index('vort') magic.append('vort') except: pass try: index = variables.index('bb') magic.append('bb') except: pass try: index = variables.index('b_mag') magic.append('bb') except: pass try: index = variables.index('jj') magic.append('jj') except: pass try: index = variables.index('j_mag') magic.append('jj') except: pass # reading pc variables and setting dimensions var = pc.read_var(varfile = varfile, datadir = datadir, proc = proc, magic = magic, trimall = True, quiet = quiet) grid = pc.read_grid(datadir = datadir, proc = proc, trim = True, quiet = True) params = pc.read_param(datadir = datadir, param2 = True, quiet = True) B_ext = np.array(params.b_ext) # add external magnetic field if (b_ext == True): var.bb[0,...] += B_ext[0] var.bb[1,...] += B_ext[1] var.bb[2,...]
+= B_ext[2] dimx = len(grid.x) dimy = len(grid.y) dimz = len(grid.z) dim = dimx * dimy * dimz dx = (np.max(grid.x) - np.min(grid.x))/(dimx-1) dy = (np.max(grid.y) - np.min(grid.y))/(dimy-1) dz = (np.max(grid.z) - np.min(grid.z))/(dimz-1) fd = open(destination + '.vtk', 'wb') fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8')) fd.write('VAR files\n'.encode('utf-8')) fd.write('BINARY\n'.encode('utf-8')) fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8')) fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(dimx, dimy, dimz).encode('utf-8')) fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(grid.x[0], grid.y[0], grid.z[0]).encode('utf-8')) fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(dx, dy, dz).encode('utf-8')) fd.write('POINT_DATA {0:9}\n'.format(dim).encode('utf-8')) # this should correct for the case the user type only one variable if (len(variables) > 0): if (len(variables[0]) == 1): variables = [variables] try: index = variables.index('rho') print('writing rho') fd.write('SCALARS rho float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.rho[k,j,i])) except: pass try: index = variables.index('lnrho') print('writing lnrho') fd.write('SCALARS lnrho float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.lnrho[k,j,i])) except: pass try: index = variables.index('uu') print('writing uu') fd.write('VECTORS vfield float\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.uu[0,k,j,i])) fd.write(struct.pack(">f", var.uu[1,k,j,i])) fd.write(struct.pack(">f", var.uu[2,k,j,i])) except: pass try: index = variables.index('bb') print('writing bb') fd.write('VECTORS bfield float\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.bb[0,k,j,i])) fd.write(struct.pack(">f", var.bb[1,k,j,i])) fd.write(struct.pack(">f", var.bb[2,k,j,i])) except: pass try: index = variables.index('b_mag') b_mag = np.sqrt(pc.dot2(var.bb)) print('writing b_mag') fd.write('SCALARS b_mag float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", b_mag[k,j,i])) except: pass try: index = variables.index('jj') print('writing jj') fd.write('VECTORS jfield float\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.jj[0,k,j,i])) fd.write(struct.pack(">f", var.jj[1,k,j,i])) fd.write(struct.pack(">f", var.jj[2,k,j,i])) except: pass try: index = variables.index('j_mag') j_mag = np.sqrt(pc.dot2(var.jj)) print('writing j_mag') fd.write('SCALARS j_mag float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", j_mag[k,j,i])) except: pass try: index = variables.index('aa') print('writing aa') fd.write('VECTORS afield float\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.aa[0,k,j,i])) fd.write(struct.pack(">f", var.aa[1,k,j,i])) fd.write(struct.pack(">f", var.aa[2,k,j,i])) except: pass try: index = variables.index('ab') ab = pc.dot(var.aa, var.bb) print('writing ab') fd.write('SCALARS ab float\n'.encode('utf-8')) 
fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", ab[k,j,i])) except: pass try: index = variables.index('TT') print('writing TT') fd.write('SCALARS TT float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.TT[k,j,i])) except: pass try: index = variables.index('lnTT') print('writing lnTT') fd.write('SCALARS lnTT float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.lnTT[k,j,i])) except: pass try: index = variables.index('cc') print('writing cc') fd.write('SCALARS cc float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.cc[k,j,i])) except: pass try: index = variables.index('lncc') print('writing lncc') fd.write('SCALARS lncc float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.lncc[k,j,i])) except: pass try: index = variables.index('ss') print('writing ss') fd.write('SCALARS ss float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.ss[k,j,i])) except: pass try: index = variables.index('vort') print('writing vort') fd.write('VECTORS vorticity float\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.vort[0,k,j,i])) fd.write(struct.pack(">f", var.vort[1,k,j,i])) fd.write(struct.pack(">f", var.vort[2,k,j,i])) except: pass del(var) fd.close()
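# pc2vtk above writes the legacy VTK STRUCTURED_POINTS format by hand: an ASCII
# header followed by raw big-endian float32 data, which is why every value goes
# through struct.pack(">f", ...). A minimal self-contained sketch of the same
# file layout for a single scalar field (write_scalar_vtk and its arguments are
# illustrative, not part of the module):
import numpy as np

def write_scalar_vtk(fname, field, origin=(0., 0., 0.), spacing=(1., 1., 1.)):
    # field: 3d array indexed [z, y, x]; VTK expects x to vary fastest,
    # which matches the C-ordered buffer of this index convention.
    dimz, dimy, dimx = field.shape
    with open(fname, 'wb') as fd:
        fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
        fd.write('scalar field\n'.encode('utf-8'))
        fd.write('BINARY\n'.encode('utf-8'))
        fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
        fd.write('DIMENSIONS {0} {1} {2}\n'.format(dimx, dimy, dimz).encode('utf-8'))
        fd.write('ORIGIN {0} {1} {2}\n'.format(*origin).encode('utf-8'))
        fd.write('SPACING {0} {1} {2}\n'.format(*spacing).encode('utf-8'))
        fd.write('POINT_DATA {0}\n'.format(field.size).encode('utf-8'))
        fd.write('SCALARS rho float\n'.encode('utf-8'))
        fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
        fd.write(field.astype('>f4').tobytes())  # big-endian float32 block

write_scalar_vtk('demo.vtk', np.random.rand(4, 8, 16))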
def __init__(self, dataDir='data/', fileName='var.dat', streamFile='stream.vtk', interpolation='weighted', integration='RK6', hMin=2e-3, hMax=2e4, lMax=500, tol=1e-2, iterMax=1e3, xx=np.array([0, 0, 0])): """ Creates and returns the traced streamline. call signature: streamInit(dataDir = 'data/', fileName = 'var.dat', streamFile = 'stream.vtk', interpolation = 'weighted', integration = 'RK6', hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2, iterMax = 1e3, xx = np.array([0,0,0])) Trace magnetic streamlines. Keyword arguments: *dataDir*: Data directory. *fileName*: Name of the file with the field information. *interpolation*: Interpolation of the vector field. 'mean': takes the mean of the adjacent grid points. 'weighted': weights the adjacent grid points according to their distance. *integration*: Integration method. 'simple': low order method. 'RK6': Runge-Kutta 6th order. *hMin*: Minimum step length for an underflow to occur. *hMax*: Parameter for the initial step length. *lMax*: Maximum length of the streamline. Integration will stop if l >= lMax. *tol*: Tolerance for each integration step. Reduces the step length if error >= tol. *iterMax*: Maximum number of iterations. *xx*: Initial seeds. """ # read the data var = pc.read_var(datadir=dataDir, varfile=fileName, magic='bb', quiet=True, trimall=True) grid = pc.read_grid(datadir=dataDir, quiet=True) vv = var.bb p = pClass() p.dx = var.dx p.dy = var.dy p.dz = var.dz p.Ox = var.x[0] p.Oy = var.y[0] p.Oz = var.z[0] p.Lx = grid.Lx p.Ly = grid.Ly p.Lz = grid.Lz p.nx = var.bb.shape[1] p.ny = var.bb.shape[2] p.nz = var.bb.shape[3] ss = [] for i in range(xx.shape[1]): s = streamSingle(vv, p, interpolation=interpolation, integration=integration, hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, iterMax=iterMax, xx=xx[:, i]) ss.append(s) slMax = 0 for i in range(xx.shape[1]): if (slMax < ss[i].sl): slMax = ss[i].sl self.tracers = np.zeros((xx.shape[1], slMax, 3)) + np.nan self.sl = np.zeros(xx.shape[1], dtype='int32') self.l = np.zeros(xx.shape[1]) for i in range(xx.shape[1]): self.tracers[i, :ss[i].sl, :] = ss[i].tracers self.sl[i] = ss[i].sl self.l[i] = ss[i].l self.p = s.p self.nt = xx.shape[1] # save into vtk file if (streamFile != []): writer = vtk.vtkPolyDataWriter() writer.SetFileName(dataDir + '/' + streamFile) polyData = vtk.vtkPolyData() fieldData = vtk.vtkFieldData() # field containing length of stream lines for later decomposition field = VN.numpy_to_vtk(self.l) field.SetName('l') fieldData.AddArray(field) field = VN.numpy_to_vtk(self.sl.astype(np.int32)) field.SetName('sl') fieldData.AddArray(field) # streamline parameters tmp = [None]*10 tmp[0] = np.array([hMin], dtype='float32') field = VN.numpy_to_vtk(tmp[0]) field.SetName('hMin') fieldData.AddArray(field) tmp[1] = np.array([hMax], dtype='float32') field = VN.numpy_to_vtk(tmp[1]) field.SetName('hMax') fieldData.AddArray(field) tmp[2] = np.array([lMax], dtype='float32') field = VN.numpy_to_vtk(tmp[2]) field.SetName('lMax') fieldData.AddArray(field) tmp[3] = np.array([tol], dtype='float32') field = VN.numpy_to_vtk(tmp[3]) field.SetName('tol') fieldData.AddArray(field) tmp[4] = np.array([iterMax], dtype='int32') field = VN.numpy_to_vtk(tmp[4]) field.SetName('iterMax') fieldData.AddArray(field) tmp[5] = np.array([self.nt], dtype='int32') field = VN.numpy_to_vtk(tmp[5]) field.SetName('nt') fieldData.AddArray(field) # fields containing simulation parameters stored in paramFile dic = dir(p) params = [None]*len(dic) i = 0 for attr in dic: if (attr[0] != '_'): params[i] = getattr(p, attr) params[i] = np.array([params[i]],
dtype=type(params[i])) field = VN.numpy_to_vtk(params[i]) field.SetName(attr) fieldData.AddArray(field) i += 1 # all streamlines as continuous array of points points = vtk.vtkPoints() for i in range(xx.shape[1]): for sl in range(self.sl[i]): points.InsertNextPoint(self.tracers[i, sl, :]) polyData.SetPoints(points) polyData.SetFieldData(fieldData) writer.SetInput(polyData) writer.SetFileTypeToBinary() writer.Write()
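# A usage sketch for the tracer above. The enclosing class is not shown here,
# so the name streamInit follows the docstring and is an assumption, as are the
# seed values; seeds are passed as the columns of a (3, nSeeds) array, matching
# the loop over xx[:, i] in the constructor.
import numpy as np

seeds = np.array([[0.0, 0.1, 0.2],      # x coordinates of the seeds
                  [0.0, 0.0, 0.0],      # y coordinates
                  [-1.0, -1.0, -1.0]])  # z coordinates (lower boundary)
sl = streamInit(dataDir='data/', fileName='var.dat', streamFile='stream.vtk',
                interpolation='weighted', integration='RK6', xx=seeds)
# sl.tracers has shape (nSeeds, slMax, 3); entries beyond sl.sl[i] are NaN.
print(sl.tracers.shape, sl.l)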
def pc2vtkxml(varfile = 'var.dat', datadir = 'data/', proc = -1, variables = ['rho','uu','bb'], magic = [], destination = 'work', quiet = True): """ Convert data from PencilCode format to XML vtk. Write .vts Structured Grid, not Rectilinear Grid as VisIt screws up reading Rectilinear Grid. However, this is set to write large grids in VTK XML, which is not yet supported by VisIt anyway. Use ParaView. call signature:: pc2vtkxml(varfile = 'var.dat', datadir = 'data/', proc = -1, variables = ['rho','uu','bb'], magic = [], destination = 'work') Read *varfile* and convert its content into vtk format. Write the result in *destination*. Keyword arguments: *varfile*: The original varfile. *datadir*: Directory where the data is stored. *proc*: Processor which should be read. Set to -1 for all processors. *variables* = [ 'rho' , 'lnrho' , 'uu' , 'bb', 'b_mag', 'jj', 'j_mag', 'aa', 'tt', 'lnTT', 'cc', 'lncc', 'ss', 'vort', 'eth' ] Variables which should be written. *magic*: [ 'vort' , 'bb' ] Additional variables which should be written. *destination*: Destination file. """ # this should correct for the case the user types only one variable if (len(magic) > 0): if (len(magic[0]) == 1): magic = [magic] # make sure magic is set when writing 'vort' or 'bb' try: index = variables.index('vort') magic.append('vort') except: pass try: index = variables.index('bb') magic.append('bb') except: pass try: index = variables.index('b_mag') magic.append('bb') except: pass try: index = variables.index('tt') magic.append('tt') except: pass # get endian format of the data format = pc.get_format(datadir = datadir) # reading pc variables and setting dimensions var = pc.read_var(varfile = varfile, datadir = datadir, proc = proc, magic = magic, trimall = True, quiet = quiet, format = format) grid = pc.read_grid(datadir = datadir, proc = proc, trim = True, quiet = True, format = format) dimx = len(grid.x) dimy = len(grid.y) dimz = len(grid.z) dim = dimx * dimy * dimz scalardata = {} if ('rho' in variables) : rho = np.transpose(var.rho.copy()) scalardata['rho'] = rho if ('lnrho' in variables) : lnrho = np.transpose(var.lnrho.copy()) scalardata['lnrho'] = lnrho if ('tt' in variables) : tt = np.transpose(var.tt.copy()) scalardata['tt'] = tt if ('lntt' in variables) : lntt = np.transpose(var.lntt.copy()) scalardata['lntt'] = lntt if ('cc' in variables) : cc = np.transpose(var.cc.copy()) scalardata['cc'] = cc if ('lncc' in variables) : lncc = np.transpose(var.lncc.copy()) scalardata['lncc'] = lncc if ('ss' in variables) : ss = np.transpose(var.ss.copy()) scalardata['ss'] = ss if ('eth' in variables) : eth = np.transpose(var.eth.copy()) scalardata['eth'] = eth vectordata = {} if ('uu' in variables) : uu1 = np.transpose(var.uu[0,:,:,:].copy()) uu2 = np.transpose(var.uu[1,:,:,:].copy()) uu3 = np.transpose(var.uu[2,:,:,:].copy()) vectordata['uu'] = (uu1,uu2,uu3) if ('bb' in variables) : bb1 = np.transpose(var.bb[0,:,:,:].copy()) bb2 = np.transpose(var.bb[1,:,:,:].copy()) bb3 = np.transpose(var.bb[2,:,:,:].copy()) vectordata['bb'] = (bb1,bb2,bb3) if ('jj' in variables) : jj1 = np.transpose(var.jj[0,:,:,:].copy()) jj2 = np.transpose(var.jj[1,:,:,:].copy()) jj3 = np.transpose(var.jj[2,:,:,:].copy()) vectordata['jj'] = (jj1,jj2,jj3) if ('aa' in variables) : aa1 = np.transpose(var.aa[0,:,:,:].copy()) aa2 = np.transpose(var.aa[1,:,:,:].copy()) aa3 = np.transpose(var.aa[2,:,:,:].copy()) vectordata['aa'] = (aa1,aa2,aa3) if ('vort' in variables) : vort1 = np.transpose(var.vort[0,:,:,:].copy()) vort2 = np.transpose(var.vort[1,:,:,:].copy())
vort3 = np.transpose(var.vort[2,:,:,:].copy()) vectordata['vort'] = (vort1,vort2,vort3) X = np.zeros([dimx,dimy,dimz]) Y = np.zeros([dimx,dimy,dimz]) Z = np.zeros([dimx,dimy,dimz]) for k in range(dimz): for j in range(dimy): for i in range(dimx): X[i,j,k] = grid.x[i] Y[i,j,k] = grid.y[j] Z[i,j,k] = grid.z[k] start = (0,0,0) end = (dimx-1, dimy-1, dimz-1) time = np.array([var.t]) w = VtkFile(destination, VtkStructuredGrid, largeFile=True) w.openGrid(start = start, end = end) # this is for writing Time in VisIt files; however, when using a large grid VisIt does not work anyway. #w.openFieldData() #w.addTuple('TIME', time.dtype.name,len(time)) #w.closeFieldData() w.openPiece(start = start, end = end) w.openElement("Points") w.addData("points", (X,Y,Z)) w.closeElement("Points") w.openData("Point", scalars = scalardata.keys(), vectors = vectordata.keys()) for key in scalardata: w.addData(key,scalardata[key]) for key in vectordata: w.addData(key,vectordata[key]) w.closeData("Point") w.closePiece() w.closeGrid() #w.appendData( time ) w.appendData( (X,Y,Z) ) for key in scalardata: w.appendData(data = scalardata[key]) for key in vectordata: w.appendData(data = vectordata[key]) w.save()
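# For quick one-off exports, the high-level helper in pyevtk (the package that
# provides VtkFile above; older releases ship the same helpers under `evtk`)
# writes a grid in a few lines. A minimal sketch with illustrative shapes and
# names; with 1-d coordinate arrays gridToVTK produces a rectilinear .vtr file,
# which ParaView reads fine (pass 3-d coordinate arrays to get a .vts grid):
import numpy as np
from pyevtk.hl import gridToVTK

nx, ny, nz = 8, 8, 8
x = np.linspace(0.0, 1.0, nx)
y = np.linspace(0.0, 1.0, ny)
z = np.linspace(0.0, 1.0, nz)
rho = np.random.rand(nx, ny, nz)  # point data, ordered [x, y, z]
gridToVTK('work', x, y, z, pointData={'rho': rho})  # -> work.vtr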
def read_tracers(datadir='data/', fileName='tracers.dat', zlim=[], head_size=3, post=False): """ Reads the tracer files and composes a color map. call signature:: tracers, mapping, t = read_tracers(fileName = 'tracers.dat', datadir = 'data/', zlim = [], head_size = 3, post = False) Reads from the tracer files and computes the color map according to A R Yeates and G Hornig 2011 J. Phys. A: Math. Theor. 44 265501 doi:10.1088/1751-8113/44/26/265501. Returns the tracer values, the color mapping and the times of the snapshots. The color mapping can be plotted with: pc.animate_interactive(mapping[:,::-1,:,:], t, dimOrder = (2,1,0,3)) Keyword arguments: *datadir*: Data directory. *fileName*: Name of the tracer file. *zlim*: The upper limit for the field line mapping at which a field line is considered to have reached the upper boundary. *head_size*: Size of the Fortran header in binary data. Most of the time this is 3. For the St Andrews cluster it is 5. *post*: If True reads the post processed tracer file 'data/tracers.dat'. """ class data_struct: def __init__(self): self.xi = [] self.yi = [] self.xf = [] self.yf = [] self.zf = [] self.l = [] self.q = [] data = [] data = data_struct() # compute the offset in order to skip Fortran's header byte if (post): head_size = 0 off = 2 if (head_size == 3): off = 2 if (head_size == 5): off = 3 # read the cpu structure dim = pc.read_dim(datadir=datadir) if (dim.nprocz > 1): print(": number of cores in z-direction > 1") return -1 # read the parameters params = pc.read_param(datadir=datadir, quiet=True) # read the grid grid = pc.read_grid(datadir=datadir, quiet=True) # determine the file structure if (post): n_proc = 1 tracer_file = open(datadir + fileName, 'rb') trace_sub = struct.unpack("f", tracer_file.read(4))[0] tracer_file.close() n_times = int( (os.path.getsize(datadir + fileName) - 4) / (4 * 7 * int(dim.nx * trace_sub) * int(dim.ny * trace_sub))) # sub sampling of the tracers if (not (post)): n_proc = dim.nprocx * dim.nprocy trace_sub = params.trace_sub n_times = int( os.path.getsize(datadir + 'proc0/' + fileName) / (4 * (head_size + 7 * np.floor(dim.nx * trace_sub) * np.floor(dim.ny * trace_sub) / dim.nprocx / dim.nprocy))) # prepare the output arrays tracers = np.zeros( (int(dim.nx * trace_sub), int(dim.ny * trace_sub), n_times, 7)) mapping = np.zeros( (int(dim.nx * trace_sub), int(dim.ny * trace_sub), n_times, 3)) # temporary arrays for one core if (post): tracers_core = tracers mapping_core = mapping else: tracers_core = np.zeros( (int(int(dim.nx * trace_sub) / dim.nprocx), int(int(dim.ny * trace_sub) / dim.nprocy), n_times, 7)) mapping_core = np.zeros( (int(int(dim.nx * trace_sub) / dim.nprocx), int(np.floor(dim.ny * trace_sub) / dim.nprocy), n_times, 3)) # set the upper z-limit to the domain boundary if zlim == []: zlim = grid.z[-dim.nghostz - 1] # read the data from all cores for i in range(n_proc): # read the cpu structure if (post): dim_core = pc.read_dim(datadir=datadir, proc=-1) dim_core.ipx = 0 dim_core.ipy = 0 else: dim_core = pc.read_dim(datadir=datadir, proc=i) stride = int(dim_core.nx * trace_sub) * int(dim_core.ny * trace_sub) llen = head_size + 7 * stride + post if (post): tracer_file = open(datadir + fileName, 'rb') else: tracer_file = open(datadir + 'proc{0}/'.format(i) + fileName, 'rb') tmp = array.array('f') tmp.read( tracer_file, int((head_size + post + 7 * int(dim_core.nx * trace_sub) * int(dim_core.ny * trace_sub)) * n_times) + post) tracer_file.close() t = [] for j in range(n_times): t.append(tmp[off - 1 + j * llen]) data.xi 
= tmp[off + j * llen:off + 1 * stride + j * llen] data.yi = tmp[off + 1 * stride + j * llen:off + 2 * stride + j * llen] data.xf = tmp[off + 2 * stride + j * llen:off + 3 * stride + j * llen] data.yf = tmp[off + 3 * stride + j * llen:off + 4 * stride + j * llen] data.zf = tmp[off + 4 * stride + j * llen:off + 5 * stride + j * llen] data.l = tmp[off + 5 * stride + j * llen:off + 6 * stride + j * llen] data.q = tmp[off + 6 * stride + j * llen:off + 7 * stride + j * llen] # Squeeze the data into 2d array. This make the visualization much faster. for l in range(len(data.xi)): tracers_core[l%(int(dim_core.nx*trace_sub)),int(l/(int(dim_core.nx*trace_sub))),j,:] = \ [data.xi[l], data.yi[l], data.xf[l], data.yf[l], data.zf[l], data.l[l], data.q[l]] if data.zf[l] >= zlim: if (data.xi[l] - data.xf[l]) > 0: if (data.yi[l] - data.yf[l]) > 0: mapping_core[l % (int(dim_core.nx * trace_sub)), int(l / (int(dim_core.nx * trace_sub))), j, :] = [0, 1, 0] else: mapping_core[l % (int(dim_core.nx * trace_sub)), int(l / (int(dim_core.nx * trace_sub))), j, :] = [1, 1, 0] else: if (data.yi[l] - data.yf[l]) > 0: mapping_core[l % (int(dim_core.nx * trace_sub)), int(l / (int(dim_core.nx * trace_sub))), j, :] = [0, 0, 1] else: mapping_core[l % (int(dim_core.nx * trace_sub)), int(l / (int(dim_core.nx * trace_sub))), j, :] = [1, 0, 0] else: mapping_core[l % (int(dim_core.nx * trace_sub)), int(l / (int(dim_core.nx * trace_sub))), j, :] = [1, 1, 1] # copy single core data into total data arrays if (not (post)): tracers[np.round(dim_core.ipx*int(dim_core.nx*trace_sub)):np.round((dim_core.ipx+1)*np.floor(dim_core.nx*trace_sub)), \ np.round(dim_core.ipy*int(dim_core.ny*trace_sub)):np.round((dim_core.ipy+1)*np.floor(dim_core.ny*trace_sub)),j,:] = \ tracers_core[:,:,j,:] mapping[np.round(dim_core.ipx*int(dim_core.nx*trace_sub)):np.round((dim_core.ipx+1)*np.floor(dim_core.nx*trace_sub)), \ np.round(dim_core.ipy*int(dim_core.ny*trace_sub)):np.round((dim_core.ipy+1)*np.floor(dim_core.ny*trace_sub)),j,:] = \ mapping_core[:,:,j,:] # swap axes for post evaluation tracers = tracers.swapaxes(0, 1) mapping = mapping.swapaxes(0, 1) return tracers, mapping, t
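# The mapping built above encodes, following Yeates & Hornig (2011), the
# quadrant of the footpoint displacement (xi - xf, yi - yf) as an RGB colour,
# with white for field lines that never reach zlim. The rule, factored out as a
# self-contained sketch (the function name is illustrative):
def footpoint_colour(xi, yi, xf, yf, zf, zlim):
    # white: the field line did not reach the upper boundary
    if zf < zlim:
        return [1, 1, 1]
    if xi - xf > 0:
        return [0, 1, 0] if yi - yf > 0 else [1, 1, 0]  # green / yellow
    return [0, 0, 1] if yi - yf > 0 else [1, 0, 0]      # blue / red

print(footpoint_colour(0.2, 0.2, 0.1, 0.1, 1.0, 0.9))  # -> [0, 1, 0]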
#!/usr/bin/python
# $Id$

import numpy as N
import pylab as P
import pencil as pc
from os import system

system('cat video.in')
field = input('which field? ')  # raw_input() in Python 2
f, t = pc.read_slices(field=field, proc=0, extension='xy')

dim = pc.read_dim()
grid = pc.read_grid(trim=True)
nt = len(t)
ff = f.reshape(nt, dim.nx)

P.ion()
P.subplot(211)
line, = P.plot(grid.x, ff[0, :])
P.xlim(grid.x[0], grid.x[-1])
P.ylim(ymin=ff.min(), ymax=ff.max())
P.title(field)
st = P.figtext(0.2, 0.85, 't=%.1f' % t[0])

for i in range(1, nt):
    line.set_ydata(ff[i, :])
    st.set_text('t=%.1f' % t[i])
    P.draw()
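# With a modern matplotlib the same update loop is usually written with pyplot
# and a short pause, so the window stays responsive between frames. A
# self-contained sketch on synthetic data (all names here are illustrative):
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0.0, 2.0*np.pi, 128)
frames = [np.sin(x - 0.1*i) for i in range(100)]  # stand-in for ff
plt.ion()
fig, ax = plt.subplots()
line, = ax.plot(x, frames[0])
for i, frame in enumerate(frames[1:], start=1):
    line.set_ydata(frame)
    ax.set_title('frame %d' % i)
    plt.pause(0.01)  # flush the event loop and redraw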
def fixed_points(datadir='data/', fileName='fixed_points_post.dat', varfile='VAR0', ti=-1, tf=-1, traceField='bb', hMin=2e-3, hMax=2e4, lMax=500, tol=1e-2, interpolation='weighted', trace_sub=1, integration='simple', nproc=1): """ Find the fixed points. call signature:: fixed = fixed_points(datadir = 'data/', fileName = 'fixed_points_post.dat', varfile = 'VAR0', ti = -1, tf = -1, traceField = 'bb', hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2, interpolation = 'weighted', trace_sub = 1, integration = 'simple', nproc = 1) Finds the fixed points. Returns the fixed points positions. Keyword arguments: *datadir*: Data directory. *fileName*: Name of the fixed points file. *varfile*: Varfile to be read. *ti*: Initial VAR file index for tracer time sequences. Overrides 'varfile'. *tf*: Final VAR file index for tracer time sequences. Overrides 'varfile'. *traceField*: Vector field used for the streamline tracing. *hMin*: Minimum step length for and underflow to occur. *hMax*: Parameter for the initial step length. *lMax*: Maximum length of the streamline. Integration will stop if l >= lMax. *tol*: Tolerance for each integration step. Reduces the step length if error >= tol. *interpolation*: Interpolation of the vector field. 'mean': takes the mean of the adjacent grid point. 'weighted': weights the adjacent grid points according to their distance. *trace_sub*: Number of sub-grid cells for the seeds for the initial mapping. *intQ*: Quantities to be integrated along the streamlines. *integration*: Integration method. 'simple': low order method. 'RK6': Runge-Kutta 6th order. *nproc*: Number of cores for multi core computation. """ class data_struct: def __init__(self): self.t = [] self.fidx = [] # number of fixed points at this time self.x = [] self.y = [] self.q = [] # Computes rotation along one edge. def edge(vv, p, sx, sy, diff1, diff2, phiMin, rec, hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, interpolation=interpolation, integration=integration): dtot = m.atan2(diff1[0] * diff2[1] - diff2[0] * diff1[1], diff1[0] * diff2[0] + diff1[1] * diff2[1]) if ((abs(dtot) > phiMin) and (rec < 4)): xm = 0.5 * (sx[0] + sx[1]) ym = 0.5 * (sy[0] + sy[1]) # trace intermediate field line s = pc.stream(vv, p, hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, interpolation=interpolation, integration=integration, xx=np.array([xm, ym, p.Oz])) tracer = np.concatenate( (s.tracers[0, 0:2], s.tracers[s.sl - 1, :], np.reshape(s.l, (1)))) # discard any streamline which does not converge or hits the boundary if ((tracer[5] >= lMax) or (tracer[4] < p.Oz + p.Lz - p.dz)): dtot = 0. else: diffm = np.array( [tracer[2] - tracer[0], tracer[3] - tracer[1]]) if (sum(diffm**2) != 0): diffm = diffm / np.sqrt(sum(diffm**2)) dtot = edge(vv, p, [sx[0], xm], [sy[0], ym], diff1, diffm, phiMin, rec+1, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration)+ \ edge(vv, p, [xm, sx[1]], [ym, sy[1]], diffm, diff2, phiMin, rec+1, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration) return dtot # Finds the Poincare index of this grid cell. 
def pIndex(vv, p, sx, sy, diff, phiMin, hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, interpolation=interpolation, integration=integration): poincare = 0 poincare += edge(vv, p, [sx[0], sx[1]], [sy[0], sy[0]], diff[0, :], diff[1, :], phiMin, 0, hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, interpolation=interpolation, integration=integration) poincare += edge(vv, p, [sx[1], sx[1]], [sy[0], sy[1]], diff[1, :], diff[2, :], phiMin, 0, hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, interpolation=interpolation, integration=integration) poincare += edge(vv, p, [sx[1], sx[0]], [sy[1], sy[1]], diff[2, :], diff[3, :], phiMin, 0, hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, interpolation=interpolation, integration=integration) poincare += edge(vv, p, [sx[0], sx[0]], [sy[1], sy[0]], diff[3, :], diff[0, :], phiMin, 0, hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, interpolation=interpolation, integration=integration) return poincare # fixed point finder for a subset of the domain def subFixed(queue, ix0, iy0, vv, p, tracers, iproc, hMin=2e-3, hMax=2e4, lMax=500, tol=1e-2, interpolation='weighted', integration='simple'): diff = np.zeros((4, 2)) phiMin = np.pi / 8. x = [] y = [] q = [] fidx = 0 for ix in ix0: for iy in iy0: # compute Poincare index around this cell (!= 0 for potential fixed point) diff[0, :] = tracers[iy, ix, 0, 2:4] - tracers[iy, ix, 0, 0:2] diff[1, :] = tracers[iy, ix + 1, 0, 2:4] - tracers[iy, ix + 1, 0, 0:2] diff[2, :] = tracers[iy + 1, ix + 1, 0, 2:4] - tracers[iy + 1, ix + 1, 0, 0:2] diff[3, :] = tracers[iy + 1, ix, 0, 2:4] - tracers[iy + 1, ix, 0, 0:2] if (sum(np.sum(diff**2, axis=1) != 0) == True): diff = np.swapaxes( np.swapaxes(diff, 0, 1) / np.sqrt(np.sum(diff**2, axis=1)), 0, 1) poincare = pIndex(vv, p, tracers[iy, ix:ix + 2, 0, 0], tracers[iy:iy + 2, ix, 0, 1], diff, phiMin, hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, interpolation=interpolation, integration=integration) if (abs(poincare) > 5 ): # use 5 instead of 2pi to account for rounding errors # subsample to get starting point for iteration nt = 4 xmin = tracers[iy, ix, 0, 0] ymin = tracers[iy, ix, 0, 1] xmax = tracers[iy, ix + 1, 0, 0] ymax = tracers[iy + 1, ix, 0, 1] xx = np.zeros((nt**2, 3)) tracersSub = np.zeros((nt**2, 5)) i1 = 0 for j1 in range(nt): for k1 in range(nt): xx[i1, 0] = xmin + j1 / (nt - 1.) * (xmax - xmin) xx[i1, 1] = ymin + k1 / (nt - 1.) * (ymax - ymin) xx[i1, 2] = p.Oz i1 += 1 for it1 in range(nt**2): s = pc.stream(vv, p, hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, interpolation=interpolation, integration=integration, xx=xx[it1, :]) tracersSub[it1, 0:2] = xx[it1, 0:2] tracersSub[it1, 2:] = s.tracers[s.sl - 1, :] min2 = 1e6 minx = xmin miny = ymin i1 = 0 for j1 in range(nt): for k1 in range(nt): diff2 = (tracersSub[i1, 2] - tracersSub[i1, 0] )**2 + (tracersSub[i1, 3] - tracersSub[i1, 1])**2 if (diff2 < min2): min2 = diff2 minx = xmin + j1 / (nt - 1.) * (xmax - xmin) miny = ymin + k1 / (nt - 1.) * (ymax - ymin) it1 += 1 # get fixed point from this starting position using Newton's method #TODO: dl = np.min( var.dx, var.dy ) / 100. 
dl = min(p.dx, p.dy)/100. # step-size for calculating the Jacobian by finite differences (the TODO above references var, which is not in scope here; p carries the same grid spacings) it = 0 # tracers used to find the fixed point tracersNull = np.zeros((5, 4)) point = np.array([minx, miny]) while True: # trace field lines at original point and for Jacobian: # (second order seems to be enough) xx = np.zeros((5, 3)) xx[0, :] = np.array([point[0], point[1], p.Oz]) xx[1, :] = np.array([point[0] - dl, point[1], p.Oz]) xx[2, :] = np.array([point[0] + dl, point[1], p.Oz]) xx[3, :] = np.array([point[0], point[1] - dl, p.Oz]) xx[4, :] = np.array([point[0], point[1] + dl, p.Oz]) for it1 in range(5): s = pc.stream(vv, p, hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, interpolation=interpolation, integration=integration, xx=xx[it1, :]) tracersNull[it1, :2] = xx[it1, :2] tracersNull[it1, 2:] = s.tracers[s.sl - 1, 0:2] # check function convergence ff = np.zeros(2) ff[0] = tracersNull[0, 2] - tracersNull[0, 0] ff[1] = tracersNull[0, 3] - tracersNull[0, 1] #TODO: if (sum(abs(ff)) <= 1e-4): fixedPoint = np.array([point[0], point[1]]) break # compute the Jacobian fjac = np.zeros((2, 2)) fjac[0, 0] = ( (tracersNull[2, 2] - tracersNull[2, 0]) - (tracersNull[1, 2] - tracersNull[1, 0])) / 2. / dl fjac[0, 1] = ( (tracersNull[4, 2] - tracersNull[4, 0]) - (tracersNull[3, 2] - tracersNull[3, 0])) / 2. / dl fjac[1, 0] = ( (tracersNull[2, 3] - tracersNull[2, 1]) - (tracersNull[1, 3] - tracersNull[1, 1])) / 2. / dl fjac[1, 1] = ( (tracersNull[4, 3] - tracersNull[4, 1]) - (tracersNull[3, 3] - tracersNull[3, 1])) / 2. / dl # invert the Jacobian fjin = np.zeros((2, 2)) det = fjac[0, 0] * fjac[1, 1] - fjac[0, 1] * fjac[1, 0] #TODO: if (abs(det) < dl): fixedPoint = point break fjin[0, 0] = fjac[1, 1] fjin[1, 1] = fjac[0, 0] fjin[0, 1] = -fjac[0, 1] fjin[1, 0] = -fjac[1, 0] fjin = fjin / det dpoint = np.zeros(2) dpoint[0] = -fjin[0, 0] * ff[0] - fjin[0, 1] * ff[1] dpoint[1] = -fjin[1, 0] * ff[0] - fjin[1, 1] * ff[1] point += dpoint # check root convergence #TODO: if (sum(abs(dpoint)) < 1e-4): fixedPoint = point break if (it > 20): fixedPoint = point print("warning: Newton did not converge") break it += 1 # check if fixed point lies inside the cell if ((fixedPoint[0] < tracers[iy, ix, 0, 0]) or (fixedPoint[0] > tracers[iy, ix + 1, 0, 0]) or (fixedPoint[1] < tracers[iy, ix, 0, 1]) or (fixedPoint[1] > tracers[iy + 1, ix, 0, 1])): print("warning: fixed point lies outside the cell") else: x.append(fixedPoint[0]) y.append(fixedPoint[1]) #q.append() fidx += 1 queue.put((x, y, q, fidx, iproc)) # multi core setup if (np.isscalar(nproc) == False) or (nproc % 1 != 0): print("error: invalid processor number") return -1 queue = mp.Queue() proc = [] # make sure to read the var files with the correct magic if (traceField == 'bb'): magic = 'bb' if (traceField == 'jj'): magic = 'jj' if (traceField == 'vort'): magic = 'vort' # read the cpu structure dim = pc.read_dim(datadir=datadir) if (dim.nprocz > 1): print("error: number of cores in z-direction > 1") var = pc.read_var(varfile=varfile, datadir=datadir, magic=magic, quiet=True, trimall=True) grid = pc.read_grid(datadir=datadir, quiet=True, trim=True) vv = getattr(var, traceField) # initialize the parameters p = pc.pClass() p.dx = var.dx p.dy = var.dy p.dz = var.dz p.Ox = var.x[0] p.Oy = var.y[0] p.Oz = var.z[0] p.Lx = grid.Lx p.Ly = grid.Ly p.Lz = grid.Lz p.nx = dim.nx p.ny = dim.ny p.nz = dim.nz # create the initial mapping tracers, mapping, t = pc.tracers(traceField='bb', hMin=hMin, hMax=hMax, lMax=lMax, tol=tol, interpolation=interpolation, trace_sub=trace_sub, varfile=varfile, integration=integration,
datadir=datadir, destination='', nproc=nproc) # find fixed points fixed = pc.fixed_struct() xyq = [] # list of return values from subFixed ix0 = range(0, p.nx * trace_sub - 1) # set of grid indices for the cores iy0 = range(0, p.ny * trace_sub - 1) # set of grid indices for the cores subFixedLambda = lambda queue, ix0, iy0, vv, p, tracers, iproc: \ subFixed(queue, ix0, iy0, vv, p, tracers, iproc, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration) for iproc in range(nproc): proc.append( mp.Process(target=subFixedLambda, args=(queue, ix0[iproc::nproc], iy0, vv, p, tracers, iproc))) for iproc in range(nproc): proc[iproc].start() for iproc in range(nproc): xyq.append(queue.get()) for iproc in range(nproc): proc[iproc].join() # put together return values from subFixed fixed.fidx = 0 fixed.t = var.t for iproc in range(nproc): fixed.x.append(xyq[xyq[iproc][4]][0]) fixed.y.append(xyq[xyq[iproc][4]][1]) fixed.q.append(xyq[xyq[iproc][4]][2]) fixed.fidx += xyq[xyq[iproc][4]][3] fixed.t = np.array(fixed.t) fixed.x = np.array(fixed.x) fixed.y = np.array(fixed.y) fixed.q = np.array(fixed.q) fixed.fidx = np.array(fixed.fidx) return fixed
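# The inner loop of subFixed is Newton's method in two dimensions with a
# finite-difference Jacobian: f(x) is the footpoint displacement of the field
# line started at x, and a fixed point is a root of f. The same scheme on a
# plain function, as a self-contained sketch (newton_2d and f are illustrative
# names; the thresholds mirror the 1e-4 values used above):
import numpy as np

def newton_2d(f, x0, dl=1e-4, tol=1e-4, it_max=20):
    point = np.array(x0, dtype=float)
    for it in range(it_max):
        ff = f(point)
        if np.sum(np.abs(ff)) <= tol:            # function convergence
            break
        # Jacobian by central finite differences with step dl.
        fjac = np.empty((2, 2))
        for j in range(2):
            dx = np.zeros(2); dx[j] = dl
            fjac[:, j] = (f(point + dx) - f(point - dx)) / (2.*dl)
        det = fjac[0, 0]*fjac[1, 1] - fjac[0, 1]*fjac[1, 0]
        if abs(det) < dl:                        # nearly singular: give up
            break
        dpoint = -np.linalg.solve(fjac, ff)      # Newton step
        point += dpoint
        if np.sum(np.abs(dpoint)) < tol:         # root convergence
            break
    return point

# The root of f(x, y) = (x**2 - 1, y - x) nearest (2, 0) is (1, 1):
print(newton_2d(lambda q: np.array([q[0]**2 - 1., q[1] - q[0]]), [2., 0.]))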
def pc2vtk_vid(ti=0, tf=1, datadir='data/', proc=-1, variables=['rho', 'uu', 'bb'], magic=[], b_ext=False, destination='animation', quiet=True): """ Convert data from PencilCode format to vtk. call signature:: pc2vtk_vid(ti = 0, tf = 1, datadir = 'data/', proc = -1, variables = ['rho','uu','bb'], magic = [], destination = 'animation') Read the snapshots VAR*ti* through VAR*tf* and convert their content into vtk format. Write the result in *destination*. Keyword arguments: *ti*: Initial time. *tf*: Final time. *datadir*: Directory where the data is stored. *proc*: Processor which should be read. Set to -1 for all processors. *variables* = [ 'rho' , 'lnrho' , 'uu' , 'bb', 'b_mag', 'jj', 'j_mag', 'aa', 'ab', 'TT', 'lnTT', 'cc', 'lncc', 'ss', 'vort' ] Variables which should be written. *magic*: [ 'vort' , 'bb' ] Additional variables which should be written. *b_ext*: Add the external magnetic field. *destination*: Destination files without '.vtk' extension. *quiet*: Keep quiet when reading the var files. """ # this should correct for the case the user types only one variable if (len(variables) > 0): if (len(variables[0]) == 1): variables = [variables] # this should correct for the case the user types only one variable if (len(magic) > 0): if (len(magic[0]) == 1): magic = [magic] # make sure magic is set when writing 'vort' or 'bb' try: index = variables.index('vort') magic.append('vort') except: pass try: index = variables.index('bb') magic.append('bb') except: pass try: index = variables.index('b_mag') magic.append('bb') except: pass try: index = variables.index('jj') magic.append('jj') except: pass try: index = variables.index('j_mag') magic.append('jj') except: pass for i in range(ti, tf + 1): varfile = 'VAR' + str(i) # reading pc variables and setting dimensions var = pc.read_var(varfile=varfile, datadir=datadir, proc=proc, magic=magic, trimall=True, quiet=quiet) grid = pc.read_grid(datadir=datadir, proc=proc, trim=True, quiet=True) params = pc.read_param(datadir=datadir, param2=True, quiet=True) B_ext = np.array(params.b_ext) # add external magnetic field if (b_ext == True): var.bb[0, ...] += B_ext[0] var.bb[1, ...] += B_ext[1] var.bb[2, ...]
+= B_ext[2] dimx = len(grid.x) dimy = len(grid.y) dimz = len(grid.z) dim = dimx * dimy * dimz dx = (np.max(grid.x) - np.min(grid.x)) / (dimx - 1) dy = (np.max(grid.y) - np.min(grid.y)) / (dimy - 1) dz = (np.max(grid.z) - np.min(grid.z)) / (dimz - 1) #fd = open(destination + "{0:1.0f}".format(var.t*1e5) + '.vtk', 'wb') fd = open(destination + str(i) + '.vtk', 'wb') fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8')) fd.write('density + magnetic field\n'.encode('utf-8')) fd.write('BINARY\n'.encode('utf-8')) fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8')) fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(dimx, dimy, dimz).encode('utf-8')) fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format( grid.x[0], grid.y[0], grid.z[0]).encode('utf-8')) fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format( dx, dy, dz).encode('utf-8')) fd.write('POINT_DATA {0:9}\n'.format(dim).encode('utf-8')) try: index = variables.index('rho') print('writing rho') fd.write('SCALARS rho float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.rho[k, j, i])) except: pass try: index = variables.index('lnrho') print('writing lnrho') fd.write('SCALARS lnrho float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.lnrho[k, j, i])) except: pass try: index = variables.index('uu') print('writing uu') fd.write('VECTORS vfield float\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.uu[0, k, j, i])) fd.write(struct.pack(">f", var.uu[1, k, j, i])) fd.write(struct.pack(">f", var.uu[2, k, j, i])) except: pass try: index = variables.index('bb') print('writing bb') fd.write('VECTORS bfield float\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.bb[0, k, j, i])) fd.write(struct.pack(">f", var.bb[1, k, j, i])) fd.write(struct.pack(">f", var.bb[2, k, j, i])) except: pass try: index = variables.index('b_mag') b_mag = np.sqrt(pc.dot2(var.bb)) print('writing b_mag') fd.write('SCALARS b_mag float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", b_mag[k, j, i])) except: pass try: index = variables.index('jj') print('writing jj') fd.write('VECTORS jfield float\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.jj[0, k, j, i])) fd.write(struct.pack(">f", var.jj[1, k, j, i])) fd.write(struct.pack(">f", var.jj[2, k, j, i])) except: pass try: index = variables.index('j_mag') j_mag = np.sqrt(pc.dot2(var.jj)) print('writing j_mag') fd.write('SCALARS j_mag float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", j_mag[k, j, i])) except: pass try: index = variables.index('aa') print('writing aa') fd.write('VECTORS afield float\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.aa[0, k, j, i])) fd.write(struct.pack(">f", var.aa[1, k, j, i])) fd.write(struct.pack(">f", var.aa[2, k, j, i])) except: pass try: index = variables.index('ab') ab = pc.dot(var.aa, var.bb) print('writing ab') fd.write('SCALARS ab 
float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", ab[k, j, i])) except: pass try: index = variables.index('TT') print('writing TT') fd.write('SCALARS TT float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.TT[k, j, i])) except: pass try: index = variables.index('lnTT') print('writing lnTT') fd.write('SCALARS lnTT float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.lnTT[k, j, i])) except: pass try: index = variables.index('cc') print('writing cc') fd.write('SCALARS cc float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.cc[k, j, i])) except: pass try: index = variables.index('lncc') print('writing lncc') fd.write('SCALARS lncc float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.lncc[k, j, i])) except: pass try: index = variables.index('ss') print('writing ss') fd.write('SCALARS ss float\n'.encode('utf-8')) fd.write('LOOKUP_TABLE default\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.ss[k, j, i])) except: pass try: index = variables.index('vort') print('writing vort') fd.write('VECTORS vorticity float\n'.encode('utf-8')) for k in range(dimz): for j in range(dimy): for i in range(dimx): fd.write(struct.pack(">f", var.vort[0, k, j, i])) fd.write(struct.pack(">f", var.vort[1, k, j, i])) fd.write(struct.pack(">f", var.vort[2, k, j, i])) except: pass del (var) fd.close()
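# A usage sketch for pc2vtk_vid: convert the snapshots VAR0 ... VAR10 into one
# legacy vtk file per snapshot. The argument values are illustrative; the run
# directory must already contain the VAR files. ParaView recognises the
# numbered files as a time series when the whole group is opened together.
pc2vtk_vid(ti=0, tf=10, datadir='data/', variables=['rho', 'uu', 'bb'],
           magic=['bb'], b_ext=False, destination='animation', quiet=True)
# -> animation0.vtk, animation1.vtk, ..., animation10.vtk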
def calc_tensors( datatopdir, lskip_zeros=False, datadir='data/', rank=0, size=1, comm=None, proc=[0], l_mpi=True, iuxmxy=0, irhomxy=7, iTTmxy=6, first_alpha=9, l_correction=False, t_correction=0., fskip=2, mskip=1, trange=(0,None), tindex=(0,None,1), yindex=[] ): nt=None alltmp=100000 dim=pc.read_dim() gc.garbage if len(yindex)==0: iy=np.arange(dim.ny) else: iy=yindex os.chdir(datatopdir) # return to working directory av=[] if l_mpi: from mpi4py import MPI if proc.size<dim.nprocz: print('rank {}: proc.size {} < dim.nprocz {}'.format( rank, proc.size, dim.nprocz)) yproc=proc[0]/dim.nprocz aav, time = pc.read_zaver(datadir, trange=trange, tindex=tindex, proc=yproc ) tmp=time.size else: print('rank {}: proc.size {} >= dim.nprocz {}'.format( rank, proc.size, dim.nprocz)) for iproc in range(0,proc.size,dim.nprocz): if iproc ==0: aav, time = pc.read_zaver(datadir, trange=trange, tindex=tindex, proc=proc[iproc]/dim.nprocz ) tmp=time.size else: aav, time = pc.read_zaver(datadir, proc=proc[iproc]/dim.nprocz ) tmp=min(time.size,tmp) else: av, time = pc.read_zaver(datadir, trange=trange, tindex=tindex ) gc.garbage if l_mpi: print('rank {}: tmp {}'.format(rank, tmp)) if rank != 0: comm.send(tmp, dest=0, tag=rank) else: for irank in range(1,size): tmp=comm.recv(source=irank, tag=irank) alltmp=min(alltmp,tmp) nt=comm.bcast(alltmp, root=0) print('rank {}: nt {}'.format(rank, nt)) if proc.size<dim.nprocz: yndx=iy-yproc*(dim.nygrid/dim.nprocy) print('rank {}: yndx[0] {}'.format(rank, yndx[0])) av=aav[:nt,:,yndx,:] else: av=aav[:nt] for iproc in range(dim.nprocz,proc.size,dim.nprocz): aav, time = pc.read_zaver(datadir, tindex=(0,nt,1), proc=proc[iproc]/dim.nprocz ) av=np.concatenate((av,aav), axis=2) aav=[] print('rank {}: loaded av'.format(rank)) #where testfield calculated under old incorrect spec apply correction gc.garbage if l_correction: itcorr = np.where(time<t_correction)[0] av[itcorr,first_alpha+2] *= -dim.nprocz/(dim.nprocz-2.) for j in range(0,3): av[itcorr,first_alpha+5+j] *= -dim.nprocz/(dim.nprocz-2.) av[itcorr,first_alpha+11] *= -dim.nprocz/(dim.nprocz-2.) for j in range(0,3): av[itcorr,first_alpha+14+j] *= -dim.nprocz/(dim.nprocz-2.) av[itcorr,first_alpha+20] *= -dim.nprocz/(dim.nprocz-2.) for j in range(0,3): av[itcorr,first_alpha+23+j] *= -dim.nprocz/(dim.nprocz-2.) #factor by which to rescale code time to years trescale = 0.62/2.7e-6/(365.*86400.) #0.007281508 time *= trescale grid = pc.read_grid(datadir,trim=True, quiet=True) r, theta = np.meshgrid(grid.x,grid.y[iy]) gc.garbage #exclude zeros and next point if resetting of test fields is used #trim reset data and neighbours as required fskip after zeros and mskip before zeros. 
if lskip_zeros: if l_mpi: if rank==0: izer0=np.where(av[:,9,av.shape[2]/2,av.shape[3]/2]==0)[0] for ii in range(1,fskip): izer1=np.where(av[:,9,av.shape[2]/2,av.shape[3]/2]==0)[0]+ii izer0=np.append(izer0,izer1) for ii in range(1,mskip): izer1=np.where(av[:,9,av.shape[2]/2,av.shape[3]/2]==0)[0]-ii izer0=np.append(izer0,izer1) if izer0.size>0: imask=np.delete(np.where(time),[izer0]) else: imask=np.where(time)[0] else: imask=None imask=comm.bcast(imask, root=0) else: izer0=np.where(av[:,9,av.shape[2]/2,av.shape[3]/2]==0)[0] for ii in range(1,fskip): izer1=np.where(av[:,9,av.shape[2]/2,av.shape[3]/2]==0)[0]+ii izer0=np.append(izer0,izer1) for ii in range(1,mskip): izer1=np.where(av[:,9,av.shape[2]/2,av.shape[3]/2]==0)[0]-ii izer0=np.append(izer0,izer1) if izer0.size>0: imask=np.delete(np.where(time),[izer0]) else: imask=np.where(time)[0] else: imask=np.arange(time.size) #if lskip_zeros: # izer0=np.where(av[:,first_alpha,av.shape[2]/2,av.shape[3]/2]==0)[0] # izer1=np.where(av[:,first_alpha,av.shape[2]/2,av.shape[3]/2]==0)[0]+1 # if izer0.size>0: # imask=np.delete(np.where(time[:nt]),[izer0,izer1]) # else: # imask=np.where(time[:nt])[0] #else: # imask=np.arange(time[:nt].size) if rank==0: print('rank {}: calculating alp'.format(rank)) alp=np.zeros([3,3,imask.size,av.shape[2],av.shape[3]]) eta=np.zeros([3,3,3,imask.size,av.shape[2],av.shape[3]]) urmst = np.zeros([3,3,av.shape[2],av.shape[3]]) etat0 = np.zeros([3,3,3,av.shape[2],av.shape[3]]) #eta0 = np.zeros([3,3,3,imask.size,av.shape[2],av.shape[3]]) Hp = np.zeros([av.shape[2],av.shape[3]]) #compute rms velocity normalisation if rank==0: print('rank {}: calculating urms'.format(rank)) urms = np.sqrt(np.mean( av[imask,iuxmxy+3,:,:]-av[imask,iuxmxy+0,:,:]**2+ av[imask,iuxmxy+4,:,:]-av[imask,iuxmxy+1,:,:]**2+ av[imask,iuxmxy+5,:,:]-av[imask,iuxmxy+2,:,:]**2 ,axis=0)) #compute turbulent diffusion normalisation cv, gm, alp_MLT = 0.6, 5./3, 5./3 pp = np.mean(av[imask,iTTmxy,:,:]*av[imask,irhomxy,:,:]*cv*(gm-1), axis=0) if rank==0: print('rank {}: completed pressure'.format(rank)) for i in range(0,av.shape[2]): Hp[i,:] = -1./np.gradient(np.log(pp[i,:]),grid.dx) grid,pp=[],[] for i in range(0,3): for j in range(0,3): alp[i,j,:,:,:] = av[imask,first_alpha+3*j+i,:,:] urmst[i,j,:,:] = urms/3. for k in range(0,3): etat0[i,j,k,:,:] = urms * alp_MLT * Hp/3. 
#for i in range(0,imask.size): # eta0[i,:,:,:,:,:] = etat0 if rank==0: print('rank {}: calculating eta'.format(rank)) for j in range(0,3): for k in range(0,3): # Sign difference with Schrinner + r correction eta[j,k,1,:,:,:] = -av[imask,first_alpha+18+3*k+j,:,:]*r eta[j,k,0,:,:,:] = -av[imask,first_alpha+9 +3*k+j,:,:] nnt,ny,nx = imask.size,av.shape[2],av.shape[3] av=[] irr, ith, iph = 0,1,2 # Create output tensors if rank==0: print('rank {}: setting alp'.format(rank)) alpha = np.zeros([3,3,nnt,ny,nx]) beta = np.zeros([3,3,nnt,ny,nx]) gamma = np.zeros([3,nnt,ny,nx]) delta = np.zeros([3,nnt,ny,nx]) kappa = np.zeros([3,3,3,nnt,ny,nx]) # Alpha tensor if rank==0: print('rank {}: calculating alpha'.format(rank)) alpha[irr,irr,:,:,:] = (alp[irr,irr,:,:,:]-eta[irr,ith,ith,:,:,:]/r) alpha[irr,ith,:,:,:] = 0.5*(alp[irr,ith,:,:,:]+eta[irr,irr,ith,:,:,:]/r+alp[ith,irr,:,:,:]-eta[ith,ith,ith,:,:,:]/r) alpha[irr,iph,:,:,:] = 0.5*(alp[iph,irr,:,:,:]+alp[irr,iph,:,:,:] - eta[iph,ith,ith,:,:,:]/r) alpha[ith,irr,:,:,:] = alpha[irr,ith,:,:,:] alpha[ith,ith,:,:,:] = (alp[ith,ith,:,:,:]+eta[ith,irr,ith,:,:,:]/r) alpha[ith,iph,:,:,:] = 0.5*(alp[iph,ith,:,:,:]+alp[ith,iph,:,:,:]+eta[iph,irr,ith,:,:,:]/r) alpha[iph,irr,:,:,:] = alpha[irr,iph,:,:,:] alpha[iph,ith,:,:,:] = alpha[ith,iph,:,:,:] alpha[iph,iph,:,:,:] = alp[iph,iph,:,:,:] # Gamma vector gamma[irr,:,:,:] = -0.5*(alp[ith,iph,:,:,:]-alp[iph,ith,:,:,:]-eta[iph,irr,ith,:,:,:]/r) gamma[ith,:,:,:] = -0.5*(alp[iph,irr,:,:,:]-alp[irr,iph,:,:,:]-eta[iph,ith,ith,:,:,:]/r) gamma[iph,:,:,:] = -0.5*(alp[irr,ith,:,:,:]-alp[ith,irr,:,:,:]+eta[irr,irr,ith,:,:,:]/r +eta[ith,ith,ith,:,:,:]/r) if rank==0: print('rank {}: calculating beta'.format(rank)) alp=[] # Beta tensor beta[irr,irr,:,:,:] = -0.5* eta[irr,iph,ith,:,:,:] beta[irr,ith,:,:,:] = 0.25*(eta[irr,iph,irr,:,:,:] - eta[ith,iph,ith,:,:,:]) beta[irr,iph,:,:,:] = 0.25*(eta[irr,irr,ith,:,:,:] - eta[iph,iph,ith,:,:,:] - eta[irr,ith,irr,:,:,:]) beta[ith,ith,:,:,:] = 0.5*eta[ith,iph,irr,:,:,:] beta[ith,iph,:,:,:] = 0.25*(eta[ith,irr,ith,:,:,:] + eta[iph,iph,irr,:,:,:] - eta[ith,ith,irr,:,:,:]) beta[iph,iph,:,:,:] = 0.5*(eta[iph,irr,ith,:,:,:] - eta[iph,ith,irr,:,:,:]) beta[ith,irr,:,:,:] = beta[irr,ith,:,:,:] beta[iph,irr,:,:,:] = beta[irr,iph,:,:,:] beta[iph,ith,:,:,:] = beta[ith,iph,:,:,:] # Delta vector delta[irr,:,:,:] = 0.25*(eta[ith,ith,irr,:,:,:] - eta[ith,irr,ith,:,:,:] + eta[iph,iph,irr,:,:,:]) delta[ith,:,:,:] = 0.25*(eta[irr,irr,ith,:,:,:] - eta[irr,ith,irr,:,:,:] + eta[iph,iph,ith,:,:,:]) delta[iph,:,:,:] = -0.25*(eta[irr,iph,irr,:,:,:] + eta[ith,iph,ith,:,:,:]) # Kappa tensor if rank==0: print('rank {}: calculating kappa'.format(rank)) for i in range(0,3): kappa[i,irr,irr,:,:,:]= -eta[i,irr,irr,:,:,:] kappa[i,irr,ith,:,:,:]= -0.5*(eta[i,ith,irr,:,:,:]+eta[i,irr,ith,:,:,:]) kappa[i,irr,iph,:,:,:]= -0.5* eta[i,iph,irr,:,:,:] kappa[i,ith,irr,:,:,:]= kappa[i,irr,ith,:,:,:] kappa[i,ith,ith,:,:,:]= - eta[i,ith,ith,:,:,:] kappa[i,ith,iph,:,:,:]= -0.5* eta[i,iph,ith,:,:,:] kappa[i,iph,irr,:,:,:]= kappa[i,irr,iph,:,:,:] kappa[i,iph,ith,:,:,:]= kappa[i,ith,iph,:,:,:] #for it in range(0,nnt): # kappa[i,iph,iph,it,:,:]= 1e-9*etat0[i,0,0,:,:] eta=[] return alpha, beta, gamma, delta, kappa,\ time[imask], urmst, etat0
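# calc_tensors splits the measured coefficients a_ij into the symmetric alpha
# tensor and an antisymmetric part expressed through the pumping vector gamma,
# gamma_i = -1/2 eps_ijk a_jk (the r-dependent eta corrections above are left
# out here). A self-contained numpy sketch of that decomposition on a generic
# 3x3 matrix; all names are illustrative:
import numpy as np

a = np.arange(9, dtype=float).reshape(3, 3)   # stand-in coefficient matrix a_ij
alpha_sym = 0.5*(a + a.T)                     # symmetric part -> alpha tensor
gamma = np.array([-0.5*(a[1, 2] - a[2, 1]),   # gamma_i = -1/2 eps_ijk a_jk
                  -0.5*(a[2, 0] - a[0, 2]),
                  -0.5*(a[0, 1] - a[1, 0])])
# a_ij is recovered as alpha_ij - eps_ijk gamma_k:
eps = np.zeros((3, 3, 3))
eps[0, 1, 2] = eps[1, 2, 0] = eps[2, 0, 1] = 1.
eps[0, 2, 1] = eps[2, 1, 0] = eps[1, 0, 2] = -1.
print(np.allclose(a, alpha_sym - np.einsum('ijk,k->ij', eps, gamma)))  # True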
def aver2vtk(varfile='xyaverages.dat', datadir='data/',
             destination='xyaverages', quiet=1):
    """
    Convert average data from PencilCode format to vtk.

    call signature::

      aver2vtk(varfile = 'xyaverages.dat', datadir = 'data/',
               destination = 'xyaverages', quiet = 1):

    Read the average file specified in *varfile* and convert the data
    into vtk format. Write the result in *destination*.

    Keyword arguments:

      *varfile*:
        Name of the average file. This also specifies which dimensions the
        averages are taken.

      *datadir*:
        Directory where the data is stored.

      *destination*:
        Destination file.
    """

    # read the grid dimensions
    grid = pc.read_grid(datadir=datadir, trim=True, quiet=True)

    # read the specified average file
    if varfile[0:2] == 'xy':
        aver = pc.read_xyaver()
        line_len = int(np.round(grid.Lz / grid.dz))
        l0 = grid.z[int((len(grid.z) - line_len) / 2)]
        dl = grid.dz
    elif varfile[0:2] == 'xz':
        aver = pc.read_xzaver()
        line_len = int(np.round(grid.Ly / grid.dy))
        l0 = grid.y[int((len(grid.y) - line_len) / 2)]
        dl = grid.dy
    elif varfile[0:2] == 'yz':
        aver = pc.read_yzaver()
        line_len = int(np.round(grid.Lx / grid.dx))
        l0 = grid.x[int((len(grid.x) - line_len) / 2)]
        dl = grid.dx
    else:
        print("aver2vtk: ERROR: cannot determine average file\n")
        print("aver2vtk: The name of the file has to be either xyaver.dat, xzaver.dat or yzaver.dat\n")
        return -1
    keys = list(aver.__dict__.keys())
    t = aver.t
    keys.remove('t')

    # open the destination file
    fd = open(destination + '.vtk', 'wb')

    fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
    fd.write((varfile[0:2] + 'averages\n').encode('utf-8'))
    fd.write('BINARY\n'.encode('utf-8'))
    fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
    fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(len(t), line_len, 1).encode('utf-8'))
    fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(float(t[0]), l0, 0.).encode('utf-8'))
    fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(t[1] - t[0], dl, 1.).encode('utf-8'))
    fd.write('POINT_DATA {0:9}\n'.format(len(t) * line_len).encode('utf-8'))

    # run through all variables
    for var in keys:
        fd.write(('SCALARS ' + var + ' float\n').encode('utf-8'))
        fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
        for j in range(line_len):
            for i in range(len(t)):
                fd.write(struct.pack(">f", aver.__dict__[var][i, j]))

    fd.close()
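# A usage sketch for aver2vtk; the file names are illustrative. The output is a
# 2-d STRUCTURED_POINTS dataset with time along the first axis and the average
# direction along the second, so each average appears as a space-time diagram:
aver2vtk(varfile='xyaverages.dat', datadir='data/',
         destination='xyaverages', quiet=1)
# -> xyaverages.vtk with one SCALARS block per column in xyaverages.dat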
def tracers(traceField = 'bb', hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2,
            interpolation = 'weighted', trace_sub = 1, intQ = [''],
            varfile = 'VAR0', ti = -1, tf = -1, integration = 'simple',
            datadir = 'data/', destination = 'tracers.dat', nproc = 1):
    """
    Trace streamlines from the VAR files and integrate quantity 'intQ'
    along them.

    call signature::

      tracers(traceField = 'bb', hMin = 2e-3, hMax = 2e4, lMax = 500,
              tol = 1e-2, interpolation = 'weighted', trace_sub = 1,
              intQ = [''], varfile = 'VAR0', ti = -1, tf = -1,
              integration = 'simple', datadir = 'data/',
              destination = 'tracers.dat', nproc = 1)

    Trace streamlines of the vector field 'traceField' from z = z0 to z = z1
    and integrate the quantities 'intQ' along the lines. Creates a 2d mapping
    as in 'streamlines.f90'.

    Keyword arguments:

    *traceField*:
      Vector field used for the streamline tracing.

    *hMin*:
      Minimum step length for an underflow to occur.

    *hMax*:
      Parameter for the initial step length.

    *lMax*:
      Maximum length of the streamline. Integration will stop if l >= lMax.

    *tol*:
      Tolerance for each integration step. Reduces the step length if
      error >= tol.

    *interpolation*:
      Interpolation of the vector field.
      'mean': takes the mean of the adjacent grid points.
      'weighted': weights the adjacent grid points according to their distance.

    *trace_sub*:
      Number of sub-grid cells for the seeds.

    *intQ*:
      Quantities to be integrated along the streamlines.

    *varfile*:
      Varfile to be read.

    *integration*:
      Integration method.
      'simple': low order method.
      'RK6': Runge-Kutta 6th order.

    *ti*:
      Initial VAR file index for tracer time sequences. Overrides 'varfile'.

    *tf*:
      Final VAR file index for tracer time sequences. Overrides 'varfile'.

    *datadir*:
      Directory where the data is stored.

    *destination*:
      Destination file.

    *nproc*:
      Number of cores for multi core computation.
""" # returns the tracers for the specified starting locations def subTracers(q, vv, p, tracers0, iproc, hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2, interpolation = 'weighted', integration = 'simple', intQ = ['']): tracers = tracers0 mapping = np.zeros((tracers.shape[0], tracers.shape[1], 3)) for ix in range(tracers.shape[0]): for iy in range(tracers.shape[1]): xx = tracers[ix, iy, 2:5].copy() s = pc.stream(vv, p, interpolation = interpolation, integration = integration, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, xx = xx) tracers[ix, iy, 2:5] = s.tracers[s.sl-1] tracers[ix, iy, 5] = s.l if (any(intQ == 'curlyA')): for l in range(s.sl-1): aaInt = pc.vecInt((s.tracers[l+1] + s.tracers[l])/2, aa, p, interpolation) tracers[ix, iy, 6] += np.dot(aaInt, (s.tracers[l+1] - s.tracers[l])) # create the color mapping if (tracers[ix, iy, 4] > grid.z[-2]): if (tracers[ix, iy, 0] - tracers[ix, iy, 2]) > 0: if (tracers[ix, iy, 1] - tracers[ix, iy, 3]) > 0: mapping[ix, iy, :] = [0,1,0] else: mapping[ix, iy, :] = [1,1,0] else: if (tracers[ix, iy, 1] - tracers[ix, iy, 3]) > 0: mapping[ix, iy, :] = [0,0,1] else: mapping[ix, iy, :] = [1,0,0] else: mapping[ix, iy, :] = [1,1,1] q.put((tracers, mapping, iproc)) # multi core setup if (np.isscalar(nproc) == False) or (nproc%1 != 0): print("error: invalid processor number") return -1 queue = mp.Queue() # read the data # make sure to read the var files with the correct magic if (traceField == 'bb'): magic = 'bb' if (traceField == 'jj'): magic = 'jj' if (traceField == 'vort'): magic = 'vort' # convert intQ string into list if (isinstance(intQ, list) == False): intQ = [intQ] intQ = np.array(intQ) grid = pc.read_grid(datadir = datadir, trim = True, quiet = True) dim = pc.read_dim(datadir = datadir) tol2 = tol**2 # check if user wants a tracer time series if ((ti%1 == 0) and (tf%1 == 0) and (ti >= 0) and (tf >= ti)): series = True n_times = tf-ti+1 else: series = False n_times = 1 tracers = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), n_times, 6+len(intQ)]) mapping = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), n_times, 3]) t = np.zeros(n_times) for tIdx in range(n_times): if series: varfile = 'VAR' + str(tIdx) # read the data var = pc.read_var(varfile = varfile, datadir = datadir, magic = magic, quiet = True, trimall = True) grid = pc.read_grid(datadir = datadir, quiet = True, trim = True) t[tIdx] = var.t # extract the requested vector traceField vv = getattr(var, traceField) if (any(intQ == 'curlyA')): aa = var.aa # initialize the parameters p = pc.pClass() p.dx = var.dx; p.dy = var.dy; p.dz = var.dz p.Ox = var.x[0]; p.Oy = var.y[0]; p.Oz = var.z[0] p.Lx = grid.Lx; p.Ly = grid.Ly; p.Lz = grid.Lz p.nx = dim.nx; p.ny = dim.ny; p.nz = dim.nz # initialize the tracers for ix in range(int(trace_sub*dim.nx)): for iy in range(int(trace_sub*dim.ny)): tracers[ix, iy, tIdx, 0] = grid.x[0] + int(grid.dx/trace_sub)*ix tracers[ix, iy, tIdx, 2] = tracers[ix, iy, tIdx, 0] tracers[ix, iy, tIdx, 1] = grid.y[0] + int(grid.dy/trace_sub)*iy tracers[ix, iy, tIdx, 3] = tracers[ix, iy, tIdx, 1] tracers[ix, iy, tIdx, 4] = grid.z[0] # declare vectors xMid = np.zeros(3) xSingle = np.zeros(3) xHalf = np.zeros(3) xDouble = np.zeros(3) tmp = [] subTracersLambda = lambda queue, vv, p, tracers, iproc: \ subTracers(queue, vv, p, tracers, iproc, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration, intQ = intQ) proc = [] for iproc in range(nproc): proc.append(mp.Process(target = subTracersLambda, args = (queue, vv, 
p, tracers[iproc::nproc,:,tIdx,:], iproc))) for iproc in range(nproc): proc[iproc].start() for iproc in range(nproc): tmp.append(queue.get()) for iproc in range(nproc): proc[iproc].join() for iproc in range(nproc): tracers[tmp[iproc][2]::nproc,:,tIdx,:], mapping[tmp[iproc][2]::nproc,:,tIdx,:] = (tmp[iproc][0], tmp[iproc][1]) for iproc in range(nproc): proc[iproc].terminate() tracers = np.copy(tracers.swapaxes(0, 3), order = 'C') if (destination != ''): f = open(datadir + destination, 'wb') f.write(np.array(trace_sub, dtype = 'float32')) # write tracers into file for tIdx in range(n_times): f.write(t[tIdx].astype('float32')) f.write(tracers[:,:,tIdx,:].astype('float32')) f.close() tracers = tracers.swapaxes(0, 3) tracers = tracers.swapaxes(0, 1) mapping = mapping.swapaxes(0, 1) return tracers, mapping, t
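# Usage sketch for tracers (an illustration, not part of the module). Because
# the workers are mp.Process instances, the call should sit behind a __main__
# guard on platforms that spawn rather than fork; VAR0 and data/ are assumed
# to exist.
if __name__ == '__main__':
    tr, mapping, t = tracers(traceField='bb', trace_sub=2, varfile='VAR0',
                             datadir='data/', destination='tracers.dat',
                             nproc=2)
    pc.animate_interactive(mapping[:, ::-1, :, :], t, dimOrder=(2, 1, 0, 3))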
# $Id: fourier.py,v 1.3 2009-04-27 10:29:47 dintrans Exp $
# Fourier diagram of the vertical velocity
#
import numpy as N
import pylab as P
import pencil as pc
from scipy.integrate import simps
from theory import *

uz, t = pc.read_slices(field='uu3', proc=0)
dim = pc.read_dim()
par = pc.read_param(quiet=True)
par2 = pc.read_param(quiet=True, param2=True)
grid = pc.read_grid(param=par, quiet=True, trim=True)

nt = len(t)
uz = uz.reshape(nt, dim.nz, dim.nx)
w1 = N.empty((nt, dim.nz, dim.nx), dtype='complex128')
for i in range(dim.nz):
    w1[:, i, :] = N.fft.fft2(uz[:, i, :]) / nt / dim.nx
w2 = N.abs(w1[1:nt // 2 + 1, ...])
dw = 2 * N.pi / (t[-1] - t[0])
w = dw * N.arange(nt)
w = w[1:nt // 2 + 1]

kmax = 5
inte = N.empty((nt // 2, kmax + 1), dtype='float64')
for k in range(kmax + 1):
    for i in range(nt // 2):
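        # The original script breaks off at this point; the loop body below
        # is a plausible reconstruction (an assumption, not the original
        # code): integrate the spectral power of horizontal mode k over depth.
        inte[i, k] = simps(w2[i, :, k], grid.z)

# A quick look at the resulting omega-k diagram (also an assumption):
P.imshow(N.log10(inte), origin='lower', aspect='auto',
         extent=(-0.5, kmax + 0.5, w[0], w[-1]))
P.xlabel('mode k')
P.ylabel('omega')
P.show()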
def calc_tensors(datatopdir, lskip_zeros=False, datadir='data/', rank=0, size=1, proc=[0], l_mpi=True, uxmz=0, first_alpha=9, l_correction=False, t_correction=0., yindex=[]): dim = pc.read_dim() if len(yindex) == 0: iy = np.arange(dim.ny) else: iy = yindex os.chdir(datatopdir) # return to working directory if l_mpi: av = [] if proc.size < dim.nprocz: yproc = proc[0] / dim.nprocz yndx = iy - yproc * (dim.nygrid / dim.nprocy) #print 'yndx[0], rank',yndx[0],iy[0], rank aav, time = pc.read_zaver(datadir, proc=yproc) av = aav[:, :, yndx, :] else: for iproc in range(0, proc.size, dim.nprocz): aav, time = pc.read_zaver(datadir, proc=iproc / dim.nprocz) if iproc == 0: av = aav else: av = np.concatenate((av, aav), axis=2) else: av, time = pc.read_zaver(datadir) #where testfield calculated under old incorrect spec apply correction if l_correction: itcorr = np.where(time < t_correction)[0] av[itcorr, first_alpha + 2] += -dim.nprocz / (dim.nprocz - 2.) #factor by which to rescale code time to years trescale = 0.62 / 2.7e-6 / (365. * 86400.) #0.007281508 time *= trescale grid = pc.read_grid(datadir, trim=True, quiet=True) r, theta = np.meshgrid(grid.x, grid.y[iy]) #exclude zeros and next point if resetting of test fields is used if lskip_zeros: izer0 = np.where(av[:, first_alpha, av.shape[2] / 2, av.shape[3] / 2] == 0)[0] izer1 = np.where(av[:, first_alpha, av.shape[2] / 2, av.shape[3] / 2] == 0)[0] + 1 if izer0.size > 0: imask = np.delete(np.where(time), [izer0, izer1]) else: imask = np.where(time)[0] else: imask = np.arange(time.size) alp = np.zeros([imask.size, av.shape[2], av.shape[3], 3, 3]) eta = np.zeros([imask.size, av.shape[2], av.shape[3], 3, 3, 3]) urmst = np.zeros([av.shape[2], av.shape[3], 3, 3]) etat0 = np.zeros([av.shape[2], av.shape[3], 3, 3, 3]) #eta0 = np.zeros([imask.size,av.shape[2],av.shape[3],3,3,3]) Hp = np.zeros([av.shape[2], av.shape[3]]) #compute rms velocity normalisation urms = np.sqrt( np.mean(av[imask, uxmz + 3, :, :] - av[imask, uxmz + 0, :, :]**2 + av[imask, uxmz + 4, :, :] - av[imask, uxmz + 1, :, :]**2 + av[imask, uxmz + 5, :, :] - av[imask, uxmz + 2, :, :]**2, axis=0)) #compute turbulent diffusion normalisation cv, gm, alp_MLT = 0.6, 5. / 3, 5. / 3 pp = np.mean(av[imask, 6, :, :] * av[imask, 7, :, :] * cv * (gm - 1), axis=0) for i in range(0, av.shape[2]): Hp[i, :] = -1. / np.gradient(np.log(pp[i, :]), grid.dx) for i in range(0, 3): for j in range(0, 3): alp[:, :, :, i, j] = av[imask, first_alpha + 3 * i + j, :, :] urmst[:, :, i, j] = urms / 3. for k in range(0, 3): etat0[:, :, i, j, k] = urms * alp_MLT * Hp / 3. 
#for i in range(0,imask.size): # eta0[i,:,:,:,:,:] = etat0 for j in range(0, 3): for k in range(0, 3): # Sign difference with Schrinner + r correction eta[:, :, :, 1, j, k] = -av[imask, first_alpha + 18 + 3 * j + k, :, :] * r eta[:, :, :, 0, j, k] = -av[imask, first_alpha + 9 + 3 * j + k, :, :] irr, ith, iph = 0, 1, 2 # Create output tensors alpha = np.zeros([imask.size, av.shape[2], av.shape[3], 3, 3]) beta = np.zeros([imask.size, av.shape[2], av.shape[3], 3, 3]) gamma = np.zeros([imask.size, av.shape[2], av.shape[3], 3]) delta = np.zeros([imask.size, av.shape[2], av.shape[3], 3]) kappa = np.zeros([imask.size, av.shape[2], av.shape[3], 3, 3, 3]) # Alpha tensor alpha[:, :, :, irr, irr] = (alp[:, :, :, irr, irr] - eta[:, :, :, ith, ith, irr] / r) alpha[:, :, :, irr, ith] = 0.5 * ( alp[:, :, :, ith, irr] + eta[:, :, :, ith, irr, irr] / r + alp[:, :, :, irr, ith] - eta[:, :, :, ith, ith, ith] / r) alpha[:, :, :, irr, iph] = 0.5 * (alp[:, :, :, iph, irr] + alp[:, :, :, irr, iph] - eta[:, :, :, ith, ith, iph] / r) alpha[:, :, :, ith, irr] = alpha[:, :, :, irr, ith] alpha[:, :, :, ith, ith] = (alp[:, :, :, ith, ith] + eta[:, :, :, ith, irr, ith] / r) alpha[:, :, :, ith, iph] = 0.5 * (alp[:, :, :, iph, ith] + alp[:, :, :, ith, iph] + eta[:, :, :, ith, irr, iph] / r) alpha[:, :, :, iph, irr] = alpha[:, :, :, irr, iph] alpha[:, :, :, iph, ith] = alpha[:, :, :, ith, iph] alpha[:, :, :, iph, iph] = alp[:, :, :, iph, iph] # Gamma vector gamma[:, :, :, irr] = -0.5 * (alp[:, :, :, iph, ith] - alp[:, :, :, ith, iph] - eta[:, :, :, ith, irr, iph] / r) gamma[:, :, :, ith] = -0.5 * (alp[:, :, :, irr, iph] - alp[:, :, :, iph, irr] - eta[:, :, :, ith, ith, iph] / r) gamma[:, :, :, iph] = -0.5 * (alp[:, :, :, ith, irr] - alp[:, :, :, irr, ith] + eta[:, :, :, ith, irr, irr] / r + eta[:, :, :, ith, ith, ith] / r) # Beta tensor beta[:, :, :, irr, irr] = -0.5 * eta[:, :, :, ith, iph, irr] beta[:, :, :, irr, ith] = 0.25 * (eta[:, :, :, irr, iph, irr] - eta[:, :, :, ith, iph, ith]) beta[:, :, :, irr, iph] = 0.25 * (eta[:, :, :, ith, irr, irr] - eta[:, :, :, ith, iph, iph] - eta[:, :, :, irr, ith, irr]) beta[:, :, :, ith, irr] = beta[:, :, :, irr, ith] beta[:, :, :, ith, ith] = 0.5 * eta[:, :, :, irr, iph, ith] beta[:, :, :, ith, iph] = 0.25 * (eta[:, :, :, ith, irr, ith] + eta[:, :, :, irr, iph, iph] - eta[:, :, :, irr, ith, ith]) beta[:, :, :, iph, irr] = beta[:, :, :, irr, iph] beta[:, :, :, iph, ith] = beta[:, :, :, ith, iph] beta[:, :, :, iph, iph] = 0.5 * (eta[:, :, :, ith, irr, iph] - eta[:, :, :, irr, ith, iph]) # Delta vector delta[:, :, :, irr] = 0.25 * (eta[:, :, :, irr, ith, ith] - eta[:, :, :, ith, irr, ith] + eta[:, :, :, irr, iph, iph]) delta[:, :, :, ith] = 0.25 * (eta[:, :, :, ith, irr, irr] - eta[:, :, :, irr, ith, irr] + eta[:, :, :, ith, iph, iph]) delta[:, :, :, iph] = -0.25 * (eta[:, :, :, irr, iph, irr] + eta[:, :, :, ith, iph, ith]) # Kappa tensor for i in range(0, 3): kappa[:, :, :, irr, irr, i] = -eta[:, :, :, irr, irr, i] kappa[:, :, :, ith, irr, i] = -0.5 * (eta[:, :, :, ith, irr, i] + eta[:, :, :, irr, ith, i]) kappa[:, :, :, iph, irr, i] = -0.5 * eta[:, :, :, irr, iph, i] kappa[:, :, :, irr, ith, i] = kappa[:, :, :, ith, irr, i] kappa[:, :, :, ith, ith, i] = -eta[:, :, :, ith, ith, i] kappa[:, :, :, iph, ith, i] = -0.5 * eta[:, :, :, ith, iph, i] kappa[:, :, :, irr, iph, i] = kappa[:, :, :, iph, irr, i] kappa[:, :, :, ith, iph, i] = kappa[:, :, :, iph, ith, i] #for it in range(0,imask.size): # kappa[it,:,:,iph,iph,i]= 1e-9*etat0[:,:,0,0,i] return alpha, beta, gamma, delta, kappa,\ 
time[imask], urmst, etat0
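# Usage sketch for calc_tensors (an illustration, not part of the module),
# assuming a serial analysis of the z-averages, i.e. l_mpi=False, and that
# first_alpha indexes the first testfield coefficient in the zaver columns:
if __name__ == '__main__':
    alpha, beta, gamma, delta, kappa, t_yrs, urmst, etat0 = calc_tensors(
        '.', lskip_zeros=True, l_mpi=False)
    print(alpha.shape)  # (n_times, n_theta, n_r, 3, 3)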
def find_fixed(self, datadir='data/', destination='fixed_points.hf5', varfile='VAR0', ti=-1, tf=-1, trace_field='bb', h_min=2e-3, h_max=2e4, len_max=500, tol=1e-2, interpolation='trilinear', trace_sub=1, integration='simple', int_q=[''], n_proc=1, tracer_file_name=''): """ Find the fixed points. call signature:: find_fixed(datadir='data/', destination='fixed_points.hf5', varfile='VAR0', ti=-1, tf=-1, trace_field='bb', h_min=2e-3, h_max=2e4, len_max=500, tol=1e-2, interpolation='trilinear', trace_sub=1, integration='simple', int_q=[''], n_proc=1): Finds the fixed points. Returns the fixed points positions. Keyword arguments: *datadir*: Data directory. *destination*: Name of the fixed points file. *varfile*: Varfile to be read. *ti*: Initial VAR file index for tracer time sequences. *tf*: Final VAR file index for tracer time sequences. *trace_field*: Vector field used for the streamline tracing. *h_min*: Minimum step length for and underflow to occur. *h_max*: Parameter for the initial step length. *len_max*: Maximum length of the streamline. Integration will stop if l >= len_max. *tol*: Tolerance for each integration step. Reduces the step length if error >= tol. *interpolation*: Interpolation of the vector field. 'mean': takes the mean of the adjacent grid point. 'trilinear': weights the adjacent grid points according to their distance. *trace_sub*: Number of sub-grid cells for the seeds for the initial mapping. *integration*: Integration method. 'simple': low order method. 'RK6': Runge-Kutta 6th order. *int_q*: Quantities to be integrated along the streamlines. *n_proc*: Number of cores for multi core computation. *tracer_file_name* Name of the tracer file to be read. If equal to '' it will compute the tracers. """ import numpy as np # Return the fixed points for a subset of the domain. def __sub_fixed(queue, ix0, iy0, field, tracers, tidx, var, i_proc): diff = np.zeros((4, 2)) fixed = [] fixed_sign = [] fidx = 0 poincare_array = np.zeros( (tracers.x0[i_proc::self.params.n_proc].shape[0], tracers.x0.shape[1])) for ix in ix0[i_proc::self.params.n_proc]: for iy in iy0: # Compute Poincare index around this cell (!= 0 for potential fixed point). diff[0, :] = np.array([ tracers.x1[ix, iy, tidx] - tracers.x0[ix, iy, tidx], tracers.y1[ix, iy, tidx] - tracers.y0[ix, iy, tidx] ]) diff[1, :] = np.array([ tracers.x1[ix + 1, iy, tidx] - tracers.x0[ix + 1, iy, tidx], tracers.y1[ix + 1, iy, tidx] - tracers.y0[ix + 1, iy, tidx] ]) diff[2, :] = np.array([ tracers.x1[ix + 1, iy + 1, tidx] - tracers.x0[ix + 1, iy + 1, tidx], tracers.y1[ix + 1, iy + 1, tidx] - tracers.y0[ix + 1, iy + 1, tidx] ]) diff[3, :] = np.array([ tracers.x1[ix, iy + 1, tidx] - tracers.x0[ix, iy + 1, tidx], tracers.y1[ix, iy + 1, tidx] - tracers.y0[ix, iy + 1, tidx] ]) if sum(np.sum(diff**2, axis=1) != 0): diff = np.swapaxes( np.swapaxes(diff, 0, 1) / np.sqrt(np.sum(diff**2, axis=1)), 0, 1) poincare = __poincare_index( field, tracers.x0[ix:ix + 2, iy, tidx], tracers.y0[ix, iy:iy + 2, tidx], diff) poincare_array[ix / n_proc, iy] = poincare if abs( poincare ) > 5: # Use 5 instead of 2*pi to account for rounding errors. # Subsample to get starting point for iteration. nt = 4 xmin = tracers.x0[ix, iy, tidx] ymin = tracers.y0[ix, iy, tidx] xmax = tracers.x0[ix + 1, iy, tidx] ymax = tracers.y0[ix, iy + 1, tidx] xx = np.zeros((nt**2, 3)) tracers_part = np.zeros((nt**2, 5)) i1 = 0 for j1 in range(nt): for k1 in range(nt): xx[i1, 0] = xmin + j1 / (nt - 1.) * (xmax - xmin) xx[i1, 1] = ymin + k1 / (nt - 1.) 
* (ymax - ymin) xx[i1, 2] = self.params.Oz i1 += 1 for it1 in range(nt**2): stream = Stream( field, self.params, h_min=self.params.h_min, h_max=self.params.h_max, len_max=self.params.len_max, tol=self.params.tol, interpolation=self.params.interpolation, integration=self.params.integration, xx=xx[it1, :]) tracers_part[it1, 0:2] = xx[it1, 0:2] tracers_part[it1, 2:] = stream.tracers[ stream.stream_len - 1, :] min2 = 1e6 minx = xmin miny = ymin i1 = 0 for j1 in range(nt): for k1 in range(nt): diff2 = (tracers_part[i1+k1*nt, 2] - \ tracers_part[i1+k1*nt, 0])**2 + \ (tracers_part[i1+k1*nt, 3] - \ tracers_part[i1+k1*nt, 1])**2 if diff2 < min2: min2 = diff2 minx = xmin + j1 / (nt - 1.) * (xmax - xmin) miny = ymin + k1 / (nt - 1.) * (ymax - ymin) it1 += 1 # Get fixed point from this starting position using Newton's method. point = np.array([minx, miny]) fixed_point = __null_point(point, var) # Check if fixed point lies inside the cell. if ((fixed_point[0] < tracers.x0[ix, iy, tidx]) or (fixed_point[0] > tracers.x0[ix + 1, iy, tidx]) or (fixed_point[1] < tracers.y0[ix, iy, tidx]) or (fixed_point[1] > tracers.y0[ix, iy + 1, tidx])): pass else: fixed.append(fixed_point) fixed_sign.append(np.sign(poincare)) fidx += np.sign(poincare) queue.put((i_proc, fixed, fixed_sign, fidx, poincare_array)) # Find the Poincare index of this grid cell. def __poincare_index(field, sx, sy, diff): poincare = 0 poincare += __edge(field, [sx[0], sx[1]], [sy[0], sy[0]], diff[0, :], diff[1, :], 0) poincare += __edge(field, [sx[1], sx[1]], [sy[0], sy[1]], diff[1, :], diff[2, :], 0) poincare += __edge(field, [sx[1], sx[0]], [sy[1], sy[1]], diff[2, :], diff[3, :], 0) poincare += __edge(field, [sx[0], sx[0]], [sy[1], sy[0]], diff[3, :], diff[0, :], 0) return poincare # Compute rotation along one edge. def __edge(field, sx, sy, diff1, diff2, rec): phiMin = np.pi / 8. dtot = m.atan2(diff1[0] * diff2[1] - diff2[0] * diff1[1], diff1[0] * diff2[0] + diff1[1] * diff2[1]) if (abs(dtot) > phiMin) and (rec < 4): xm = 0.5 * (sx[0] + sx[1]) ym = 0.5 * (sy[0] + sy[1]) # Trace the intermediate field line. stream = Stream(field, self.params, h_min=self.params.h_min, h_max=self.params.h_max, len_max=self.params.len_max, tol=self.params.tol, interpolation=self.params.interpolation, integration=self.params.integration, xx=np.array([xm, ym, self.params.Oz])) stream_x0 = stream.tracers[0, 0] stream_y0 = stream.tracers[0, 1] stream_x1 = stream.tracers[stream.stream_len - 1, 0] stream_y1 = stream.tracers[stream.stream_len - 1, 1] stream_z1 = stream.tracers[stream.stream_len - 1, 2] # Discard any streamline which does not converge or hits the boundary. # if ((stream.len >= len_max) or # (stream_z1 < self.params.Oz+self.params.Lz-10*self.params.dz)): # dtot = 0. if False: pass else: diffm = np.array( [stream_x1 - stream_x0, stream_y1 - stream_y0]) if sum(diffm**2) != 0: diffm = diffm / np.sqrt(sum(diffm**2)) dtot = __edge(field, [sx[0], xm], [sy[0], ym], diff1, diffm, rec+1) + \ __edge(field, [xm, sx[1]], [ym, sy[1]], diffm, diff2, rec+1) return dtot # Finds the null point of the mapping, i.e. fixed point, using Newton's method. def __null_point(point, var): dl = np.min(var.dx, var.dy) / 100. it = 0 # Tracers used to find the fixed point. tracers_null = np.zeros((5, 4)) while True: # Trace field lines at original point and for Jacobian. 
# (second order seems to be enough) xx = np.zeros((5, 3)) xx[0, :] = np.array([point[0], point[1], self.params.Oz]) xx[1, :] = np.array([point[0] - dl, point[1], self.params.Oz]) xx[2, :] = np.array([point[0] + dl, point[1], self.params.Oz]) xx[3, :] = np.array([point[0], point[1] - dl, self.params.Oz]) xx[4, :] = np.array([point[0], point[1] + dl, self.params.Oz]) for it1 in range(5): stream = Stream(field, self.params, h_min=self.params.h_min, h_max=self.params.h_max, len_max=self.params.len_max, tol=self.params.tol, interpolation=self.params.interpolation, integration=self.params.integration, xx=xx[it1, :]) tracers_null[it1, :2] = xx[it1, :2] tracers_null[it1, 2:] = stream.tracers[stream.stream_len - 1, 0:2] # Check function convergence. ff = np.zeros(2) ff[0] = tracers_null[0, 2] - tracers_null[0, 0] ff[1] = tracers_null[0, 3] - tracers_null[0, 1] if sum(abs(ff)) <= 1e-3 * np.min(self.params.dx, self.params.dy): fixed_point = np.array([point[0], point[1]]) break # Compute the Jacobian. fjac = np.zeros((2, 2)) fjac[0, 0] = ((tracers_null[2, 2] - tracers_null[2, 0]) - (tracers_null[1, 2] - tracers_null[1, 0])) / 2. / dl fjac[0, 1] = ((tracers_null[4, 2] - tracers_null[4, 0]) - (tracers_null[3, 2] - tracers_null[3, 0])) / 2. / dl fjac[1, 0] = ((tracers_null[2, 3] - tracers_null[2, 1]) - (tracers_null[1, 3] - tracers_null[1, 1])) / 2. / dl fjac[1, 1] = ((tracers_null[4, 3] - tracers_null[4, 1]) - (tracers_null[3, 3] - tracers_null[3, 1])) / 2. / dl # Invert the Jacobian. fjin = np.zeros((2, 2)) det = fjac[0, 0] * fjac[1, 1] - fjac[0, 1] * fjac[1, 0] if abs(det) < dl: fixed_point = point break fjin[0, 0] = fjac[1, 1] fjin[1, 1] = fjac[0, 0] fjin[0, 1] = -fjac[0, 1] fjin[1, 0] = -fjac[1, 0] fjin = fjin / det dpoint = np.zeros(2) dpoint[0] = -fjin[0, 0] * ff[0] - fjin[0, 1] * ff[1] dpoint[1] = -fjin[1, 0] * ff[0] - fjin[1, 1] * ff[1] point += dpoint # Check root convergence. if sum(abs(dpoint)) < 1e-3 * np.min(self.params.dx, self.params.dy): fixed_point = point break if it > 20: fixed_point = point break it += 1 return fixed_point # Find the fixed point using Newton's method, starting at previous fixed point. def __sub_fixed_series(queue, t_idx, field, var, i_proc): fixed = [] fixed_sign = [] for i, point in enumerate( self.fixed_points[t_idx - 1][i_proc::self.params.n_proc]): fixed_tentative = __null_point(point, var) # Check if the fixed point lies outside the domain. if fixed_tentative[0] >= self.params.Ox and \ fixed_tentative[1] >= self.params.Oy and \ fixed_tentative[0] <= self.params.Ox+self.params.Lx and \ fixed_tentative[1] <= self.params.Oy+self.params.Ly: fixed.append(fixed_tentative) fixed_sign.append(self.fixed_sign[t_idx - 1][i_proc + i * n_proc]) queue.put((i_proc, fixed, fixed_sign)) # Discard fixed points which are too close to each other. def __discard_close_fixed_points(fixed, fixed_sign, var): fixed_new = [] fixed_sign_new = [] if len(fixed) > 0: fixed_new.append(fixed[0]) fixed_sign_new.append(fixed_sign[0]) dx = fixed[:, 0] - np.reshape(fixed[:, 0], (fixed.shape[0], 1)) dy = fixed[:, 1] - np.reshape(fixed[:, 1], (fixed.shape[0], 1)) mask = (abs(dx) > var.dx / 2) + (abs(dy) > var.dy / 2) for idx in range(1, fixed.shape[0]): if all(mask[idx, :idx]): fixed_new.append(fixed[idx]) fixed_sign_new.append(fixed_sign[idx]) return np.array(fixed_new), np.array(fixed_sign_new) # Convert int_q string into list. 
if not isinstance(int_q, list): int_q = [int_q] self.params.int_q = int_q if any(np.array(self.params.int_q) == 'curly_A'): self.curly_A = [] if any(np.array(self.params.int_q) == 'ee'): self.ee = [] # Multi core setup. if not (np.isscalar(n_proc)) or (n_proc % 1 != 0): print("error: invalid processor number") return -1 queue = mp.Queue() # Write the tracing parameters. self.params = TracersParameterClass() self.params.trace_field = trace_field self.params.h_min = h_min self.params.h_max = h_max self.params.len_max = len_max self.params.tol = tol self.params.interpolation = interpolation self.params.trace_sub = trace_sub self.params.int_q = int_q self.params.varfile = varfile self.params.ti = ti self.params.tf = tf self.params.integration = integration self.params.datadir = datadir self.params.destination = destination self.params.n_proc = n_proc # Make sure to read the var files with the correct magic. magic = [] if trace_field == 'bb': magic.append('bb') if trace_field == 'jj': magic.append('jj') if trace_field == 'vort': magic.append('vort') if any(np.array(int_q) == 'ee'): magic.append('bb') magic.append('jj') dim = pc.read_dim(datadir=datadir) # Check if user wants a tracer time series. if (ti % 1 == 0) and (tf % 1 == 0) and (ti >= 0) and (tf >= ti): series = True varfile = 'VAR' + str(ti) n_times = tf - ti + 1 else: series = False n_times = 1 self.t = np.zeros(n_times) # Read the initial field. var = pc.read_var(varfile=varfile, datadir=datadir, magic=magic, quiet=True, trimall=True) self.t[0] = var.t grid = pc.read_grid(datadir=datadir, quiet=True, trim=True) field = getattr(var, trace_field) param2 = pc.read_param(datadir=datadir, param2=True, quiet=True) if any(np.array(int_q) == 'ee'): ee = var.jj * param2.eta - pc.cross(var.uu, var.bb) # Get the simulation parameters. self.params.dx = var.dx self.params.dy = var.dy self.params.dz = var.dz self.params.Ox = var.x[0] self.params.Oy = var.y[0] self.params.Oz = var.z[0] self.params.Lx = grid.Lx self.params.Ly = grid.Ly self.params.Lz = grid.Lz self.params.nx = dim.nx self.params.ny = dim.ny self.params.nz = dim.nz tracers = Tracers() # Create the mapping for all times. if not tracer_file_name: tracers.find_tracers(trace_field=trace_field, h_min=h_min, h_max=h_max, len_max=len_max, tol=tol, interpolation=interpolation, trace_sub=trace_sub, varfile=varfile, ti=ti, tf=tf, integration=integration, datadir=datadir, int_q=int_q, n_proc=n_proc) else: tracers.read(datadir=datadir, file_name=tracer_file_name) self.tracers = tracers # Set some default values. self.t = np.zeros((tf - ti + 1) * series + (1 - series)) self.fidx = np.zeros((tf - ti + 1) * series + (1 - series)) self.poincare = np.zeros( [int(trace_sub * dim.nx), int(trace_sub * dim.ny), n_times]) ix0 = range(0, int(self.params.nx * trace_sub) - 1) iy0 = range(0, int(self.params.ny * trace_sub) - 1) # Start the parallelized fixed point finding. for tidx in range(n_times): if tidx > 0: var = pc.read_var(varfile='VAR' + str(tidx + ti), datadir=datadir, magic=magic, quiet=True, trimall=True) field = getattr(var, trace_field) self.t[tidx] = var.t proc = [] sub_data = [] fixed = [] fixed_sign = [] for i_proc in range(n_proc): proc.append( mp.Process(target=__sub_fixed, args=(queue, ix0, iy0, field, self.tracers, tidx, var, i_proc))) for i_proc in range(n_proc): proc[i_proc].start() for i_proc in range(n_proc): sub_data.append(queue.get()) for i_proc in range(n_proc): proc[i_proc].join() for i_proc in range(n_proc): # Extract the data from the single cores. Mind the order. 
sub_proc = sub_data[i_proc][0] fixed.extend(sub_data[i_proc][1]) fixed_sign.extend(sub_data[i_proc][2]) self.fidx[tidx] += sub_data[i_proc][3] self.poincare[sub_proc::n_proc, :, tidx] = sub_data[i_proc][4] for i_proc in range(n_proc): proc[i_proc].terminate() # Discard fixed points which lie too close to each other. fixed, fixed_sign = __discard_close_fixed_points( np.array(fixed), np.array(fixed_sign), var) self.fixed_points.append(np.array(fixed)) self.fixed_sign.append(np.array(fixed_sign)) # Compute the traced quantities along the fixed point streamlines. if any(np.array(self.params.int_q) == 'curly_A') or \ any(np.array(self.params.int_q) == 'ee'): for t_idx in range(0, n_times): if any(np.array(self.params.int_q) == 'curly_A'): self.curly_A.append([]) if any(np.array(self.params.int_q) == 'ee'): self.ee.append([]) for fixed in self.fixed_points[t_idx]: # Trace the stream line. xx = np.array([fixed[0], fixed[1], self.params.Oz]) stream = Stream(field, self.params, h_min=self.params.h_min, h_max=self.params.h_max, len_max=self.params.len_max, tol=self.params.tol, interpolation=self.params.interpolation, integration=self.params.integration, xx=xx) # Do the field line integration. if any(np.array(self.params.int_q) == 'curly_A'): curly_A = 0 for l in range(stream.stream_len - 1): aaInt = vec_int( (stream.tracers[l + 1] + stream.tracers[l]) / 2, var, var.aa, interpolation=self.params.interpolation) curly_A += np.dot( aaInt, (stream.tracers[l + 1] - stream.tracers[l])) self.curly_A[-1].append(curly_A) if any(np.array(self.params.int_q) == 'ee'): ee_p = 0 for l in range(stream.stream_len - 1): eeInt = vec_int( (stream.tracers[l + 1] + stream.tracers[l]) / 2, var, ee, interpolation=self.params.interpolation) ee_p += np.dot( eeInt, (stream.tracers[l + 1] - stream.tracers[l])) self.ee[-1].append(ee_p) if any(np.array(self.params.int_q) == 'curly_A'): self.curly_A[-1] = np.array(self.curly_A[-1]) if any(np.array(self.params.int_q) == 'ee'): self.ee[-1] = np.array(self.ee[-1])
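# Self-contained sketch of the Poincare-index criterion used above (pure
# numpy, no simulation data; an illustration, not part of the module). For a
# mapping with an isolated fixed point inside a cell, the winding of the
# displacement vectors around the four corners sums to +-2*pi, which is why
# the code tests abs(poincare) > 5 (roughly 2*pi with rounding slack).
import numpy as np

def poincare_index_of_cell(corners_xy, mapped_xy):
    """Sum the rotation of (mapped - seed) around the four cell corners."""
    d = mapped_xy - corners_xy                    # displacement at each corner
    angles = np.arctan2(d[:, 1], d[:, 0])
    rot = np.diff(np.append(angles, angles[0]))   # close the loop
    rot = (rot + np.pi) % (2 * np.pi) - np.pi     # wrap to (-pi, pi]
    return rot.sum()

# Example: the mapping x -> -x, y -> -y has a fixed point at the origin, so
# a cell enclosing the origin carries index 2*pi:
corners = np.array([[-1., -1.], [1., -1.], [1., 1.], [-1., 1.]])
print(poincare_index_of_cell(corners, -corners))  # ~ +6.283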
def fixed_points(datadir = 'data/', fileName = 'fixed_points_post.dat', varfile = 'VAR0', ti = -1, tf = -1, traceField = 'bb', hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2, interpolation = 'weighted', trace_sub = 1, integration = 'simple', nproc = 1): """ Find the fixed points. call signature:: fixed = fixed_points(datadir = 'data/', fileName = 'fixed_points_post.dat', varfile = 'VAR0', ti = -1, tf = -1, traceField = 'bb', hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2, interpolation = 'weighted', trace_sub = 1, integration = 'simple', nproc = 1) Finds the fixed points. Returns the fixed points positions. Keyword arguments: *datadir*: Data directory. *fileName*: Name of the fixed points file. *varfile*: Varfile to be read. *ti*: Initial VAR file index for tracer time sequences. Overrides 'varfile'. *tf*: Final VAR file index for tracer time sequences. Overrides 'varfile'. *traceField*: Vector field used for the streamline tracing. *hMin*: Minimum step length for and underflow to occur. *hMax*: Parameter for the initial step length. *lMax*: Maximum length of the streamline. Integration will stop if l >= lMax. *tol*: Tolerance for each integration step. Reduces the step length if error >= tol. *interpolation*: Interpolation of the vector field. 'mean': takes the mean of the adjacent grid point. 'weighted': weights the adjacent grid points according to their distance. *trace_sub*: Number of sub-grid cells for the seeds for the initial mapping. *intQ*: Quantities to be integrated along the streamlines. *integration*: Integration method. 'simple': low order method. 'RK6': Runge-Kutta 6th order. *nproc*: Number of cores for multi core computation. """ class data_struct: def __init__(self): self.t = [] self.fidx = [] # number of fixed points at this time self.x = [] self.y = [] self.q = [] # Computes rotation along one edge. def edge(vv, p, sx, sy, diff1, diff2, phiMin, rec, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration): dtot = m.atan2(diff1[0]*diff2[1] - diff2[0]*diff1[1], diff1[0]*diff2[0] + diff1[1]*diff2[1]) if ((abs(dtot) > phiMin) and (rec < 4)): xm = 0.5*(sx[0]+sx[1]) ym = 0.5*(sy[0]+sy[1]) # trace intermediate field line s = pc.stream(vv, p, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration, xx = np.array([xm, ym, p.Oz])) tracer = np.concatenate((s.tracers[0,0:2], s.tracers[s.sl-1,:], np.reshape(s.l,(1)))) # discard any streamline which does not converge or hits the boundary if ((tracer[5] >= lMax) or (tracer[4] < p.Oz+p.Lz-p.dz)): dtot = 0. else: diffm = np.array([tracer[2] - tracer[0], tracer[3] - tracer[1]]) if (sum(diffm**2) != 0): diffm = diffm / np.sqrt(sum(diffm**2)) dtot = edge(vv, p, [sx[0], xm], [sy[0], ym], diff1, diffm, phiMin, rec+1, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration)+ \ edge(vv, p, [xm, sx[1]], [ym, sy[1]], diffm, diff2, phiMin, rec+1, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration) return dtot # Finds the Poincare index of this grid cell. 
def pIndex(vv, p, sx, sy, diff, phiMin, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration): poincare = 0 poincare += edge(vv, p, [sx[0], sx[1]], [sy[0], sy[0]], diff[0,:], diff[1,:], phiMin, 0, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration) poincare += edge(vv, p, [sx[1], sx[1]], [sy[0], sy[1]], diff[1,:], diff[2,:], phiMin, 0, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration) poincare += edge(vv, p, [sx[1], sx[0]], [sy[1], sy[1]], diff[2,:], diff[3,:], phiMin, 0, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration) poincare += edge(vv, p, [sx[0], sx[0]], [sy[1], sy[0]], diff[3,:], diff[0,:], phiMin, 0, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration) return poincare # fixed point finder for a subset of the domain def subFixed(queue, ix0, iy0, vv, p, tracers, iproc, hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2, interpolation = 'weighted', integration = 'simple'): diff = np.zeros((4,2)) phiMin = np.pi/8. x = [] y = [] q = [] fidx = 0 for ix in ix0: for iy in iy0: # compute Poincare index around this cell (!= 0 for potential fixed point) diff[0,:] = tracers[iy, ix, 0, 2:4] - tracers[iy, ix, 0, 0:2] diff[1,:] = tracers[iy, ix+1, 0, 2:4] - tracers[iy, ix+1, 0, 0:2] diff[2,:] = tracers[iy+1, ix+1, 0, 2:4] - tracers[iy+1, ix+1, 0, 0:2] diff[3,:] = tracers[iy+1, ix, 0, 2:4] - tracers[iy+1, ix, 0, 0:2] if (sum(np.sum(diff**2, axis = 1) != 0) == True): diff = np.swapaxes(np.swapaxes(diff, 0, 1) / np.sqrt(np.sum(diff**2, axis = 1)), 0, 1) poincare = pIndex(vv, p, tracers[iy, ix:ix+2, 0, 0], tracers[iy:iy+2, ix, 0, 1], diff, phiMin, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration) if (abs(poincare) > 5): # use 5 instead of 2pi to account for rounding errors # subsample to get starting point for iteration nt = 4 xmin = tracers[iy, ix, 0, 0] ymin = tracers[iy, ix, 0, 1] xmax = tracers[iy, ix+1, 0, 0] ymax = tracers[iy+1, ix, 0, 1] xx = np.zeros((nt**2,3)) tracersSub = np.zeros((nt**2,5)) i1 = 0 for j1 in range(nt): for k1 in range(nt): xx[i1,0] = xmin + j1/(nt-1.)*(xmax - xmin) xx[i1,1] = ymin + k1/(nt-1.)*(ymax - ymin) xx[i1,2] = p.Oz i1 += 1 for it1 in range(nt**2): s = pc.stream(vv, p, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration, xx = xx[it1,:]) tracersSub[it1,0:2] = xx[it1,0:2] tracersSub[it1,2:] = s.tracers[s.sl-1,:] min2 = 1e6 minx = xmin miny = ymin i1 = 0 for j1 in range(nt): for k1 in range(nt): diff2 = (tracersSub[i1, 2] - tracersSub[i1, 0])**2 + (tracersSub[i1, 3] - tracersSub[i1, 1])**2 if (diff2 < min2): min2 = diff2 minx = xmin + j1/(nt-1.)*(xmax - xmin) miny = ymin + k1/(nt-1.)*(ymax - ymin) it1 += 1 # get fixed point from this starting position using Newton's method #TODO: dl = np.min(var.dx, var.dy)/100. 
# step-size for calculating the Jacobian by finite differences it = 0 # tracers used to find the fixed point tracersNull = np.zeros((5,4)) point = np.array([minx, miny]) while True: # trace field lines at original point and for Jacobian: # (second order seems to be enough) xx = np.zeros((5,3)) xx[0,:] = np.array([point[0], point[1], p.Oz]) xx[1,:] = np.array([point[0]-dl, point[1], p.Oz]) xx[2,:] = np.array([point[0]+dl, point[1], p.Oz]) xx[3,:] = np.array([point[0], point[1]-dl, p.Oz]) xx[4,:] = np.array([point[0], point[1]+dl, p.Oz]) for it1 in range(5): s = pc.stream(vv, p, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration, xx = xx[it1,:]) tracersNull[it1,:2] = xx[it1,:2] tracersNull[it1,2:] = s.tracers[s.sl-1,0:2] # check function convergence ff = np.zeros(2) ff[0] = tracersNull[0,2] - tracersNull[0,0] ff[1] = tracersNull[0,3] - tracersNull[0,1] #TODO: if (sum(abs(ff)) <= 1e-4): fixedPoint = np.array([point[0], point[1]]) break # compute the Jacobian fjac = np.zeros((2,2)) fjac[0,0] = ((tracersNull[2,2] - tracersNull[2,0]) - (tracersNull[1,2] - tracersNull[1,0]))/2./dl fjac[0,1] = ((tracersNull[4,2] - tracersNull[4,0]) - (tracersNull[3,2] - tracersNull[3,0]))/2./dl fjac[1,0] = ((tracersNull[2,3] - tracersNull[2,1]) - (tracersNull[1,3] - tracersNull[1,1]))/2./dl fjac[1,1] = ((tracersNull[4,3] - tracersNull[4,1]) - (tracersNull[3,3] - tracersNull[3,1]))/2./dl # invert the Jacobian fjin = np.zeros((2,2)) det = fjac[0,0]*fjac[1,1] - fjac[0,1]*fjac[1,0] #TODO: if (abs(det) < dl): fixedPoint = point break fjin[0,0] = fjac[1,1] fjin[1,1] = fjac[0,0] fjin[0,1] = -fjac[0,1] fjin[1,0] = -fjac[1,0] fjin = fjin/det dpoint = np.zeros(2) dpoint[0] = -fjin[0,0]*ff[0] - fjin[0,1]*ff[1] dpoint[1] = -fjin[1,0]*ff[0] - fjin[1,1]*ff[1] point += dpoint # check root convergence #TODO: if (sum(abs(dpoint)) < 1e-4): fixedPoint = point break if (it > 20): fixedPoint = point print("warning: Newton did not converged") break it += 1 # check if fixed point lies inside the cell if ((fixedPoint[0] < tracers[iy, ix, 0, 0]) or (fixedPoint[0] > tracers[iy, ix+1, 0, 0]) or (fixedPoint[1] < tracers[iy, ix, 0, 1]) or (fixedPoint[1] > tracers[iy+1, ix, 0, 1])): print("warning: fixed point lies outside the cell") else: x.append(fixedPoint[0]) y.append(fixedPoint[1]) #q.append() fidx += 1 queue.put((x, y, q, fidx, iproc)) # multi core setup if (np.isscalar(nproc) == False) or (nproc%1 != 0): print("error: invalid processor number") return -1 queue = mp.Queue() proc = [] # make sure to read the var files with the correct magic if (traceField == 'bb'): magic = 'bb' if (traceField == 'jj'): magic = 'jj' if (traceField == 'vort'): magic = 'vort' # read the cpu structure dim = pc.read_dim(datadir = datadir) if (dim.nprocz > 1): print("error: number of cores in z-direction > 1") var = pc.read_var(varfile = varfile, datadir = datadir, magic = magic, quiet = True, trimall = True) grid = pc.read_grid(datadir = datadir, quiet = True, trim = True) vv = getattr(var, traceField) # initialize the parameters p = pc.pClass() p.dx = var.dx; p.dy = var.dy; p.dz = var.dz p.Ox = var.x[0]; p.Oy = var.y[0]; p.Oz = var.z[0] p.Lx = grid.Lx; p.Ly = grid.Ly; p.Lz = grid.Lz p.nx = dim.nx; p.ny = dim.ny; p.nz = dim.nz # create the initial mapping tracers, mapping, t = pc.tracers(traceField = 'bb', hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, trace_sub = trace_sub, varfile = varfile, integration = integration, datadir = datadir, destination = '', nproc = nproc) 
# find fixed points fixed = pc.fixed_struct() xyq = [] # list of return values from subFixed ix0 = range(0,p.nx*trace_sub-1) # set of grid indices for the cores iy0 = range(0,p.ny*trace_sub-1) # set of grid indices for the cores subFixedLambda = lambda queue, ix0, iy0, vv, p, tracers, iproc: \ subFixed(queue, ix0, iy0, vv, p, tracers, iproc, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration) for iproc in range(nproc): proc.append(mp.Process(target = subFixedLambda, args = (queue, ix0[iproc::nproc], iy0, vv, p, tracers, iproc))) for iproc in range(nproc): proc[iproc].start() for iproc in range(nproc): xyq.append(queue.get()) for iproc in range(nproc): proc[iproc].join() # put together return values from subFixed fixed.fidx = 0 fixed.t = var.t for iproc in range(nproc): fixed.x.append(xyq[xyq[iproc][4]][0]) fixed.y.append(xyq[xyq[iproc][4]][1]) fixed.q.append(xyq[xyq[iproc][4]][2]) fixed.fidx += xyq[xyq[iproc][4]][3] fixed.t = np.array(fixed.t) fixed.x = np.array(fixed.x) fixed.y = np.array(fixed.y) fixed.q = np.array(fixed.q) fixed.fidx = np.array(fixed.fidx) return fixed
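# Usage sketch for fixed_points (an illustration, not part of the module);
# it assumes a run with nprocz == 1 (checked above) and VAR0 present, and
# that the magnetic field can be reconstructed with magic='bb':
if __name__ == '__main__':
    fixed = fixed_points(datadir='data/', varfile='VAR0', trace_sub=2,
                         interpolation='weighted', integration='simple',
                         nproc=4)
    print('t = {0}: {1} fixed points'.format(fixed.t, int(fixed.fidx)))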
def __init__(self, dataDir = 'data/', fileName = 'var.dat', streamFile = 'stream.vtk', interpolation = 'weighted', integration = 'RK6', hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2, iterMax = 1e3, xx = np.array([0,0,0])): """ Creates, and returns the traced streamline. call signature: streamInit(datadir = 'data/', fileName = 'save.dat, interpolation = 'weighted', integration = 'simple', hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2, iterMax = 1e3, xx = np.array([0,0,0])) Trace magnetic streamlines. Keyword arguments: *dataDir*: Data directory. *fileName*: Name of the file with the field information. *interpolation*: Interpolation of the vector field. 'mean': takes the mean of the adjacent grid point. 'weighted': weights the adjacent grid points according to their distance. *integration*: Integration method. 'simple': low order method. 'RK6': Runge-Kutta 6th order. *hMin*: Minimum step length for and underflow to occur. *hMax*: Parameter for the initial step length. *lMax*: Maximum length of the streamline. Integration will stop if l >= lMax. *tol*: Tolerance for each integration step. Reduces the step length if error >= tol. *iterMax*: Maximum number of iterations. *xx*: Initial seeds. """ # read the data var = pc.read_var(datadir = dataDir, varfile = fileName, magic = 'bb', quiet = True, trimall = True) grid = pc.read_grid(datadir = dataDir, quiet = True) vv = var.bb p = pClass() p.dx = var.dx; p.dy = var.dy; p.dz = var.dz p.Ox = var.x[0]; p.Oy = var.y[0]; p.Oz = var.z[0] p.Lx = grid.Lx; p.Ly = grid.Ly; p.Lz = grid.Lz p.nx = var.bb.shape[1]; p.ny = var.bb.shape[2]; p.nz = var.bb.shape[3] ss = [] for i in range(xx.shape[1]): s = streamSingle(vv, p, interpolation = 'weighted', integration = 'simple', hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, iterMax = iterMax, xx = xx[:,i]) ss.append(s) slMax = 0 for i in range(xx.shape[1]): if (slMax < ss[i].sl): slMax = ss[i].sl self.tracers = np.zeros((xx.shape[1], slMax, 3)) + np.nan self.sl = np.zeros(xx.shape[1], dtype = 'int32') self.l = np.zeros(xx.shape[1]) for i in range(xx.shape[1]): self.tracers[i,:ss[i].sl,:] = ss[i].tracers self.sl[i] = ss[i].sl self.l[i] = ss[i].l self.p = s.p self.nt = xx.shape[1] # save into vtk file if (streamFile != []): writer = vtk.vtkPolyDataWriter() writer.SetFileName(dataDir + '/' + streamFile) polyData = vtk.vtkPolyData() fieldData = vtk.vtkFieldData() # field containing length of stream lines for later decomposition field = VN.numpy_to_vtk(self.l) field.SetName('l') fieldData.AddArray(field) field = VN.numpy_to_vtk(self.sl.astype(np.int32)) field.SetName('sl') fieldData.AddArray(field) # streamline parameters tmp = range(10) tmp[0] = np.array([hMin], dtype = 'float32'); field = VN.numpy_to_vtk(tmp[0]); field.SetName('hMin'); fieldData.AddArray(field) tmp[1] = np.array([hMax], dtype = 'float32'); field = VN.numpy_to_vtk(tmp[1]); field.SetName('hMax'); fieldData.AddArray(field) tmp[2] = np.array([lMax], dtype = 'float32'); field = VN.numpy_to_vtk(tmp[2]); field.SetName('lMax'); fieldData.AddArray(field) tmp[3] = np.array([tol], dtype = 'float32'); field = VN.numpy_to_vtk(tmp[3]); field.SetName('tol'); fieldData.AddArray(field) tmp[4] = np.array([iterMax], dtype = 'int32'); field = VN.numpy_to_vtk(tmp[4]); field.SetName('iterMax'); fieldData.AddArray(field) tmp[5] = np.array([self.nt], dtype = 'int32'); field = VN.numpy_to_vtk(tmp[5]); field.SetName('nt'); fieldData.AddArray(field) # fields containing simulation parameters stored in paramFile dic = dir(p) params = range(len(dic)) i = 0 for attr in dic: 
if( attr[0] != '_'): params[i] = getattr(p, attr) params[i] = np.array([params[i]], dtype = type(params[i])) field = VN.numpy_to_vtk(params[i]) field.SetName(attr) fieldData.AddArray(field) i += 1 # all streamlines as continuous array of points points = vtk.vtkPoints() for i in range(xx.shape[1]): for sl in range(self.sl[i]): points.InsertNextPoint(self.tracers[i,sl,:]) polyData.SetPoints(points) polyData.SetFieldData(fieldData) writer.SetInput(polyData) writer.SetFileTypeToBinary() writer.Write()
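# Usage sketch for the streamline tracer above (an illustration, not part of
# the module; the enclosing class is assumed to be the streamInit named in
# the docstring). Seeds are passed as a (3, n) array, one column per starting
# point, and the traced bundle is also written to stream.vtk:
if __name__ == '__main__':
    seeds = np.array([[0.1, 0.2],      # x coordinates
                      [0.0, 0.0],      # y coordinates
                      [-1.0, -1.0]])   # z coordinates
    s = streamInit(dataDir='data/', fileName='var.dat',
                   streamFile='stream.vtk', xx=seeds)
    print(s.nt, 'streamlines; longest one has', s.sl.max(), 'points')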
def slices2vtk( variables=["rho"], extensions=["xy", "xy2", "xz", "yz"], datadir="data/", destination="slices", proc=-1, format="native", ): """ Convert slices from PencilCode format to vtk. call signature:: slices2vtk(variables = ['rho'], extensions = ['xy', 'xy2', 'xz', 'yz'], datadir = 'data/', destination = 'slices', proc = -1, format = 'native'): Read slice files specified by *variables* and convert them into vtk format for the specified extensions. Write the result in *destination*. NB: You need to have called src/read_videofiles.x before using this script. Keyword arguments: *variables*: All allowed fields which can be written as slice files, e.g. b2, uu1, lnrho, ... See the pencil code manual for more (chapter: "List of parameters for `video.in'"). *extensions*: List of slice positions. *datadir*: Directory where the data is stored. *destination*: Destination files. *proc*: Processor which should be read. Set to -1 for all processors. *format*: Endian, one of little, big, or native (default) """ # this should correct for the case the user types only one variable if len(variables) > 0: if len(variables[0]) == 1: variables = [variables] # this should correct for the case the user types only one extension if len(extensions) > 0: if len(extensions[0]) == 1: extensions = [extensions] # read the grid dimensions grid = pc.read_grid(datadir=datadir, proc=proc, trim=True, quiet=True) # read the user given parameters for the slice positions params = pc.read_param(param2=True, quiet=True) # run through all specified variables for field in variables: # run through all specified extensions for ext in extensions: print("read ", field, " ", ext) slices, t = pc.read_slices(field=field, datadir=datadir, proc=proc, extension=ext, format=format) dim_p = slices.shape[2] dim_q = slices.shape[1] if ext[0] == "x": d_p = (np.max(grid.x) - np.min(grid.x)) / (dim_p) else: d_p = (np.max(grid.y) - np.min(grid.y)) / (dim_p) if ext[1] == "y": d_q = (np.max(grid.y) - np.min(grid.y)) / (dim_q) else: d_q = (np.max(grid.z) - np.min(grid.z)) / (dim_q) if params.ix != -1: x0 = grid.x[params.ix] elif params.slice_position == "m": x0 = grid.x[len(grid.x) / 2] if params.iy != -1: y0 = grid.y[params.iy] elif params.slice_position == "m": y0 = grid.y[len(grid.y) / 2] if params.iz != -1: z0 = grid.z[params.iz] elif params.slice_position == "m": z0 = grid.z[len(grid.z) / 2] for i in range(slices.shape[0]): # open the destination file for writing fd = open(destination + "_" + field + "_" + ext + "_" + str(i) + ".vtk", "wb") # write the header fd.write("# vtk DataFile Version 2.0\n") fd.write(field + "_" + ext + "\n") fd.write("BINARY\n") fd.write("DATASET STRUCTURED_POINTS\n") if ext[0:2] == "xy": x0 = grid.x[0] y0 = grid.y[0] fd.write("DIMENSIONS {0:9} {1:9} {2:9}\n".format(dim_p, dim_q, 1)) fd.write("ORIGIN {0:8.12} {1:8.12} {2:8.12}\n".format(x0, y0, z0)) fd.write("SPACING {0:8.12} {1:8.12} {2:8.12}\n".format(grid.dx, grid.dy, 1.0)) elif ext[0:2] == "xz": x0 = grid.x[0] z0 = grid.z[0] fd.write("DIMENSIONS {0:9} {1:9} {2:9}\n".format(dim_p, 1, dim_q)) fd.write("ORIGIN {0:8.12} {1:8.12} {2:8.12}\n".format(x0, y0, z0)) fd.write("SPACING {0:8.12} {1:8.12} {2:8.12}\n".format(grid.dx, 1.0, grid.dy)) elif ext[0:2] == "yz": y0 = grid.y[0] z0 = grid.z[0] fd.write("DIMENSIONS {0:9} {1:9} {2:9}\n".format(1, dim_p, dim_q)) fd.write("ORIGIN {0:8.12} {1:8.12} {2:8.12}\n".format(x0, y0, z0)) fd.write("SPACING {0:8.12} {1:8.12} {2:8.12}\n".format(1.0, grid.dy, grid.dy)) fd.write("POINT_DATA {0:9}\n".format(dim_p * dim_q)) 
fd.write("SCALARS " + field + "_" + ext + " float\n") fd.write("LOOKUP_TABLE default\n") for j in range(dim_q): for k in range(dim_p): fd.write(struct.pack(">f", slices[i, j, k])) fd.close()
#!/usr/bin/env python
import pencil as pc

P = pc.P
dim = pc.read_dim()
index = pc.read_index()
param = pc.read_param(quiet=True)
grid = pc.read_grid(trim=True, param=param, quiet=True)

P.ion()
P.figure(figsize=(6, 6), dpi=64)
frame = grid.x.min(), grid.x.max(), grid.y.min(), grid.y.max()
P.subplots_adjust(bottom=0, top=1, left=0, right=1)
x0 = grid.x.mean()
P.axvline(x0 + 0.5, color='black', linestyle='--')
P.axvline(x0 - 0.5, color='black', linestyle='--')
P.axhline(0.5, color='black', linestyle='--')
P.axhline(-0.5, color='black', linestyle='--')

for ivar in range(0, 8):
    print("read VAR%d" % ivar)
    var = pc.read_var(ivar=ivar, run2D=param.lwrite_2d, param=param, dim=dim,
                      index=index, quiet=True, trimall=True)
    f = var.lnrho[dim.nz // 2, ...]
    # acceleration using a handle
    if ivar == 0:
        im = P.imshow(f, extent=frame, origin='lower', aspect='auto')
    else:
        im.set_data(f)
        im.set_clim(f.min(), f.max())
def power2vtk(powerfiles=["power_mag.dat"], datadir="data/", destination="power", thickness=1): """ Convert power spectra from PencilCode format to vtk. call signature:: power2vtk(powerfiles = ['power_mag.dat'], datadir = 'data/', destination = 'power.vtk', thickness = 1): Read the power spectra stored in the power*.dat files and convert them into vtk format. Write the result in *destination*. Keyword arguments: *powerfiles*: The files containing the power spectra. *datadir*: Directory where the data is stored. *destination*: Destination file. *thickness*: Dimension in z-direction. Setting it 2 will create n*m*2 dimensional array of data. This is useful in Paraview for visualizing the spectrum in 3 dimensions. Note that this will simply double the amount of data. """ # this should correct for the case the user types only one variable if len(powerfiles) > 0: if len(powerfiles[0]) == 1: powerfiles = [powerfiles] # read the grid dimensions grid = pc.read_grid(datadir=datadir, trim=True, quiet=True) # leave k0 to 1 now, will fix this later k0 = 1.0 # leave dk to 1 now, will fix this later dk = 1.0 # open the destination file fd = open(destination + ".vtk", "wb") # read the first power spectrum t, power = pc.read_power(datadir + powerfiles[0]) fd.write("# vtk DataFile Version 2.0\n") fd.write("power spectra\n") fd.write("BINARY\n") fd.write("DATASET STRUCTURED_POINTS\n") if thickness == 1: fd.write("DIMENSIONS {0:9} {1:9} {2:9}\n".format(len(t), power.shape[1], 1)) else: fd.write("DIMENSIONS {0:9} {1:9} {2:9}\n".format(len(t), power.shape[1], 2)) fd.write("ORIGIN {0:8.12} {1:8.12} {2:8.12}\n".format(float(t[0]), k0, 0.0)) fd.write("SPACING {0:8.12} {1:8.12} {2:8.12}\n".format(t[1] - t[0], dk, 1.0)) if thickness == 1: fd.write("POINT_DATA {0:9}\n".format(power.shape[0] * power.shape[1])) else: fd.write("POINT_DATA {0:9}\n".format(power.shape[0] * power.shape[1] * 2)) for powfile in powerfiles: # read the power spectrum t, power = pc.read_power(datadir + powfile) fd.write("SCALARS " + powfile[:-4] + " float\n") fd.write("LOOKUP_TABLE default\n") if thickness == 1: for j in range(power.shape[1]): for i in range(len(t)): fd.write(struct.pack(">f", power[i, j])) else: for k in [1, 2]: for j in range(power.shape[1]): for i in range(len(t)): fd.write(struct.pack(">f", power[i, j])) fd.close()
    coordsystem = 99
    ydim = unit_length
    zdim = unit_length
elif (par.coord_system == 'cylindric'):
    coordsystem = 200
    ydim = 1.
    zdim = unit_length
elif (par.coord_system == 'spherical'):
    coordsystem = 100
    ydim = 1.
    zdim = 1.
else:
    print("the world is flat and we never got here")
    #break

grid = pc.read_grid(trim=True, datadir=datadir)
dim = pc.read_dim(datadir=datadir)
iformat = 1
grid_style = 0
gridinfo = 0
if (dim.nx > 1):
    incl_x = 1
    dx = np.gradient(grid.x)
else:
    incl_x = 0
    dx = np.repeat(grid.dx, dim.nx)
if (dim.ny > 1):
    incl_y = 1
    dy = np.gradient(grid.y)
else: