def process_tavg(args):
    """Compute a trapezoidal time average of the solution files in
    *args.infs* and write the result to *args.outf*.

    Files whose solution time falls outside ``args.limits`` (when given)
    are skipped.  All retained files must share the same mesh UUID and at
    least two files are required.
    """
    infs = {}

    # Interrogate files passed by the shell
    for fname in args.infs:
        # Load solution files and obtain solution times
        inf = read_pyfr_data(fname)
        tinf = Inifile(inf['stats']).getfloat('solver-time-integrator',
                                              'tcurr')

        # Retain if solution time is within limits
        if args.limits is None or args.limits[0] <= tinf <= args.limits[1]:
            infs[tinf] = inf

            # Verify that solutions were computed on the same mesh;
            # compare against the first retained file (dicts preserve
            # insertion order, and keys() is not indexable on Python 3)
            if inf['mesh_uuid'] != infs[next(iter(infs))]['mesh_uuid']:
                raise RuntimeError('Solution files in scope were not computed '
                                   'on the same mesh')

    # Sort the solution times, check for sufficient files in scope
    stimes = sorted(infs)
    if len(infs) <= 1:
        raise RuntimeError('More than one solution file is required to '
                           'compute an average')

    # Initialise progress bar, and the average with first solution
    pb = ProgressBar(0, 0, len(stimes), 0)
    avgs = {name: infs[stimes[0]][name].copy() for name in infs[stimes[0]]}
    solnfs = [name for name in avgs if name.startswith('soln')]

    # Weight the initialised trapezoidal mean
    dtnext = stimes[1] - stimes[0]
    for name in solnfs:
        avgs[name] *= 0.5 * dtnext
    pb.advance_to(1)

    # Compute the trapezoidal mean up to the last solution file
    # (use range, not the Python 2-only xrange)
    for i in range(len(stimes) - 2):
        dtlast = dtnext
        dtnext = stimes[i + 2] - stimes[i + 1]

        # Weight the current solution, then add to the mean
        for name in solnfs:
            avgs[name] += 0.5 * (dtlast + dtnext) * infs[stimes[i + 1]][name]
        pb.advance_to(i + 2)

    # Weight final solution, update mean and normalise for elapsed time.
    # Advance the bar by an absolute amount: the loop variable i is
    # undefined when only two files are in scope.
    for name in solnfs:
        avgs[name] += 0.5 * dtnext * infs[stimes[-1]][name]
        avgs[name] *= 1.0 / (stimes[-1] - stimes[0])
    pb.advance_to(len(stimes))

    # Compute and assign stats for a time-averaged solution
    stats = Inifile()
    stats.set('time-average', 'tmin', stimes[0])
    stats.set('time-average', 'tmax', stimes[-1])
    stats.set('time-average', 'ntlevels', len(stimes))
    avgs['stats'] = stats.tostr()

    # Write out, ensuring the file handle is closed
    with open(args.outf, 'wb') as outf:
        np.savez(outf, **avgs)
class BaseWriter(object):
    """Base class for writers that convert native PyFR output files."""

    def __init__(self, args):
        from pyfr.solvers.base import BaseSystem

        self.outf = args.outf

        # Read in the solution and mesh files
        soln = NativeReader(args.solnf)
        mesh = NativeReader(args.meshf)
        self.soln, self.mesh = soln, mesh

        # The solution must have been computed on this very mesh
        if mesh['mesh_uuid'] != soln['mesh_uuid']:
            raise RuntimeError('Solution "%s" was not computed on mesh "%s"' %
                               (args.solnf, args.meshf))

        # Parse the embedded configuration and statistics
        self.cfg = Inifile(soln['config'])
        self.stats = Inifile(soln['stats'])

        # Dataset prefix; older files lack one, so fall back to 'soln'
        self.dataprefix = self.stats.get('data', 'prefix', 'soln')

        # Shape information for each element type present
        self.mesh_inf = mesh.array_info('spt')
        self.soln_inf = soln.array_info(self.dataprefix)

        # Number of spatial dimensions and field variables
        self.ndims = next(iter(self.mesh_inf.values()))[1][2]
        self.nvars = next(iter(self.soln_inf.values()))[1][1]

        # Resolve the system class named in the config, and its elements
        self.systemscls = subclass_where(
            BaseSystem, name=self.cfg.get('solver', 'system')
        )
        self.elementscls = self.systemscls.elementscls
class BaseWriter(object):
    """Common loading and validation logic for PyFR format converters."""

    def __init__(self, args):
        from pyfr.solvers.base import BaseSystem

        self.outf = args.outf

        # Load both native files from disk
        self.soln = NativeReader(args.solnf)
        self.mesh = NativeReader(args.meshf)

        # Refuse solutions computed on a different mesh
        if self.soln['mesh_uuid'] != self.mesh['mesh_uuid']:
            raise RuntimeError('Solution "%s" was not computed on mesh "%s"' %
                               (args.solnf, args.meshf))

        # Embedded configuration and statistics sections
        self.cfg = Inifile(self.soln['config'])
        self.stats = Inifile(self.soln['stats'])

        # Prefix under which the data arrays are stored; defaults to
        # 'soln' for backwards compatibility
        self.dataprefix = self.stats.get('data', 'prefix', 'soln')

        # Per element type array metadata
        self.mesh_inf = self.mesh.array_info('spt')
        self.soln_inf = self.soln.array_info(self.dataprefix)

        # Infer dimensionality and variable count from the first entries
        minfo = next(iter(self.mesh_inf.values()))
        sinfo = next(iter(self.soln_inf.values()))
        self.ndims = minfo[1][2]
        self.nvars = sinfo[1][1]

        # Locate the system class named in the configuration
        sysname = self.cfg.get('solver', 'system')
        self.systemscls = subclass_where(BaseSystem, name=sysname)
        self.elementscls = self.systemscls.elementscls
def parse(self, cmd_args):
    """Parse *cmd_args* and resolve the configuration to use."""
    self.args = self.ap.parse_args(cmd_args)

    if self.args.cfg:
        # An explicit config file takes precedence
        self.cfg = Inifile.load(self.args.cfg)
    else:
        # Otherwise fall back to the config embedded in the solution
        self.cfg = Inifile(NativeReader(self.args.soln)['config'])
def _load_eles(self, rallocs, mesh, initsoln, nregs, nonce): basismap = {b.name: b for b in subclasses(BaseShape, just_leaf=True)} # Look for and load each element type from the mesh elemap = {} for f in mesh: m = re.match(f'spt_(.+?)_p{rallocs.prank}$', f) if m: # Element type t = m.group(1) elemap[t] = self.elementscls(basismap[t], mesh[f], self.cfg) # Construct a proxylist to simplify collective operations eles = proxylist(elemap.values()) # Set the initial conditions if initsoln: # Load the config and stats files from the solution solncfg = Inifile(initsoln['config']) solnsts = Inifile(initsoln['stats']) # Get the names of the conserved variables (fields) solnfields = solnsts.get('data', 'fields', '') currfields = ','.join(eles[0].convarmap[eles[0].ndims]) # Ensure they match up if solnfields and solnfields != currfields: raise RuntimeError('Invalid solution for system') # Process the solution for etype, ele in elemap.items(): soln = initsoln[f'soln_{etype}_p{rallocs.prank}'] ele.set_ics_from_soln(soln, solncfg) else: eles.set_ics_from_cfg() # Allocate these elements on the backend for etype, ele in elemap.items(): k = f'spt_{etype}_p{rallocs.prank}' try: curved = ~mesh[k, 'linear'] linoff = np.max(*np.nonzero(curved), initial=-1) + 1 except KeyError: linoff = ele.neles ele.set_backend(self.backend, nregs, nonce, linoff) return eles, elemap
def __init__(self, backend, rallocs, mesh, initsoln, cfg):
    """Initialise common time integrator state.

    :param backend: computational backend instance
    :param rallocs: rank allocation (partitioning) object
    :param mesh: native mesh file
    :param initsoln: solution to restart from, or None for a fresh run
    :param cfg: configuration (Inifile)
    """
    self.backend = backend
    self.rallocs = rallocs
    self.isrestart = initsoln is not None
    self.cfg = cfg

    # Config files from prior runs embedded in the restart file, if any
    self.prevcfgs = {
        f: initsoln[f] for f in initsoln or [] if f.startswith('config-')
    }

    # Start time
    self.tstart = cfg.getfloat('solver-time-integrator', 'tstart', 0.0)
    self.tend = cfg.getfloat('solver-time-integrator', 'tend')

    # Current time; defaults to tstart unless restarting
    if self.isrestart:
        stats = Inifile(initsoln['stats'])
        self.tcurr = stats.getfloat('solver-time-integrator', 'tcurr')
    else:
        self.tcurr = self.tstart

    # List of target times to advance to
    self.tlist = deque([self.tend])

    # Accepted and rejected step counters
    self.nacptsteps = 0
    self.nrjctsteps = 0
    self.nacptchain = 0

    # Current and minimum time steps
    self._dt = cfg.getfloat('solver-time-integrator', 'dt')
    self.dtmin = cfg.getfloat('solver-time-integrator', 'dt-min', 1e-12)

    # Extract the UUID of the mesh (to be saved with solutions)
    self.mesh_uuid = mesh['mesh_uuid']

    # Get a queue for subclasses to use
    self._queue = backend.queue()

    # Solution cache
    self._curr_soln = None

    # Solution gradients cache
    self._curr_grad_soln = None

    # Record the starting wall clock time
    self._wstart = time.time()

    # Abort flag; set externally to stop the run
    self.abort = False
def process_tavg(args):
    """Trapezoidally time-average the solution files in *args.infs*,
    writing the result to *args.outf*.

    Files outside ``args.limits`` (when given) are ignored; the retained
    files must all share one mesh UUID and number at least two.
    """
    infs = {}

    # Interrogate files passed by the shell
    for fname in args.infs:
        # Load solution files and obtain solution times
        inf = read_pyfr_data(fname)
        tinf = Inifile(inf['stats']).getfloat('solver-time-integrator',
                                              'tcurr')

        # Retain if solution time is within limits
        if args.limits is None or args.limits[0] <= tinf <= args.limits[1]:
            infs[tinf] = inf

            # Verify that solutions were computed on the same mesh;
            # next(iter(...)) yields the first-inserted key (keys() is
            # not subscriptable on Python 3)
            if inf['mesh_uuid'] != infs[next(iter(infs))]['mesh_uuid']:
                raise RuntimeError('Solution files in scope were not computed '
                                   'on the same mesh')

    # Sort the solution times, check for sufficient files in scope
    stimes = sorted(infs)
    if len(infs) <= 1:
        raise RuntimeError('More than one solution file is required to '
                           'compute an average')

    # Initialise progress bar, and the average with first solution
    pb = ProgressBar(0, 0, len(stimes), 0)
    avgs = {name: infs[stimes[0]][name].copy() for name in infs[stimes[0]]}
    solnfs = [name for name in avgs if name.startswith('soln')]

    # Weight the initialised trapezoidal mean
    dtnext = stimes[1] - stimes[0]
    for name in solnfs:
        avgs[name] *= 0.5*dtnext
    pb.advance_to(1)

    # Compute the trapezoidal mean up to the last solution file
    # (range, not the Python 2-only xrange)
    for i in range(len(stimes) - 2):
        dtlast = dtnext
        dtnext = stimes[i + 2] - stimes[i + 1]

        # Weight the current solution, then add to the mean
        for name in solnfs:
            avgs[name] += 0.5*(dtlast + dtnext)*infs[stimes[i + 1]][name]
        pb.advance_to(i + 2)

    # Weight final solution, update mean and normalise for elapsed time.
    # Use an absolute progress value; i is undefined with only two files.
    for name in solnfs:
        avgs[name] += 0.5*dtnext*infs[stimes[-1]][name]
        avgs[name] *= 1.0/(stimes[-1] - stimes[0])
    pb.advance_to(len(stimes))

    # Compute and assign stats for a time-averaged solution
    stats = Inifile()
    stats.set('time-average', 'tmin', stimes[0])
    stats.set('time-average', 'tmax', stimes[-1])
    stats.set('time-average', 'ntlevels', len(stimes))
    avgs['stats'] = stats.tostr()

    # Write out, closing the handle when done
    with open(args.outf, 'wb') as outf:
        np.savez(outf, **avgs)
def __init__(self, backend, systemcls, rallocs, mesh, initsoln, cfg):
    """Initialise the integrator and construct the flow system.

    :param backend: computational backend instance
    :param systemcls: system class to instantiate for this partition
    :param rallocs: rank allocation (partitioning) object
    :param mesh: native mesh file
    :param initsoln: solution to resume from, or None for a fresh run
    :param cfg: configuration (Inifile)
    :raises TypeError: stepper/controller combination is incompatible
    :raises ValueError: no output times remain in the future
    """
    from mpi4py import MPI

    self.backend = backend
    self.rallocs = rallocs
    self.cfg = cfg

    # Sanity checks
    if self._controller_needs_errest and not self._stepper_has_errest:
        raise TypeError('Incompatible stepper/controller combination')

    # Start time
    self.tstart = cfg.getfloat('solver-time-integrator', 't0', 0.0)

    # Output times
    self.tout = sorted(range_eval(cfg.get('soln-output', 'times')))
    self.tend = self.tout[-1]

    # Current time; defaults to tstart unless resuming a simulation
    if initsoln is None or 'stats' not in initsoln:
        self.tcurr = self.tstart
    else:
        stats = Inifile(initsoln['stats'])
        self.tcurr = stats.getfloat('solver-time-integrator', 'tcurr')

        # Cull already written output times
        self.tout = [t for t in self.tout if t > self.tcurr]

    # Ensure some output times remain and none are in the past;
    # previously an empty (fully-culled) list raised an opaque IndexError
    if not self.tout or self.tout[0] < self.tcurr:
        raise ValueError('Output times must be in the future')

    # Determine the amount of temp storage required by this method
    nreg = self._stepper_nregs

    # Construct the relevant mesh partition
    self._system = systemcls(backend, rallocs, mesh, initsoln, nreg, cfg)

    # Extract the UUID of the mesh (to be saved with solutions)
    self._mesh_uuid = mesh['mesh_uuid']

    # Get a queue for subclasses to use
    self._queue = backend.queue()

    # Get the number of degrees of freedom in this partition
    ndofs = sum(self._system.ele_ndofs)

    # Sum to get the global number over all partitions
    self._gndofs = MPI.COMM_WORLD.allreduce(ndofs, op=MPI.SUM)
def process_restart(args):
    """Restart a simulation from an existing solution file."""
    mesh = NativeReader(args.mesh)
    soln = NativeReader(args.soln)

    # The solution must correspond to the mesh we are using
    if mesh['mesh_uuid'] != soln['mesh_uuid']:
        raise RuntimeError('Invalid solution for mesh.')

    # An explicit config file overrides the one embedded in the solution
    cfg = Inifile.load(args.cfg) if args.cfg else Inifile(soln['config'])

    _process_common(args, mesh, soln, cfg)
def process_restart(args):
    """Load a mesh/solution pair for a restart and return them with the
    configuration to use."""
    mesh = read_pyfr_data(args.mesh)
    soln = read_pyfr_data(args.soln)

    # The solution must correspond to the mesh we are using
    if mesh['mesh_uuid'] != soln['mesh_uuid']:
        raise RuntimeError('Invalid solution for mesh.')

    # Prefer an explicitly supplied config over the embedded one
    cfg = Inifile.load(args.cfg) if args.cfg else Inifile(soln['config'])

    return mesh, soln, cfg
def _load_eles(self, rallocs, mesh, initsoln):
    """Instantiate and initialise the element types in our partition.

    :param rallocs: rank allocation object (provides our prank)
    :param mesh: native mesh file
    :param initsoln: restart solution, or a falsy value for a fresh run
    :return: ``(eles, elemap)`` — a proxylist over the element objects
             and an ordered dict mapping element type names to them
    """
    basismap = {b.name: b for b in subclasses(BaseShape, just_leaf=True)}

    # Look for and load each element type from the mesh
    elemap = OrderedDict()
    for f in mesh:
        m = re.match('spt_(.+?)_p%d$' % rallocs.prank, f)
        if m:
            # Element type
            t = m.group(1)

            elemap[t] = self.elementscls(basismap[t], mesh[f], self._cfg)

    # Construct a proxylist to simplify collective operations
    eles = proxylist(elemap.values())

    # Set the initial conditions either from a pyfrs file or from
    # explicit expressions in the config file
    if initsoln:
        # Load the config used to produce the solution
        solncfg = Inifile(initsoln['config'])

        # Process the solution; items() rather than the Python 2-only
        # iteritems(), which does not exist on Python 3
        for k, ele in elemap.items():
            soln = initsoln['soln_%s_p%d' % (k, rallocs.prank)]
            ele.set_ics_from_soln(soln, solncfg)
    else:
        eles.set_ics_from_cfg()

    # Allocate these elements on the backend
    eles.set_backend(self._backend, self._nreg)

    return eles, elemap
def __init__(self, args):
    """Load the PyFR mesh and solution files named in *args*.

    Validates that the solution was computed on the given mesh.

    :param args: Command line arguments passed from scripts/postp.py
    :type args: class 'argparse.Namespace'
    """
    self.args = args
    self.outf = args.outf

    # Read the solution and mesh from disk
    self.soln = read_pyfr_data(args.solnf)
    self.mesh = read_pyfr_data(args.meshf)

    # Array shape information for each file
    self.mesh_inf = self.mesh.array_info
    self.soln_inf = self.soln.array_info

    # The solution must have been computed on this very mesh
    if self.mesh['mesh_uuid'] != self.soln['mesh_uuid']:
        raise RuntimeError('Solution "%s" was not computed on mesh "%s"' %
                           (args.solnf, args.meshf))

    # Parse the embedded configuration
    self.cfg = Inifile(self.soln['config'])
def __call__(self, intg):
    """Write a solution snapshot once dt_out has elapsed."""
    # Bail out if the next write is not yet due
    if intg.tcurr - self.tout_last < self.dt_out - self.tol:
        return

    # Assemble the stats record for this snapshot
    stats = Inifile()
    stats.set('data', 'fields', ','.join(self.fields))
    stats.set('data', 'prefix', 'soln')
    intg.collect_stats(stats)

    # Metadata to embed alongside the data
    metadata = dict(intg.cfgmeta, stats=stats.tostr(),
                    mesh_uuid=intg.mesh_uuid)

    # Gather the data itself and write the file
    fname = self._writer.write(self._prepare_data(intg), metadata,
                               intg.tcurr)

    # Run any registered post-action against the freshly written file
    self._invoke_postaction(mesh=intg.system.mesh.fname, soln=fname,
                            t=intg.tcurr)

    # Remember when we last wrote
    self.tout_last = intg.tcurr
def _load_eles(self, rallocs, mesh, initsoln, nregs, nonce): basismap = {b.name: b for b in subclasses(BaseShape, just_leaf=True)} # Look for and load each element type from the mesh elemap = OrderedDict() for f in mesh: m = re.match('spt_(.+?)_p{0}$'.format(rallocs.prank), f) if m: # Element type t = m.group(1) elemap[t] = self.elementscls(basismap[t], mesh[f], self.cfg) # Construct a proxylist to simplify collective operations eles = proxylist(elemap.values()) # Set the initial conditions if initsoln: # Load the config and stats files from the solution solncfg = Inifile(initsoln['config']) solnsts = Inifile(initsoln['stats']) # Get the names of the conserved variables (fields) solnfields = solnsts.get('data', 'fields', '') currfields = ','.join(eles[0].convarmap[eles[0].ndims]) # Ensure they match up if solnfields and solnfields != currfields: raise RuntimeError('Invalid solution for system') # Process the solution for etype, ele in elemap.items(): soln = initsoln['soln_{0}_p{1}'.format(etype, rallocs.prank)] ele.set_ics_from_soln(soln, solncfg) else: eles.set_ics_from_cfg() # Compute the index of first strictly interior element intoffs = self._compute_int_offsets(rallocs, mesh) # Allocate these elements on the backend for etype, ele in elemap.items(): ele.set_backend(self.backend, nregs, nonce, intoffs[etype]) return eles, elemap
def __call__(self, intg):
    """Write a (possibly region-subset) solution snapshot once dt_out
    has elapsed."""
    # Bail out if the next write is not yet due
    if intg.tcurr - self.tout_last < self.dt_out - self.tol:
        return

    # Assemble the stats record for this snapshot
    stats = Inifile()
    stats.set('data', 'fields', ','.join(self.fields))
    stats.set('data', 'prefix', 'soln')
    intg.collect_stats(stats)

    # Metadata to embed alongside the data
    metadata = dict(intg.cfgmeta, stats=stats.tostr(),
                    mesh_uuid=intg.mesh_uuid)

    # Extract and subset the solution, one element region at a time
    soln = []
    for i, rgn in self._ele_regions:
        soln.append(intg.soln[i][..., rgn])

    # Add in any required region data
    data = self._add_region_data(soln)

    # Write the file out
    fname = self._writer.write(data, metadata, intg.tcurr)

    # Run any registered post-action against the new file
    self._invoke_postaction(mesh=intg.system.mesh.fname, soln=fname,
                            t=intg.tcurr)

    # Remember when we last wrote
    self.tout_last = intg.tcurr
def Read_solution(time_step, K=None):
    """Read one channel snapshot and return the selected field as a
    structured array.

    :param time_step: solution time used to build the file name
    :param K: index of the field to extract; prompted for when None
    :return: the field reordered onto the structured grid
    """
    # Structured grid dimensions (elements per direction) — TODO confirm
    nx = 62
    ny = 19
    nz = 60

    dire = rep + 'solutions/Channel'
    filename = dire + f'-{time_step:010.4f}.pyfrs'

    # Field names recorded in the stats section
    soln = NativeReader(filename)
    cfg = Inifile(soln['stats'])
    vari = [s.strip() for s in cfg.get('data', 'fields').split(',')]

    # Prompt for the field index if one was not supplied;
    # use `is None`, not `== None`
    if K is None:
        K = input(f'variables {vari} :')
    print(f'variable selected {vari[int(K)]}')

    # Concatenate the per-partition solution arrays along the element
    # axis; the context manager closes the HDF5 handle promptly (the
    # previous version leaked it and shadowed the re module)
    with h5py.File(filename, 'r') as h5f:
        parts = [h5f[f'soln_hex_p{i}'][()] for i in range(npart)]
    sol = np.concatenate(parts, 2) if len(parts) > 1 else parts[0]

    # Reorder elements into the global (structured) ordering
    sol = sol[:, :, Mesh]
    nk, nv, _ = sol.shape[:]
    sol = np.reshape(sol, (nk, nv, ny, nz, nx), order='F')

    # Extract the requested field
    u = sol[:, int(K), :, :, :]
    n, ny, nz, nx = u.shape[:]
    n = int(np.asarray(np.cbrt(n), dtype=int))

    # Unfold the per-element solution points into a structured grid
    u = np.reshape(u, (n, n**2, ny, nz, nx), order='F')
    u = np.transpose(u, (1, 0, 2, 3, 4))
    u = np.reshape(u, (n, n, n, ny, nz, nx), order='F')
    u = np.transpose(u, (0, 3, 1, 2, 4, 5))
    u = np.reshape(u, (n * ny, n, n, nz, nx), order='F')
    u = np.squeeze(u[:, :, :, ::-1, :])
    u = np.reshape(u, (n * ny, n, nz * n, nx), order='F')
    u = np.transpose(u, (0, 2, 1, 3))
    u = np.reshape(u, (n * ny, nz * n, nx * n), order='F')
    u = np.transpose(u, (2, 0, 1))

    return u
def __init__(self, backend, rallocs, mesh, initsoln, cfg):
    """Initialise common time integrator state.

    :param backend: computational backend instance
    :param rallocs: rank allocation (partitioning) object
    :param mesh: native mesh file
    :param initsoln: solution to restart from, or None for a fresh run
    :param cfg: configuration (Inifile)
    """
    self.backend = backend
    self.rallocs = rallocs
    self.isrestart = initsoln is not None
    self.cfg = cfg

    # Config files from prior runs embedded in the restart file, if any
    self.prevcfgs = {f: initsoln[f] for f in initsoln or []
                     if f.startswith('config-')}

    # Start time
    self.tstart = cfg.getfloat('solver-time-integrator', 'tstart', 0.0)
    self.tend = cfg.getfloat('solver-time-integrator', 'tend')

    # Current time; defaults to tstart unless restarting
    if self.isrestart:
        stats = Inifile(initsoln['stats'])
        self.tcurr = stats.getfloat('solver-time-integrator', 'tcurr')
    else:
        self.tcurr = self.tstart

    # List of target times to advance to
    self.tlist = deque([self.tend])

    # Accepted and rejected step counters
    self.nacptsteps = 0
    self.nrjctsteps = 0
    self.nacptchain = 0

    # Current and minimum time steps
    self._dt = cfg.getfloat('solver-time-integrator', 'dt')
    self.dtmin = cfg.getfloat('solver-time-integrator', 'dt-min', 1e-12)

    # Extract the UUID of the mesh (to be saved with solutions)
    self.mesh_uuid = mesh['mesh_uuid']

    # Get a queue for subclasses to use
    self._queue = backend.queue()

    # Solution cache
    self._curr_soln = None

    # Cache of axnpby (axpy-like) kernels, keyed by their arguments
    self._axnpby_kerns = {}

    # Record the starting wall clock time
    self._wstart = time.time()
def Read_solutions(time_step, Mesh=Mesh):
    """Download one channel snapshot, extract the five conserved fields
    and delete the local copy.

    Note: the default for *Mesh* is bound once, at definition time, from
    the module-level Mesh.

    :param time_step: solution time used to build the file name
    :return: ``(rho, rhou, rhov, rhow, E)`` structured field arrays
    """
    # Structured grid dimensions (elements per direction) — TODO confirm
    nx = 62
    ny = 19
    nz = 60

    filename = f'Channel-{time_step:010.4f}.pyfrs'
    url = f'DNS-1/2/Channel_180/snapshots/Channel-{time_step:010.4f}.pyfrs'
    bucket.download_file(url, filename)

    # Field names recorded in the stats section
    soln = NativeReader(filename)
    cfg = Inifile(soln['stats'])
    vari = [s.strip() for s in cfg.get('data', 'fields').split(',')]

    # Concatenate the per-partition solution arrays along the element
    # axis; the context manager ensures the HDF5 handle is closed
    # BEFORE the file is removed below (previously it was left open)
    with h5py.File(filename, 'r') as h5f:
        parts = [h5f[f'soln_hex_p{i}'][()] for i in range(npart)]
    sol = np.concatenate(parts, 2) if len(parts) > 1 else parts[0]

    # Reorder elements into the global (structured) ordering
    sol = sol[:, :, Mesh]
    nk, nv, _ = sol.shape[:]
    sol = np.reshape(sol, (nk, nv, ny, nz, nx), order='F')

    # Build each conserved field
    rho = build_fields(sol, 0)
    rhou = build_fields(sol, 1)
    rhov = build_fields(sol, 2)
    rhow = build_fields(sol, 3)
    E = build_fields(sol, 4)

    # Remove the downloaded snapshot now that it is fully read
    os.remove(filename)

    return rho, rhou, rhov, rhow, E
def __init__(self, backend, systemcls, rallocs, mesh, initsoln, cfg):
    """Initialise the integrator and construct the flow system.

    :param backend: computational backend instance
    :param systemcls: system class to instantiate for this partition
    :param rallocs: rank allocation (partitioning) object
    :param mesh: native mesh file
    :param initsoln: solution to restart from, or None for a fresh run
    :param cfg: configuration (Inifile)
    :raises TypeError: stepper/controller combination is incompatible
    """
    self.backend = backend
    self.rallocs = rallocs
    self.cfg = cfg
    self.isrestart = initsoln is not None

    # Sanity checks
    if self._controller_needs_errest and not self._stepper_has_errest:
        raise TypeError('Incompatible stepper/controller combination')

    # Start time
    self.tstart = cfg.getfloat('solver-time-integrator', 'tstart', 0.0)
    self.tend = cfg.getfloat('solver-time-integrator', 'tend')

    # Current time; defaults to tstart unless restarting
    if self.isrestart:
        stats = Inifile(initsoln['stats'])
        self.tcurr = stats.getfloat('solver-time-integrator', 'tcurr')
    else:
        self.tcurr = self.tstart

    # List of target times to advance to
    self.tlist = deque([self.tend])

    # Determine the amount of temp storage required by this method
    nreg = self._stepper_nregs

    # Construct the relevant mesh partition
    self.system = systemcls(backend, rallocs, mesh, initsoln, nreg, cfg)

    # Extract the UUID of the mesh (to be saved with solutions)
    self.mesh_uuid = mesh['mesh_uuid']

    # Get a queue for subclasses to use
    self._queue = backend.queue()

    # Get the number of degrees of freedom in this partition
    ndofs = sum(self.system.ele_ndofs)

    comm, rank, root = get_comm_rank_root()

    # Sum to get the global number over all partitions
    self._gndofs = comm.allreduce(ndofs, op=get_mpi('sum'))
def _load_eles(self, rallocs, mesh, initsoln, nreg): basismap = {b.name: b for b in subclasses(BaseShape, just_leaf=True)} # Look for and load each element type from the mesh elemap = OrderedDict() for f in mesh: m = re.match('spt_(.+?)_p%d$' % rallocs.prank, f) if m: # Element type t = m.group(1) elemap[t] = self.elementscls(basismap[t], mesh[f], self.cfg) # Construct a proxylist to simplify collective operations eles = proxylist(elemap.values()) # Set the initial conditions either from a pyfrs file or from # explicit expressions in the config file if initsoln: # Load the config and stats files from the solution solncfg = Inifile(initsoln['config']) solnsts = Inifile(initsoln['stats']) # Get the names of the conserved variables (fields) solnfields = solnsts.get('data', 'fields', '') currfields = ','.join(eles[0].convarmap[eles[0].ndims]) # Ensure they match up if solnfields and solnfields != currfields: raise RuntimeError('Invalid solution for system') # Process the solution for k, ele in elemap.items(): soln = initsoln['soln_%s_p%d' % (k, rallocs.prank)] ele.set_ics_from_soln(soln, solncfg) else: eles.set_ics_from_cfg() # Allocate these elements on the backend eles.set_backend(self.backend, nreg) return eles, elemap
def save_solution(self, savedir, basename, t=0):
    """Write the solver's current solution to *savedir*/*basename*.

    :param savedir: directory to write into
    :param basename: base file name for the output
    :param t: solution time to record in the metadata
    """
    system = self.solver.system

    # Native format writer for the solution arrays
    writer = NativeWriter(self.solver, system.nvars, savedir, basename,
                          prefix='soln')

    # Names of the conserved variables
    fields = system.elementscls.convarmap[system.ndims]

    # Assemble the stats record
    stats = Inifile()
    stats.set('data', 'fields', ','.join(fields))
    stats.set('data', 'prefix', 'soln')
    self.solver.collect_stats(stats)
    stats.set('solver-time-integrator', 'tcurr', str(t))

    # Metadata to embed alongside the data
    metadata = dict(self.solver.cfgmeta, stats=stats.tostr(),
                    mesh_uuid=self.solver.mesh_uuid)

    writer.write(self.solver.soln, metadata, t)
def run(self):
    """March the solution through each requested output time."""
    for tout in self.tout:
        # Integrate up to the output time
        soln = self.advance_to(tout)

        # Pair each solution array with its element type
        smap = OrderedDict(zip(self._system.ele_types, soln))

        # Gather statistics for this snapshot
        stats = Inifile()
        self.collect_stats(stats)

        # Emit the solution
        self.output(smap, stats)
def process_restart(args):
    """Restart a simulation from an existing solution file."""
    mesh = read_pyfr_data(args.mesh)
    soln = read_pyfr_data(args.soln)

    # The solution must correspond to the mesh we are using
    if mesh["mesh_uuid"] != soln["mesh_uuid"]:
        raise RuntimeError("Invalid solution for mesh.")

    # Prefer an explicitly supplied config over the embedded one
    cfg = Inifile.load(args.cfg) if args.cfg else Inifile(soln["config"])

    _process_common(args, mesh, soln, cfg)
def __call__(self, intg):
    """Accumulate the time-average expressions and, when an averaging
    window ends, normalise and write them out."""
    dowrite = abs(self.tout - intg.tcurr) < self.tol
    doaccum = intg.nacptsteps % self.nsteps == 0

    if dowrite or doaccum:
        # Evaluate the time averaging expressions
        currex = self._eval_exprs(intg)

        # Accumulate them; always do this even when just writing
        # (trapezoidal rule over [prevt, tcurr])
        for a, p, c in zip(self.accmex, self.prevex, currex):
            a += 0.5*(intg.tcurr - self.prevt)*(p + c)

        # Save the time and solution
        self.prevt = intg.tcurr
        self.prevex = currex

        if dowrite:
            # Normalise by the length of the averaging window
            accmex = [a / self.dtout for a in self.accmex]

            # Stats record describing the averaging window
            stats = Inifile()
            stats.set('data', 'prefix', 'tavg')
            stats.set('data', 'fields', ','.join(k for k, v in self.exprs))
            stats.set('tavg', 'tstart', intg.tcurr - self.dtout)
            stats.set('tavg', 'tend', intg.tcurr)
            intg.collect_stats(stats)

            # Metadata to embed alongside the averages
            metadata = dict(intg.cfgmeta, stats=stats.tostr(),
                            mesh_uuid=intg.mesh_uuid)

            self._writer.write(accmex, metadata, intg.tcurr)

            # Schedule the next write and reset the accumulators
            self.tout = intg.tcurr + self.dtout
            self.accmex = [np.zeros_like(a) for a in accmex]
def test_hex_gleg_ord3():
    """Check the hex operator matrices for a third order DG scheme
    against stored reference values."""
    # Third order configuration on Gauss-Legendre points
    cfg = Inifile()
    cfg.set('solver', 'order', '3')
    cfg.set('solver-interfaces-quad', 'flux-pts', 'gauss-legendre')
    cfg.set('solver-elements-hex', 'soln-pts', 'gauss-legendre')

    # Instantiate the hex shape
    hs = HexShape(None, cfg)

    # Reference operator matrices shipped alongside the test
    refm = np.load(BytesIO(pkgutil.get_data(__name__, 'hex-gleg-ord3.npz')))

    # Each operator matrix must agree with its reference
    for mat in ('m0', 'm1', 'm2', 'm3'):
        assert np.allclose(refm[mat], getattr(hs, mat))
def __init__(self, backend, systemcls, rallocs, mesh, initsoln, cfg): sect = 'solver-dual-time-integrator-multip' # Get the solver order self._order = cfg.getint('solver', 'order') # Get the multigrid cycle self.cycle, self.csteps = zip(*cfg.getliteral(sect, 'cycle')) self.levels = sorted(set(self.cycle), reverse=True) self.level = self._order if max(self.cycle) > self._order: raise ValueError('The multigrid level orders cannot exceed ' 'the solution order') if any(abs(i - j) > 1 for i, j in zip(self.cycle, self.cycle[1:])): raise ValueError('The orders of consecutive multigrid levels can ' 'only change by one') if self.cycle[0] != self._order or self.cycle[-1] != self._order: raise ValueError('The multigrid cycle needs to start end with the ' 'highest (solution) order ') # Multigrid pseudo-time steps dtau = cfg.getfloat('solver-time-integrator', 'pseudo-dt') dtauf = cfg.getfloat(sect, 'pseudo-dt-fact', 1.0) self.dtaus = {l: dtau * dtauf**(self._order - l) for l in self.levels} # Generate suitable config files for lower multigrid levels self._mgcfgs = {l: Inifile(cfg.tostr()) for l in self.levels[1:]} for l, mgcfg in self._mgcfgs.items(): mgcfg.set('solver', 'order', l) for sec in cfg.sections(): m = re.match(r'solver-(.*)-mg-p{0}'.format(l), sec) if m: mgcfg.rename_section(m.group(0), 'solver-' + m.group(1)) # Insert the original config file to the multigrid config dictionary self._mgcfgs[self._order] = cfg super().__init__(backend, systemcls, rallocs, mesh, initsoln, cfg) # Delete remaining elements maps from multigrid systems for l in self.levels[1:]: del self._mgsystems[l].ele_map
def __call__(self, intg):
    """Write a solution snapshot when an output time is (nearly) due."""
    # Nothing to do until we are within dtmin of the next output time
    if abs(self.tout_next - intg.tcurr) > intg.dtmin:
        return

    # Assemble the stats record for this snapshot
    stats = Inifile()
    stats.set('data', 'fields', ','.join(self.fields))
    stats.set('data', 'prefix', 'soln')
    intg.collect_stats(stats)

    # Metadata to embed alongside the data
    metadata = dict(config=self.cfg.tostr(), stats=stats.tostr(),
                    mesh_uuid=intg.mesh_uuid)

    # Write the solution out
    self._writer.write(intg.soln, metadata, intg.tcurr)

    # Schedule the next write
    self.tout_next += self.dt_out
def __call__(self, intg):
    """Write a solution snapshot once dt_out has elapsed, embedding
    serialised state from any plugins that support it."""
    if intg.tcurr - self.tout_last < self.dt_out - self.tol:
        return

    comm, rank, root = get_comm_rank_root()

    # If we are the root rank then prepare the metadata
    if rank == root:
        stats = Inifile()
        stats.set('data', 'fields', ','.join(self.fields))
        stats.set('data', 'prefix', 'soln')
        intg.collect_stats(stats)

        metadata = dict(intg.cfgmeta, stats=stats.tostr(),
                        mesh_uuid=intg.mesh_uuid)
    else:
        metadata = None

    # Fetch data from other plugins and add it to metadata with ad-hoc keys
    for csh in intg.completed_step_handlers:
        try:
            prefix = intg.get_plugin_data_prefix(csh.name, csh.suffix)
            pdata = csh.serialise(intg)
        except AttributeError:
            # Plugin does not support serialisation; contribute nothing
            pdata = {}

        # Only the root rank accumulates metadata (serialise is still
        # called on every rank, as it may be collective)
        if rank == root:
            metadata |= {f'{prefix}/{k}': v for k, v in pdata.items()}

    # Fetch and (if necessary) subset the solution
    data = dict(self._ele_region_data)
    for idx, etype, rgn in self._ele_regions:
        data[etype] = intg.soln[idx][..., rgn].astype(self.fpdtype)

    # Write out the file
    solnfname = self._writer.write(data, intg.tcurr, metadata)

    # If a post-action has been registered then invoke it
    self._invoke_postaction(intg=intg, mesh=intg.system.mesh.fname,
                            soln=solnfname, t=intg.tcurr)

    # Update the last output time
    self.tout_last = intg.tcurr
def test_hex_gleg_ord3_csd():
    """Check the hex basis operator matrices for a third order spectral
    difference scheme against stored reference values."""
    # Config for a third order spectral difference scheme
    cfg = Inifile()
    cfg.set('solver', 'order', '3')
    cfg.set('solver-elements-hex', 'soln-pts', 'gauss-legendre')
    cfg.set('solver-elements-hex', 'vcjh-eta', 'sd')

    # Generate the hexes
    hb = HexBasis(sy.symbols('p q r'), None, cfg)

    # Load and import the reference values
    fobj = BytesIO(pkgutil.get_data(__name__, 'hex-gleg-ord3-csd.npz'))
    refm = np.load(fobj)

    # np.float was merely an alias of the builtin float and has been
    # removed in NumPy >= 1.24; use float directly
    assert np.allclose(refm['m0'], np.asanyarray(hb.m0, dtype=float))
    assert np.allclose(refm['m1'], np.asanyarray(hb.m1, dtype=float))
    assert np.allclose(refm['m2'], np.asanyarray(hb.m2, dtype=float))
    assert np.allclose(refm['m3'], np.asanyarray(hb.m3, dtype=float))
def partition_soln(soln):
    """Repartition *soln* to match the new mesh partitioning."""
    # The solution must match the current mesh
    if curruuid != soln['mesh_uuid']:
        raise ValueError('Mismatched solution/mesh')

    # Data prefix recorded in the stats section
    prefix = Inifile(soln['stats']).get('data', 'prefix')

    # Merge any existing partitions, then split per the new partitioning
    newsoln = self._partition_soln(self._combine_soln_parts(soln, prefix),
                                   prefix, vetimap, vparts)

    # Carry over the stats/config/plugins metadata
    for f in soln:
        if re.match('stats|config|plugins', f):
            newsoln[f] = soln[f]

    # Stamp with the UUID of the new mesh
    newsoln['mesh_uuid'] = newuuid

    return newsoln
class BaseWriter(object):
    """Functionality for post-processing PyFR data to visualisation formats"""

    def __init__(self, args):
        """Load the PyFR mesh and solution files named in *args* and
        verify that the solution was computed on that mesh.

        :param args: Command line arguments passed from scripts/postp.py
        :type args: class 'argparse.Namespace'
        """
        self.outf = args.outf

        # Read the solution and mesh from disk
        self.soln = read_pyfr_data(args.solnf)
        self.mesh = read_pyfr_data(args.meshf)

        # Array metadata for each file
        self.mesh_inf = self.mesh.array_info
        self.soln_inf = self.soln.array_info

        # Number of spatial dimensions and field variables
        self.ndims = next(iter(self.mesh_inf.values()))[1][2]
        self.nvars = next(iter(self.soln_inf.values()))[1][1]

        # The solution must correspond to this very mesh
        if self.mesh['mesh_uuid'] != self.soln['mesh_uuid']:
            raise RuntimeError('Solution "%s" was not computed on mesh "%s"' %
                               (args.solnf, args.meshf))

        # Parse the embedded config
        self.cfg = Inifile(self.soln['config'])

        # Resolve the system class named in the config, and its elements
        sysname = self.cfg.get('solver', 'system')
        self.systemscls = subclass_where(BaseSystem, name=sysname)
        self.elementscls = self.systemscls.elementscls
def partition_soln(soln):
    """Repartition *soln* to match the new mesh partitioning."""
    # The solution must match the current mesh
    if curruuid != soln['mesh_uuid']:
        raise ValueError('Mismatched solution/mesh')

    # Data prefix recorded in the stats section
    prefix = Inifile(soln['stats']).get('data', 'prefix')

    # Merge any pre-existing partitions into one
    combined = self._combine_soln_parts(soln, prefix)

    # Split into the new partitions, when more than one is requested
    if self.nparts > 1:
        newsoln = self._partition_soln(combined, prefix, vetimap, vparts)
    else:
        newsoln = combined

    # Carry over the metadata from the combined solution
    newsoln['config'] = combined['config']
    newsoln['stats'] = combined['stats']
    newsoln['mesh_uuid'] = newuuid

    return newsoln
def __call__(self, intg):
    """Write a solution snapshot when the next output time is due."""
    # Only act when the next output time has (almost) arrived
    if abs(self.tout_next - intg.tcurr) > self.tol:
        return

    # Assemble the stats record for this snapshot
    stats = Inifile()
    stats.set('data', 'fields', ','.join(self.fields))
    stats.set('data', 'prefix', 'soln')
    intg.collect_stats(stats)

    # Metadata to embed alongside the data
    metadata = dict(intg.cfgmeta, stats=stats.tostr(),
                    mesh_uuid=intg.mesh_uuid)

    # Write the solution out
    fname = self._writer.write(intg.soln, metadata, intg.tcurr)

    # Run any registered post-action against the new file
    self._invoke_postaction(mesh=intg.system.mesh.fname, soln=fname,
                            t=intg.tcurr)

    # Schedule the next write
    self.tout_next = intg.tcurr + self.dt_out
def __init__(self, backend, systemcls, rallocs, mesh, initsoln, cfg):
    """Initialise the integrator, construct the flow system and load
    any registered plugins.

    :param backend: computational backend instance
    :param systemcls: system class to instantiate for this partition
    :param rallocs: rank allocation (partitioning) object
    :param mesh: native mesh file
    :param initsoln: solution to restart from, or None for a fresh run
    :param cfg: configuration (Inifile)
    :raises RuntimeError: system does not support our time stepping
        formulation
    """
    self.backend = backend
    self.rallocs = rallocs
    self.isrestart = initsoln is not None
    self.cfg = cfg

    # Config files from prior runs embedded in the restart file, if any
    self.prevcfgs = {f: initsoln[f] for f in initsoln or []
                     if f.startswith('config-')}

    # Ensure the system is compatible with our formulation
    if self.formulation not in systemcls.elementscls.formulations:
        raise RuntimeError(
            'System {0} does not support time stepping formulation {1}'
            .format(systemcls.name, self.formulation)
        )

    # Start time
    self.tstart = cfg.getfloat('solver-time-integrator', 'tstart', 0.0)
    self.tend = cfg.getfloat('solver-time-integrator', 'tend')

    # Current time; defaults to tstart unless restarting
    if self.isrestart:
        stats = Inifile(initsoln['stats'])
        self.tcurr = stats.getfloat('solver-time-integrator', 'tcurr')
    else:
        self.tcurr = self.tstart

    # List of target times to advance to
    self.tlist = deque([self.tend])

    # Accepted and rejected step counters
    self.nacptsteps = 0
    self.nrjctsteps = 0
    self.nacptchain = 0

    # Current and minimum time steps
    self._dt = cfg.getfloat('solver-time-integrator', 'dt')
    self.dtmin = cfg.getfloat('solver-time-integrator', 'dt-min', 1e-12)

    # Determine the amount of temp storage required by this method
    self.nreg = self._stepper_nregs

    # Construct the relevant mesh partition
    self._init_system(systemcls, backend, rallocs, mesh, initsoln)

    # Storage for register banks and current index
    self._init_reg_banks()

    # Extract the UUID of the mesh (to be saved with solutions)
    self.mesh_uuid = mesh['mesh_uuid']

    # Get a queue for subclasses to use
    self._queue = backend.queue()

    # Global degree of freedom count
    self._gndofs = self._get_gndofs()

    # Solution cache
    self._curr_soln = None

    # Cache of axnpby (axpy-like) kernels, keyed by their arguments
    self._axnpby_kerns = {}

    # Record the starting wall clock time
    self._wstart = time.time()

    # Event handlers for advance_to
    self.completed_step_handlers = proxylist(self._get_plugins())

    # Delete the memory-intensive elements map from the system
    del self.system.ele_map
def process_run(args):
    """Start a fresh simulation run from a mesh and a configuration file."""
    mesh = read_pyfr_data(args.mesh)
    cfg = Inifile.load(args.cfg)

    # No initial solution: this is a new run rather than a restart
    _process_common(args, mesh, None, cfg)
def __init__(self, backend, systemcls, rallocs, mesh, initsoln, cfg,
             stp_nregs, stg_nregs, dt):
    """Construct a p-multigrid dual time integrator.

    Validates the multigrid cycle against the solution order and then, for
    every polynomial level in the cycle, synthesises a dedicated
    pseudo-integrator class (``lpsint``) and instantiates it.  The top
    level retains the configured pseudo-controller; coarser levels use the
    'none' controller and a per-level configuration with a scaled
    pseudo-time step.
    """
    self.backend = backend

    sect = 'solver-time-integrator'
    mgsect = 'solver-dual-time-integrator-multip'

    # Get the solver order and set the initial multigrid level
    self._order = self.level = order = cfg.getint('solver', 'order')

    # Get the multigrid cycle
    self.cycle, self.csteps = zip(*cfg.getliteral(mgsect, 'cycle'))
    self.levels = sorted(set(self.cycle), reverse=True)

    if max(self.cycle) > self._order:
        raise ValueError('The multigrid level orders cannot exceed '
                         'the solution order')

    # Levels may only change by one between consecutive cycle entries
    if any(abs(i - j) > 1 for i, j in zip(self.cycle, self.cycle[1:])):
        raise ValueError('The orders of consecutive multigrid levels can '
                         'only change by one')

    if self.cycle[0] != self._order or self.cycle[-1] != self._order:
        raise ValueError('The multigrid cycle needs to start end with the '
                         'highest (solution) order ')

    # Initialise the number of cycles
    self.npmgcycles = 0

    # Multigrid pseudo-time steps
    dtau = cfg.getfloat(sect, 'pseudo-dt')
    self.dtauf = cfg.getfloat(mgsect, 'pseudo-dt-fact', 1.0)

    self._maxniters = cfg.getint(sect, 'pseudo-niters-max', 0)
    self._minniters = cfg.getint(sect, 'pseudo-niters-min', 0)

    # Get the multigrid pseudostepper and pseudocontroller classes
    pn = cfg.get(sect, 'pseudo-scheme')
    cn = cfg.get(sect, 'pseudo-controller')

    cc = subclass_where(BaseDualPseudoController,
                        pseudo_controller_name=cn)
    cc_none = subclass_where(BaseDualPseudoController,
                             pseudo_controller_name='none')

    # Construct a pseudo-integrator for each level
    from pyfr.integrators.dual.pseudo import get_pseudo_stepper_cls

    self.pintgs = {}
    for l in self.levels:
        pc = get_pseudo_stepper_cls(pn, l)

        if l == order:
            # Top level: real controller, unmodified configuration
            bases = [cc, pc]
            mcfg = cfg
        else:
            # Coarse level: 'none' controller and a derived configuration
            bases = [cc_none, pc]

            mcfg = Inifile(cfg.tostr())
            mcfg.set('solver', 'order', l)
            mcfg.set(sect, 'pseudo-dt', dtau * self.dtauf**(order - l))

            # Promote any 'solver-*-mg-p{l}' overrides to 'solver-*'
            for s in cfg.sections():
                if (m := re.match(f'solver-(.*)-mg-p{l}$', s)):
                    mcfg.rename_section(s, f'solver-{m.group(1)}')

        # A class that bypasses pseudo-controller methods within a cycle
        class lpsint(*bases):
            name = 'MultiPPseudoIntegrator' + str(l)
            # Coarse levels carry two extra registers for the multigrid
            # source term; stepper/stage registers only at the top level
            aux_nregs = 2 if l != self._order else 0
            stepper_nregs = stp_nregs if l == self._order else 0
            stage_nregs = stg_nregs if l == self._order else 0

            @property
            def _aux_regidx(iself):
                # Last two register indices, when auxiliaries exist
                if iself.aux_nregs != 0:
                    return iself._regidx[-2:]

            @property
            def ntotiters(iself):
                # Iteration count is the enclosing integrator's cycle count
                return self.npmgcycles

            def convmon(iself, *args, **kwargs):
                # Convergence monitoring is handled outside the cycle
                pass

            def _rhs_with_dts(iself, t, uin, fout, mg_add=True):
                # Compute -∇·f
                iself.system.rhs(t, uin, fout)

                if iself.stage_nregs > 1:
                    iself._add(0, self._stage_regidx[iself.currstg],
                               1, fout)

                # Registers
                vals = iself.stepper_coeffs[:2] + [1]
                regs = [fout, iself._idxcurr, iself._source_regidx]

                # Physical stepper source addition -∇·f - dQ/dt
                iself._addv(vals, regs, subdims=iself._subdims)

                # Multigrid r addition
                if mg_add and iself._aux_regidx:
                    iself._add(1, fout, -1, iself._aux_regidx[0])

        self.pintgs[l] = lpsint(backend, systemcls, rallocs, mesh,
                                initsoln, mcfg, stp_nregs, stg_nregs, dt)
def __call__(self, intg):
    """Accumulate time averages and, when due, write them to disk."""
    elapsed = intg.tcurr - self.tout_last
    write_due = elapsed >= self.dtout - self.tol
    accum_due = intg.nacptsteps % self.nsteps == 0

    # Nothing to do on this step
    if not (write_due or accum_due):
        return

    # Evaluate the time averaging expressions at the current time
    latest = self._eval_exprs(intg)

    # Trapezoidal accumulation; always done, even when just writing
    halfdt = 0.5 * (intg.tcurr - self.prevt)
    for acc, prev, curr in zip(self.accmex, self.prevex, latest):
        acc += halfdt * (prev + curr)

    # Stash the evaluation time and values for the next step
    self.prevt = intg.tcurr
    self.prevex = latest

    if write_due:
        # Normalise the accumulators by the elapsed time
        avg = [acc / elapsed for acc in self.accmex]

        # Assemble the stats record for this dump
        st = Inifile()
        st.set('data', 'prefix', 'tavg')
        st.set('data', 'fields', ','.join(k for k, _ in self.exprs))
        st.set('tavg', 'tstart', self.tout_last)
        st.set('tavg', 'tend', intg.tcurr)
        intg.collect_stats(st)

        # Metadata accompanying the averaged fields
        meta = dict(intg.cfgmeta, stats=st.tostr(),
                    mesh_uuid=intg.mesh_uuid)

        # Write the averages out
        self._writer.write(avg, meta, intg.tcurr)

        # Reset the window and the accumulators
        self.tout_last = intg.tcurr
        self.accmex = [np.zeros_like(a) for a in avg]
def process_run(args):
    """Launch a simulation from a mesh and config file (not a restart)."""
    mesh = NativeReader(args.mesh)
    cfg = Inifile.load(args.cfg)

    # A fresh run supplies no initial solution
    _process_common(args, mesh, None, cfg)
def process_tavg(args):
    """Compute a trapezoidal time average of a set of PyFR solution files.

    Solutions whose time lies outside ``args.limits`` (when given) are
    skipped.  All retained solutions must have been computed on the same
    mesh, and at least two are required.  The averaged fields, stats, the
    originating config and mesh UUID are written to ``args.outf`` as HDF5.

    Bug fix: the original interior loop ``for i in range(len(stimes[2:]))``
    left ``i`` undefined when exactly two solutions were in scope, so the
    final ``pb.advance_to(i + 3)`` raised ``NameError``.  The loop now runs
    over the interior indices directly and the final progress update no
    longer depends on the loop variable.
    """
    infs = {}

    # Interrogate files passed by the shell
    for fname in args.infs:
        # Load solution files and obtain solution times
        inf = read_pyfr_data(fname)
        cfg = Inifile(inf["stats"])
        tinf = cfg.getfloat("solver-time-integrator", "tcurr")

        # Retain if solution time is within limits
        if args.limits is None or args.limits[0] <= tinf <= args.limits[1]:
            infs[tinf] = inf

            # Verify that solutions were computed on the same mesh
            # (dicts preserve insertion order, so this is the first file)
            if inf["mesh_uuid"] != next(iter(infs.values()))["mesh_uuid"]:
                raise RuntimeError("Solution files in scope were not"
                                   " computed on the same mesh")

    # Sort the solution times, check for sufficient files in scope
    stimes = sorted(infs)
    if len(infs) <= 1:
        raise RuntimeError("More than one solution file is required to "
                           "compute an average")

    # Initialise progress bar
    pb = ProgressBar(0, 0, len(stimes), 0)

    # Copy over the solutions from the first time dump
    solnfs = infs[stimes[0]].soln_files
    avgs = {s: infs[stimes[0]][s].copy() for s in solnfs}

    # Weight the initialised trapezoidal mean
    dtnext = stimes[1] - stimes[0]
    for name in solnfs:
        avgs[name] *= 0.5 * dtnext
    pb.advance_to(1)

    # Compute the trapezoidal mean over the interior solution files
    for i in range(1, len(stimes) - 1):
        dtlast = dtnext
        dtnext = stimes[i + 1] - stimes[i]

        # Weight the current solution, then add to the mean
        for name in solnfs:
            avgs[name] += 0.5 * (dtlast + dtnext) * infs[stimes[i]][name]
        pb.advance_to(i + 1)

    # Weight final solution, update mean and normalise for elapsed time
    for name in solnfs:
        avgs[name] += 0.5 * dtnext * infs[stimes[-1]][name]
        avgs[name] *= 1.0 / (stimes[-1] - stimes[0])
    pb.advance_to(len(stimes))

    # Compute and assign stats for a time-averaged solution
    stats = Inifile()
    stats.set("time-average", "tmin", stimes[0])
    stats.set("time-average", "tmax", stimes[-1])
    stats.set("time-average", "ntlevels", len(stimes))
    avgs["stats"] = stats.tostr()

    # Copy over the ini file and mesh uuid
    avgs["config"] = infs[stimes[0]]["config"]
    avgs["mesh_uuid"] = infs[stimes[0]]["mesh_uuid"]

    # Save to disk
    with h5py.File(args.outf, "w") as f:
        for k, v in avgs.items():
            f[k] = v
def process_run(args):
    """Return the (mesh, initial solution, config) triple for a fresh run."""
    mesh = read_pyfr_data(args.mesh)
    cfg = Inifile.load(args.cfg)

    # A fresh run carries no initial solution
    return mesh, None, cfg
def __call__(self, intg):
    """Accumulate and periodically write time-averaged fields.

    Supports a 'windowed' mode, which averages over the current output
    window only, and a cumulative mode, which folds each window into a
    running total before normalising.  Optional functional expressions
    are evaluated on the averaged fields and appended to the output.
    Writing is collective; only the root rank prepares the metadata.
    """
    # If we are not supposed to be averaging yet then return
    if intg.tcurr < self.tstart:
        return

    # If necessary, run the start-up routines
    if not self._started:
        self._init_accumex(intg)
        self._started = True

    # See if we are due to write and/or accumulate this step
    dowrite = intg.tcurr - self.tout_last >= self.dtout - self.tol
    doaccum = intg.nacptsteps % self.nsteps == 0

    if dowrite or doaccum:
        # Evaluate the time averaging expressions
        currex = self._eval_acc_exprs(intg)

        # Accumulate them; always do this even when just writing
        # (trapezoidal rule between the previous and current samples)
        for a, p, c in zip(self.accex, self.prevex, currex):
            a += 0.5*(intg.tcurr - self.prevt)*(p + c)

        # Save the time and solution
        self.prevt = intg.tcurr
        self.prevex = currex

        if dowrite:
            comm, rank, root = get_comm_rank_root()

            if self.mode == 'windowed':
                # Average over the current window only
                accex = self.accex
                tstart = self.tout_last
            else:
                # Cumulative mode: fold this window into the running total
                for a, c in zip(self.accex, self.caccex):
                    c += a

                accex = self.caccex
                tstart = self.tstart_actual

            # Normalise the accumulated expressions
            tavg = [a / (intg.tcurr - tstart) for a in accex]

            # Evaluate any functional expressions
            if self.fexprs:
                funex = self._eval_fun_exprs(intg, tavg)
                tavg = [np.hstack([a, f]) for a, f in zip(tavg, funex)]

            # Form the output records to be written to disk
            data = dict(self._ele_region_data)
            for (idx, etype, rgn), d in zip(self._ele_regions, tavg):
                data[etype] = d.astype(self.fpdtype)

            # If we are the root rank then prepare the metadata
            if rank == root:
                stats = Inifile()
                stats.set('data', 'prefix', 'tavg')
                stats.set('data', 'fields', ','.join(self.outfields))
                stats.set('tavg', 'tstart', tstart)
                stats.set('tavg', 'tend', intg.tcurr)
                intg.collect_stats(stats)

                metadata = dict(intg.cfgmeta, stats=stats.tostr(),
                                mesh_uuid=intg.mesh_uuid)
            else:
                metadata = None

            # Write to disk
            solnfname = self._writer.write(data, intg.tcurr, metadata)

            # If a post-action has been registered then invoke it
            self._invoke_postaction(intg=intg, mesh=intg.system.mesh.fname,
                                    soln=solnfname, t=intg.tcurr)

            # Reset the accumulators
            for a in self.accex:
                a.fill(0)

            self.tout_last = intg.tcurr
def __init__(self, backend, systemcls, rallocs, mesh, initsoln, cfg,
             tcoeffs, dt):
    """Construct a p-multigrid dual time integrator.

    Validates the multigrid cycle against the solution order, builds one
    synthesised pseudo-integrator class (``lpsint``) per polynomial level
    and instantiates each.  The top level keeps the configured
    pseudo-controller; coarser levels get the 'none' controller and a
    derived configuration with a scaled pseudo-time step.  Finally the
    restriction/prolongation matrices are set up and the coarse systems'
    element maps are released.
    """
    self.backend = backend

    sect = 'solver-time-integrator'
    mgsect = 'solver-dual-time-integrator-multip'

    # Get the solver order and set the initial multigrid level
    self._order = self.level = order = cfg.getint('solver', 'order')

    # Get the multigrid cycle
    self.cycle, self.csteps = zip(*cfg.getliteral(mgsect, 'cycle'))
    self.levels = sorted(set(self.cycle), reverse=True)

    if max(self.cycle) > self._order:
        raise ValueError('The multigrid level orders cannot exceed '
                         'the solution order')

    # Levels may only change by one between consecutive cycle entries
    if any(abs(i - j) > 1 for i, j in zip(self.cycle, self.cycle[1:])):
        raise ValueError('The orders of consecutive multigrid levels can '
                         'only change by one')

    if self.cycle[0] != self._order or self.cycle[-1] != self._order:
        raise ValueError('The multigrid cycle needs to start end with the '
                         'highest (solution) order ')

    # Initialise the number of cycles
    self.npmgcycles = 0

    # Multigrid pseudo-time steps
    dtau = cfg.getfloat(sect, 'pseudo-dt')
    self.dtauf = cfg.getfloat(mgsect, 'pseudo-dt-fact', 1.0)

    self._maxniters = cfg.getint(sect, 'pseudo-niters-max', 0)
    self._minniters = cfg.getint(sect, 'pseudo-niters-min', 0)

    # Get the multigrid pseudostepper and pseudocontroller classes
    pn = cfg.get(sect, 'pseudo-scheme')
    cn = cfg.get(sect, 'pseudo-controller')

    cc = subclass_where(BaseDualPseudoController,
                        pseudo_controller_name=cn)
    cc_none = subclass_where(BaseDualPseudoController,
                             pseudo_controller_name='none')

    # Construct a pseudo-integrator for each level
    from pyfr.integrators.dual.pseudo import get_pseudo_stepper_cls

    self.pintgs = {}
    for l in self.levels:
        pc = get_pseudo_stepper_cls(pn, l)

        if l == order:
            # Top level: real controller, unmodified configuration
            bases = [cc, pc]
            mcfg = cfg
        else:
            # Coarse level: 'none' controller and a derived configuration
            bases = [cc_none, pc]

            mcfg = Inifile(cfg.tostr())
            mcfg.set('solver', 'order', l)
            mcfg.set(sect, 'pseudo-dt', dtau * self.dtauf**(order - l))

            # Promote any 'solver-*-mg-p{l}' overrides to 'solver-*'
            for s in cfg.sections():
                m = re.match(f'solver-(.*)-mg-p{l}$', s)
                if m:
                    mcfg.rename_section(s, f'solver-{m.group(1)}')

        # A class that bypasses pseudo-controller methods within a cycle
        class lpsint(*bases):
            name = 'MultiPPseudoIntegrator' + str(l)
            # Coarse levels carry two extra registers for the multigrid
            # source term
            aux_nregs = 2 if l != self._order else 0

            @property
            def _aux_regidx(iself):
                # Last two register indices, when auxiliaries exist
                if iself.aux_nregs != 0:
                    return iself._regidx[-2:]

            @property
            def ntotiters(iself):
                # Iteration count is the enclosing integrator's cycle count
                return self.npmgcycles

            def convmon(iself, *args, **kwargs):
                # Convergence monitoring is handled outside the cycle
                pass

            def finalise_pseudo_advance(iself, *args, **kwargs):
                # Finalisation is handled by the enclosing integrator
                pass

            def _rhs_with_dts(iself, t, uin, fout):
                # Compute -∇·f
                iself.system.rhs(t, uin, fout)

                # Coefficients for the physical stepper
                svals = [sc / iself._dt for sc in iself._stepper_coeffs]

                # Physical stepper source addition -∇·f - dQ/dt
                axnpby = iself._get_axnpby_kerns(len(svals) + 1,
                                                 subdims=iself._subdims)
                iself._prepare_reg_banks(fout, iself._idxcurr,
                                         *iself._stepper_regidx)
                iself._queue.enqueue_and_run(axnpby, 1, *svals)

                # Multigrid r addition
                if iself._aux_regidx:
                    axnpby = iself._get_axnpby_kerns(2)
                    iself._prepare_reg_banks(fout, iself._aux_regidx[0])
                    iself._queue.enqueue_and_run(axnpby, 1, -1)

        self.pintgs[l] = lpsint(backend, systemcls, rallocs, mesh,
                                initsoln, mcfg, tcoeffs, dt)

    # Get the highest p system from plugins
    self.system = self.pintgs[self._order].system

    # Get the convergence monitoring method
    self.mg_convmon = cc.convmon

    # Initialise the restriction and prolongation matrices
    self._init_proj_mats()

    # Delete remaining elements maps from multigrid systems
    for l in self.levels[1:]:
        del self.pintgs[l].system.ele_map