def __init__(self, base_name, t_min=0., t_max=numpy.inf):
    """ Initialise the cache from a base file name and optional limits
    on the time levels desired.

    Args:
        base_name (str): stem used to glob for '<base_name>_<N>.vtu'
            files (or '.pvtu' master files when running in parallel).
        t_min (float): lower time bound passed on to self.range().
        t_max (float): upper time bound passed on to self.range().
    """
    # Parallel runs dump one .pvtu master file per time level; serial
    # runs dump plain .vtu files.
    if Parallel.is_parallel():
        files = glob.glob(base_name + '_[0-9]*.pvtu')
    else:
        files = glob.glob(base_name + '_[0-9]*.vtu')

    self.data = []
    self.reset()
    # (A stray Python-2 debug statement, "print files", was removed here:
    # it is a syntax error under Python 3.)
    for filename in files:
        if Parallel.is_parallel():
            pfilename = self.get_piece_filename_from_vtk(filename)
        else:
            pfilename = filename
        time = self.get_time_from_vtk(pfilename)
        # Entry layout: [time, piece filename, reader slot, locator slot].
        self.data.append([time, pfilename, None, None])
    # Python 3: list.sort() no longer accepts cmp=; sort on the time field.
    self.data.sort(key=lambda entry: entry[0])
    self.range(t_min, t_max)
def write(self):
    """ Write the staged vtkPolyData to a file."""
    self.poly_data.SetPoints(self.pnts)
    self.poly_data.Allocate(len(self.cell_ids))
    # Each staged entry of self.cell_ids becomes a VTK_LINE cell.
    for cell_id in self.cell_ids.values():
        self.poly_data.InsertNextCell(vtk.VTK_LINE, cell_id)
    writer = WRITER[vtk.VTK_POLY_DATA]()
    writer.SetFileName(self.filename)
    if Parallel.is_parallel():
        # One piece per MPI rank.
        writer.SetNumberOfPieces(Parallel.get_size())
        writer.SetStartPiece(Parallel.get_rank())
        writer.SetEndPiece(Parallel.get_rank())
        if vtk.vtkVersion.GetVTKMajorVersion() <= 6:
            # Old VTK API: only rank 0 writes the summary (.pvtp) file.
            writer.SetWriteSummaryFile(Parallel.get_rank() == 0)
        else:
            # Newer VTK: summary writing is coordinated through an MPI
            # controller attached to the world communicator.
            controller = vtk.vtkMPIController()
            controller.SetCommunicator(
                vtk.vtkMPICommunicator.GetWorldCommunicator())
            writer.SetController(controller)
    # SetInput was replaced by SetInputData in VTK 6.
    if vtk.vtkVersion.GetVTKMajorVersion() < 6:
        writer.SetInput(self.poly_data)
    else:
        writer.SetInputData(self.poly_data)
    writer.Write()
    if Parallel.is_parallel():
        # NOTE(review): make_subdirectory presumably tidies the per-rank
        # piece files next to the summary file -- confirm its contract.
        make_subdirectory(self.filename)
def update(self, delta_t=None, *args, **kwargs):
    """ Update all the particles in the bucket to the next time level."""
    logger.info("In ParticleBucket.Update: %d particles",
                len(self.particles))

    # redistribute particles to partitions in case of parallel adaptivity
    if Parallel.is_parallel():
        self.redistribute()

    # reset the particle timestep
    if delta_t is not None:
        self.delta_t = delta_t
    self.system.temporal_cache.range(self.time, self.time + self.delta_t)

    alive = self.system.in_system(self.pos(), len(self), self.time)

    # Advance the live particles; collect the dead ones and remove them
    # afterwards (removing while iterating would skip entries).
    to_remove = []
    for index, particle in enumerate(self):
        if alive[index]:
            particle.update(self.delta_t, *args, **kwargs)
        else:
            self.dead_particles.append(particle)
            to_remove.append(particle)
    for particle in to_remove:
        self.particles.remove(particle)

    self.redistribute()
    self.insert_particles(*args, **kwargs)

    # Advance the bucket clock and stamp every surviving particle.
    self.time += self.delta_t
    for particle in self:
        particle.time = self.time
def get_mesh_filename(self):
    """Return the mesh file name"""
    # In parallel each rank reads its own partition, '<name>_<rank>.msh'.
    if Parallel.is_parallel():
        suffix = '_%d.msh' % Parallel.get_rank()
    else:
        suffix = '.msh'
    return libspud.get_option(
        '/geometry/mesh::CoordinateMesh/from_file/file_name') + suffix
def redistribute(self):
    """ In parallel, redistribute particles to their owner process."""
    # Nothing to do when offline or running serially.
    if not (self._online and Parallel.is_parallel()):
        return
    logger.debug("%d particles before redistribution",
                 len(self.particles))
    self.particles = Parallel.distribute_particles(
        self.particles, self.system)
    logger.debug("%d particles after redistribution", len(self))
def __init__(self, base_name, t_min=0., t_max=numpy.inf, online=False,
             parallel_files=False, timescale_factor=1.0, **kwargs):
    """ Initialise the cache from a base file name and optional limits
    on the time levels desired.

    Args:
        base_name (str): either a .pvd collection file or the stem used
            to glob for '<base_name>_<N>.<fileext>' files.
        t_min (float): lower time bound passed on to self.range().
        t_max (float): upper time bound passed on to self.range().
        online (bool): with a parallel run, read per-piece file names
            out of the parallel master files.
        parallel_files (bool): force globbing for parallel (.p*) files.
        timescale_factor (float): multiplier applied to every time level.
        **kwargs: forwarded to set_field_names(); 'fileext' selects the
            data file extension (default 'vtu').
    """
    self.data = []
    self.set_field_names(**kwargs)
    self.reset()
    if base_name.rsplit(".", 1)[-1] == "pvd":
        # A .pvd collection already lists (time, filename) pairs.
        for time, filename in read_pvd(base_name):
            self.data.append(
                [timescale_factor * time, filename, None, None])
    else:
        if (Parallel.is_parallel() and online) or parallel_files:
            files = glob.glob(base_name + '_[0-9]*.p%s'
                              % kwargs.get('fileext', 'vtu'))
        else:
            files = glob.glob(base_name + '_[0-9]*.%s'
                              % kwargs.get('fileext', 'vtu'))
        for filename in files:
            if (Parallel.is_parallel() and online):
                pfilename = get_piece_filename_from_vtk(filename)
            else:
                pfilename = filename
            try:
                time = self.get_time_from_vtk(pfilename)
            except Exception:
                # Was a bare "except:", which also swallowed SystemExit
                # and KeyboardInterrupt. When no time can be read from
                # the file, fall back to the dump index embedded in the
                # file name ('..._<N>.vtu').
                time = int(filename.rsplit('.', 1)[0].rsplit('_', 1)[1])
            self.data.append(
                [timescale_factor * time, pfilename, None, None])
    self.data.sort(key=lambda x: x[0])
    self.range(t_min, t_max)
    self.cache = DataCache()
def update_collision_polydata(bucket, base_name, **kwargs):
    """ Update collisions from data in the particle bucket."""
    # Parallel runs use the parallel (.pvtp) file format.
    fext = 'pvtp' if Parallel.is_parallel() else 'vtp'
    collision_list_to_polydata(bucket.collisions(),
                               base_name + '_collisions.' + fext)
def insert_particles(self, *args, **kwargs):
    """Deal with particle insertion.

    For each inlet, draw the number of particles to insert over this
    timestep, place each at a random fraction of the step, advance it
    through the remaining fraction, and append it to the bucket.
    """
    for inlet in self.system.boundary.inlets:
        # The serial and parallel branches computed the same thing (the
        # parallel special-casing was commented-out dead code), so the
        # if/else has been collapsed.
        n_par = inlet.get_number_of_insertions(self.time, self.delta_t)
        if n_par == 0:
            continue
        weights = inlet.cum_weight(self.time + 0.5 * self.delta_t,
                                   self.system.boundary.bnd,
                                   self.system.temporal_cache)
        if not weights:
            continue
        for _ in range(n_par):
            # Insertion instant is a uniform random fraction of the step.
            prob = numpy.random.random()
            time = self.time + prob * self.delta_t
            pos = inlet.select_point(time, weights,
                                     self.system.boundary.bnd)
            if inlet.velocity:
                vel = numpy.array(inlet.velocity(pos, time))
            else:
                # No prescribed inlet velocity: take the local fluid
                # velocity from the temporal cache.
                vel = numpy.zeros(3)
                fvel = self.system.temporal_cache.get_velocity(pos, time)
                vel[:len(fvel)] = fvel
            # update position by fractional timestep
            pos = pos + vel * (1 - prob) * self.delta_t
            data, alpha, names = self.system.temporal_cache(time)
            cell_id, pcoords = vtk_extras.FindCell(data[0][3], pos)
            if cell_id == -1:
                # Advected point landed outside the mesh: skip it.
                continue
            par = Particle((pos, vel, time, (1.0 - prob) * self.delta_t),
                           system=self.system,
                           parameters=self.parameters.randomize(),
                           **inlet.kwargs)
            par.delta_t = self.delta_t
            par.fields["InsertionTime"] = time
            self.particles.append(par)
def write_to_file(vtk_data, outfile):
    """ Wrapper around the various VTK writer routines"""
    # Pick the writer class matching this dataset type (see WRITER map).
    writer = WRITER[vtk_data.GetDataObjectType()]()
    writer.SetFileName(outfile)
    if Parallel.is_parallel():
        # One piece per MPI rank; only rank 0 writes the summary file.
        writer.SetNumberOfPieces(Parallel.get_size())
        writer.SetStartPiece(Parallel.get_rank())
        writer.SetEndPiece(Parallel.get_rank())
        writer.SetWriteSummaryFile(Parallel.get_rank()==0)
    # SetInput was replaced by SetInputData in VTK 6.
    if vtk.vtkVersion.GetVTKMajorVersion()<6:
        writer.SetInput(vtk_data)
    else:
        writer.SetInputData(vtk_data)
    writer.Write()
def write_to_file(vtk_data, outfile):
    """ Wrapper around the various VTK writer routines"""
    # Pick the writer class matching this dataset type (see WRITER map).
    writer = WRITER[vtk_data.GetDataObjectType()]()
    writer.SetFileName(outfile)
    if Parallel.is_parallel():
        # One piece per MPI rank.
        writer.SetNumberOfPieces(Parallel.get_size())
        writer.SetStartPiece(Parallel.get_rank())
        writer.SetEndPiece(Parallel.get_rank())
        if vtk.vtkVersion.GetVTKMajorVersion() <= 6:
            # Old VTK API: only rank 0 writes the summary file.
            writer.SetWriteSummaryFile(Parallel.get_rank() == 0)
        else:
            # Newer VTK: summary writing is coordinated through an MPI
            # controller attached to the world communicator.
            controller = vtk.vtkMPIController()
            controller.SetCommunicator(
                vtk.vtkMPICommunicator.GetWorldCommunicator())
            writer.SetController(controller)
    # SetInput was replaced by SetInputData in VTK 6.
    if vtk.vtkVersion.GetVTKMajorVersion() < 6:
        writer.SetInput(vtk_data)
    else:
        writer.SetInputData(vtk_data)
    writer.Write()
    if Parallel.is_parallel():
        # NOTE(review): make_subdirectory presumably tidies the per-rank
        # piece files next to the summary file -- confirm its contract.
        make_subdirectory(outfile)
def in_system(self, points, size, time):
    """ Check that the points of X are inside the system data """
    out = empty(size, bool)
    # With no cached data, or in parallel, everything counts as inside.
    if self.temporal_cache is None or Parallel.is_parallel():
        out[:] = True
        return out
    grid = self.temporal_cache(time)[0][0][2]
    locator = vtk.vtkCellLocator()
    dataset = grid if grid.IsA('vtkUnstructuredGrid') else grid.GetBlock(0)
    locator.SetDataSet(dataset)
    locator.BuildLocator()
    # A point is inside when the locator finds a containing cell.
    for idx, point in enumerate(points):
        out[idx] = locator.FindCell(point) > -1
    return out
def in_system(self, points, time):
    """ Check that the points of X are inside the system data """
    result = empty(points.shape[0], bool)
    # With no cached data, or in parallel, everything counts as inside.
    if self.temporal_cache is None or Parallel.is_parallel():
        result[:] = True
        return result
    dataset = self.temporal_cache(time)[0][0][2]
    locator = vtk.vtkCellLocator()
    if dataset.IsA('vtkUnstructuredGrid'):
        locator.SetDataSet(dataset)
    else:
        locator.SetDataSet(dataset.GetBlock(0))
    locator.BuildLocator()
    # A point is inside when the locator finds a containing cell.
    for num, pnt in enumerate(points):
        result[num] = locator.FindCell(pnt) > -1
    return result
def __init__(self, filename, fields=None):
    """ Initialize the PolyData instance"""
    # Accept a ready-made .vtp/.pvtp name, otherwise append the
    # extension appropriate to a serial or parallel run.
    if filename.rsplit('.', 1)[-1] in ('pvtp', 'vtp'):
        self.filename = filename
    else:
        self.filename = filename + (
            '.pvtp' if Parallel.is_parallel() else '.vtp')
    self.cell_ids = {}
    self.poly_data = vtk.vtkPolyData()
    self.pnts = vtk.vtkPoints()
    self.pnts.Allocate(0)
    # 'Time' is always written as a single-component field.
    self.fields = fields or {}
    self.fields["Time"] = 1
    self.arrays = {}
    for field_name, n_comps in self.fields.items():
        array = vtk.vtkDoubleArray()
        array.SetName(field_name)
        array.SetNumberOfComponents(n_comps)
        self.poly_data.GetPointData().AddArray(array)
def test_paralle():
    """ Check that the Parallel module reports a parallel run.

    NOTE(review): the name has a typo ('paralle' -> 'parallel'); it is
    left unchanged because renaming would change the collected test id.
    """
    assert Parallel.is_parallel()
def write_level_to_polydata(bucket, level, basename=None, do_average=False,
                            field_data=None, **kwargs):
    """Output a time level of a particle bucket to a vtkPolyData (.vtp)
    files.

    Each file contains one time level of the data, and are numbered
    sequentially. Within each file, each particle is written to seperate
    pixel.

    Args:
        bucket (ParticleBucket):
        level (int):
        basename (str): String used in the construction of the file
            series. The formula is of the form basename_0.vtp,
            basename_1.vtp,...
        do_average (bool): when true, also return the result of
            calculate_averaged_properties_cpp on the assembled data.
        field_data (dict): optional {name: component count} of extra
            per-particle fields to write."""
    del kwargs  # remaining keyword arguments are deliberately ignored
    field_data = field_data or {}
    # One point plus one pixel cell per particle.
    poly_data = vtk.vtkPolyData()
    pnts = vtk.vtkPoints()
    pnts.Allocate(0)
    poly_data.SetPoints(pnts)
    poly_data.Allocate(len(bucket))
    # Scalar arrays: dump time (single value, field data), per-particle
    # id (point data) and a live flag (cell data).
    outtime = vtk.vtkDoubleArray()
    outtime.SetName('Time')
    outtime.Allocate(1)
    particle_id = vtk.vtkDoubleArray()
    particle_id.SetName('ParticleID')
    particle_id.Allocate(len(bucket))
    live = vtk.vtkDoubleArray()
    live.SetName('Live')
    live.Allocate(len(bucket))
    plive = bucket.system.in_system(bucket.pos(), len(bucket), bucket.time)
    for _, par in zip(plive, bucket):
        # hash(par) serves as a stable per-particle identifier.
        particle_id.InsertNextValue(hash(par))
        if _:
            live.InsertNextValue(1.0)
        else:
            live.InsertNextValue(0.0)
    velocity = vtk.vtkDoubleArray()
    velocity.SetNumberOfComponents(3)
    velocity.Allocate(len(bucket))
    velocity.SetName('Particle Velocity')
    for positions, vel in zip(bucket.pos(), bucket.vel()):
        pixel = vtk.vtkPixel()
        pixel.GetPointIds().InsertId(
            0, poly_data.GetPoints().InsertNextPoint(positions[0],
                                                     positions[1],
                                                     positions[2]))
        velocity.InsertNextTuple3(vel[0], vel[1], vel[2])
        poly_data.InsertNextCell(pixel.GetCellType(), pixel.GetPointIds())
    outtime.InsertNextValue(bucket.time)
    poly_data.GetFieldData().AddArray(outtime)
    poly_data.GetPointData().AddArray(velocity)
    poly_data.GetPointData().AddArray(particle_id)
    poly_data.GetCellData().AddArray(live)
    # Extra per-particle fields requested by the caller.
    for name, num_comps in field_data.items():
        _ = vtk.vtkDoubleArray()
        _.SetName(name)
        _.SetNumberOfComponents(num_comps)
        _.Allocate(len(bucket))
        for particle in bucket:
            # NOTE(review): InsertNextValue appends one component at a
            # time; for num_comps > 1 this assumes particle.fields[name]
            # behaves as a single value -- confirm for vector fields.
            _.InsertNextValue(particle.fields[name])
        poly_data.GetPointData().AddArray(_)
    if do_average:
        gsp = calculate_averaged_properties_cpp(poly_data)
    if Parallel.is_parallel():
        file_ext = 'pvtp'
    else:
        file_ext = 'vtp'
    write_to_file(poly_data, "%s_%d.%s" % (basename, level, file_ext))
    if do_average:
        return gsp
import numpy import os import os.path import scipy import copy from scipy.interpolate import griddata TYPES_3D = [vtk.VTK_TETRA, vtk.VTK_QUADRATIC_TETRA] TYPES_2D = [vtk.VTK_TRIANGLE, vtk.VTK_QUADRATIC_TRIANGLE] TYPES_1D = [vtk.VTK_LINE] TYPE_DICT = {1 : vtk.VTK_LINE, 2 : vtk.VTK_TRIANGLE, 4 : vtk.VTK_TETRA, 15 : vtk.VTK_PIXEL} WRITER = {vtk.VTK_UNSTRUCTURED_GRID:(vtk.vtkXMLPUnstructuredGridWriter if Parallel.is_parallel() else vtk.vtkXMLUnstructuredGridWriter), vtk.VTK_POLY_DATA:(vtk.vtkXMLPPolyDataWriter if Parallel.is_parallel() else vtk.vtkXMLPolyDataWriter),} class GmshMesh(object): """This is a class for storing nodes and elements. Members: nodes -- A dict of the form { nodeID: [ xcoord, ycoord, zcoord] } elements -- A dict of the form { elemID: (type, [tags], [nodeIDs]) } Methods: read(file) -- Parse a Gmsh version 1.0 or 2.0 mesh file write(file) -- Output a Gmsh version 2.0 mesh file
ARGI = vtk.mutable(0) WEIGHTS = numpy.zeros(10) SUB_ID = vtk.mutable(0) CELL = vtk.vtkGenericCell() TYPE_DICT = { 1: vtk.VTK_LINE, 2: vtk.VTK_TRIANGLE, 4: vtk.VTK_TETRA, 15: vtk.VTK_PIXEL } WRITER = { vtk.VTK_UNSTRUCTURED_GRID: (vtk.vtkXMLPUnstructuredGridWriter if Parallel.is_parallel() else vtk.vtkXMLUnstructuredGridWriter), vtk.VTK_POLY_DATA: (vtk.vtkXMLPPolyDataWriter if Parallel.is_parallel() else vtk.vtkXMLPolyDataWriter), vtk.VTK_TABLE: (None if Parallel.is_parallel() else vtk.vtkDelimitedTextWriter) } class PolyData(object): """ Class storing a living vtkPolyData construction""" def __init__(self, filename, fields=None): """ Initialize the PolyData instance""" if filename.rsplit('.', 1)[-1] in ('pvtp', 'vtp'): self.filename = filename else:
def write_level_to_polydata(bucket, level, basename=None, do_average=False,
                            **kwargs):
    """Output a time level of a particle bucket to a vtkPolyData (.vtp)
    files.

    Each file contains one time level of the data, and are numbered
    sequentially. Within each file, each particle is written to seperate
    pixel.

    Args:
        bucket (ParticleBucket):
        level (int):
        basename (str): String used in the construction of the file
            series. The formula is of the form basename_0.vtp,
            basename_1.vtp,..."""
    del kwargs  # remaining keyword arguments are deliberately ignored
    # One point plus one pixel cell per particle.
    poly_data = vtk.vtkPolyData()
    pnts = vtk.vtkPoints()
    pnts.Allocate(0)
    poly_data.SetPoints(pnts)
    poly_data.Allocate(bucket.pos.shape[0])
    outtime = vtk.vtkDoubleArray()
    outtime.SetName('Time')
    outtime.Allocate(bucket.pos.shape[0])
    particle_id = vtk.vtkDoubleArray()
    particle_id.SetName('ParticleID')
    particle_id.Allocate(bucket.pos.shape[0])
    # NOTE(review): enumerate() is used but only par[1] is read -- the
    # index is unused; `for par in bucket.particles` would be clearer.
    for par in enumerate(bucket.particles):
        particle_id.InsertNextValue(par[1].id())
    velocity = vtk.vtkDoubleArray()
    velocity.SetNumberOfComponents(3)
    velocity.Allocate(bucket.pos.shape[0])
    velocity.SetName('Particle Velocity')
    for positions, vel in zip(bucket.pos, bucket.vel):
        pixel = vtk.vtkPixel()
        pixel.GetPointIds().InsertId(
            0, poly_data.GetPoints().InsertNextPoint(positions[0],
                                                     positions[1],
                                                     positions[2]))
        # The dump time is repeated once per point here (unlike the
        # newer variant, which stores it once as field data).
        outtime.InsertNextValue(bucket.time)
        velocity.InsertNextTuple3(vel[0], vel[1], vel[2])
        poly_data.InsertNextCell(pixel.GetCellType(), pixel.GetPointIds())
    poly_data.GetPointData().AddArray(outtime)
    poly_data.GetPointData().AddArray(velocity)
    poly_data.GetPointData().AddArray(particle_id)
    if do_average:
        gsp=calculate_averaged_properties_cpp(poly_data)
    if Parallel.is_parallel():
        file_ext='pvtp'
    else:
        file_ext='vtp'
    write_to_file(poly_data, "%s_%d.%s"%(basename, level,file_ext))
    if do_average:
        return gsp
def test_serial():
    """ Check that the Parallel module reports a serial run."""
    running_in_parallel = Parallel.is_parallel()
    assert not running_in_parallel
def redistribute(self):
    """ Hand particles over to their owning process (parallel only)."""
    if not Parallel.is_parallel():
        return
    self.particles = Parallel.distribute_particles(
        self.particles, self.system)