Code example #1
File: TemporalCache.py Project: jrper/ParticleModule
    def __init__(self, base_name, t_min=0., t_max=numpy.infty):
        """
        Initialise the cache from a base file name and optional limits on the time levels desired.
        """

        if Parallel.is_parallel():
            files = glob.glob(base_name+'_[0-9]*.pvtu')
        else:
            files = glob.glob(base_name+'_[0-9]*.vtu')

        self.data = []
        self.reset()

        print(files)

        for filename in files:
            if Parallel.is_parallel():
                pfilename = self.get_piece_filename_from_vtk(filename)
            else:
                pfilename = filename
            time = self.get_time_from_vtk(pfilename)
            self.data.append([time, pfilename, None, None])

        self.data.sort(key=lambda x: x[0])
        self.range(t_min, t_max)
Code example #2
File: Options.py Project: LilyIp84/ParticleModule
    def get_mesh_filename(self):
        """Return the mesh file name"""
        if Parallel.is_parallel():
            return libspud.get_option(
                '/geometry/mesh::CoordinateMesh/from_file/file_name'
            ) + '_%d.msh' % Parallel.get_rank()
        # otherwise
        return libspud.get_option(
            '/geometry/mesh::CoordinateMesh/from_file/file_name') + '.msh'
Code example #3
    def redistribute(self):
        """ In parallel, redistrbute particles to their owner process."""
        if self._online and Parallel.is_parallel():
            logger.debug("%d particles before redistribution",
                         len(self.particles))
            self.particles = Parallel.distribute_particles(
                self.particles, self.system)

            logger.debug("%d particles after redistribution", len(self))
Code example #4
File: IO.py Project: daveb-dev/ParticleModule
def make_subdirectory(fname):
    """Create directory to store parallel data."""
    Parallel.barrier()
    if Parallel.get_rank() == 0:
        base_name, file_type = fname.rsplit('.', 1)
        if not os.path.isdir(base_name):
            os.mkdir(base_name)
        # The parallel summary references pieces with the serial extension,
        # hence file_type[1:] strips the leading 'p' (e.g. '.pvtp' -> '.vtp').
        for piece in glob.glob(base_name + '_*.%s' % file_type[1:]):
            os.rename(piece, base_name + '/' + piece)
            with open(fname) as summary_file:
                new_text = summary_file.read().replace(piece, base_name + '/' + piece)
            with open(fname, 'w') as summary_file:
                summary_file.write(new_text)
Code example #5
File: IO.py Project: jrper/ParticleModule
def write_to_file(vtk_data, outfile):
    """ Wrapper around the various VTK writer routines"""

    writer = WRITER[vtk_data.GetDataObjectType()]()
    writer.SetFileName(outfile)
    if Parallel.is_parallel():
        writer.SetNumberOfPieces(Parallel.get_size())
        writer.SetStartPiece(Parallel.get_rank())
        writer.SetEndPiece(Parallel.get_rank())
        writer.SetWriteSummaryFile(Parallel.get_rank()==0)
    if vtk.vtkVersion.GetVTKMajorVersion()<6:
        writer.SetInput(vtk_data)
    else:
        writer.SetInputData(vtk_data)
    writer.Write()
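
A minimal usage sketch for the wrapper above (the file name is hypothetical and the data is an empty vtkPolyData): the WRITER lookup picks the XML writer matching the data object's type, so the same call works for grids and polydata alike.

import vtk

# Hypothetical example: write an (empty) vtkPolyData through the wrapper.
# Under MPI each rank writes its own piece; rank 0 also writes the summary.
poly_data = vtk.vtkPolyData()
write_to_file(poly_data, 'example.vtp')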
Code example #6
    def update(self, delta_t=None, *args, **kwargs):
        """ Update all the particles in the bucket to the next time level."""

        logger.info("In ParticleBucket.Update: %d particles",
                    len(self.particles))

        # redistribute particles to partitions in case of parallel adaptivity
        if Parallel.is_parallel():
            self.redistribute()
        # reset the particle timestep
        if delta_t is not None:
            self.delta_t = delta_t
        self.system.temporal_cache.range(self.time, self.time + self.delta_t)
        live = self.system.in_system(self.pos(), len(self), self.time)
        removed = []
        for k, part in enumerate(self):
            if live[k]:
                part.update(self.delta_t, *args, **kwargs)
            else:
                self.dead_particles.append(part)
                removed.append(part)
        for part in removed:
            self.particles.remove(part)
        self.redistribute()
        self.insert_particles(*args, **kwargs)
        self.time += self.delta_t
        for part in self:
            part.time = self.time
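
A minimal driver sketch for the update loop above (assuming a fully initialised ParticleBucket called bucket; the step count and timestep are illustrative):

# Hypothetical driver: advance the bucket over ten fixed timesteps.
for _ in range(10):
    bucket.update(delta_t=0.1)
    print(bucket.time, len(bucket.particles))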
Code example #7
File: ParticleBase.py Project: jrper/ParticleModule
    def __init__(self, pos, vel, time=0.0, delta_t=1.0, phash=None):

        self.pos = pos
        self.vel = vel
        self.time = time
        self.delta_t = delta_t
        self.id = Parallel.particle_id(phash)
        self._old = None
Code example #8
    def __init__(self, pos, vel, time=0.0, delta_t=1.0, phash=None, **kwargs):

        self.pos = pos
        self.vel = vel
        self.time = time
        self.delta_t = delta_t
        self._hash = Parallel.ParticleId(phash)
        self.fields = {}
        self._old = []
Code example #9
    def __init__(self,
                 base_name,
                 t_min=0.,
                 t_max=numpy.infty,
                 online=False,
                 parallel_files=False,
                 timescale_factor=1.0,
                 **kwargs):
        """
        Initialise the cache from a base file name and optional limits on the time levels desired.
        """

        self.data = []
        self.set_field_names(**kwargs)
        self.reset()

        if base_name.rsplit(".", 1)[-1] == "pvd":
            for time, filename in read_pvd(base_name):
                self.data.append(
                    [timescale_factor * time, filename, None, None])
        else:
            if (Parallel.is_parallel() and online) or parallel_files:
                files = glob.glob(base_name +
                                  '_[0-9]*.p%s' % kwargs.get('fileext', 'vtu'))
            else:
                files = glob.glob(base_name +
                                  '_[0-9]*.%s' % kwargs.get('fileext', 'vtu'))

            for filename in files:
                if (Parallel.is_parallel() and online):
                    pfilename = get_piece_filename_from_vtk(filename)
                else:
                    pfilename = filename
                try:
                    time = self.get_time_from_vtk(pfilename)
                except Exception:
                    # Fall back to the index embedded in the file name.
                    time = int(filename.rsplit('.', 1)[0].rsplit('_', 1)[1])
                self.data.append(
                    [timescale_factor * time, pfilename, None, None])

        self.data.sort(key=lambda x: x[0])
        self.range(t_min, t_max)

        self.cache = DataCache()
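
A hedged construction sketch for the cache above (file names are hypothetical): the constructor accepts either a .pvd index file or a base name that is globbed for numbered .vtu/.pvtu files, and in both cases finishes with self.data sorted by time.

# Hypothetical usage with a ParaView .pvd index:
cache = TemporalCache('simulation.pvd', t_min=0.0, t_max=10.0)
# Or from a series like output_0.vtu, output_1.vtu, ...:
cache = TemporalCache('output')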
Code example #10
File: IO.py Project: daveb-dev/ParticleModule
def update_collision_polydata(bucket, base_name, **kwargs):
    """ Update collisions from data in the particle bucket."""

    if Parallel.is_parallel():
        fext = 'pvtp'
    else:
        fext = 'vtp'

    collision_list_to_polydata(bucket.collisions(),
                               base_name + '_collisions.' + fext)
Code example #11
File: IO.py Project: daveb-dev/ParticleModule
    def write(self):
        """ Write the staged vtkPolyData to a file."""

        self.poly_data.SetPoints(self.pnts)

        self.poly_data.Allocate(len(self.cell_ids))
        for cell_id in self.cell_ids.values():
            self.poly_data.InsertNextCell(vtk.VTK_LINE, cell_id)

        writer = WRITER[vtk.VTK_POLY_DATA]()
        writer.SetFileName(self.filename)
        if Parallel.is_parallel():
            writer.SetNumberOfPieces(Parallel.get_size())
            writer.SetStartPiece(Parallel.get_rank())
            writer.SetEndPiece(Parallel.get_rank())
            if vtk.vtkVersion.GetVTKMajorVersion() <= 6:
                writer.SetWriteSummaryFile(Parallel.get_rank() == 0)
            else:
                controller = vtk.vtkMPIController()
                controller.SetCommunicator(
                    vtk.vtkMPICommunicator.GetWorldCommunicator())
                writer.SetController(controller)
        if vtk.vtkVersion.GetVTKMajorVersion() < 6:
            writer.SetInput(self.poly_data)
        else:
            writer.SetInputData(self.poly_data)
        writer.Write()

        if Parallel.is_parallel():
            make_subdirectory(self.filename)
Code example #12
    def insert_particles(self, *args, **kwargs):
        """Deal with particle insertion"""

        for inlet in self.system.boundary.inlets:
            if Parallel.is_parallel():
                n_par = inlet.get_number_of_insertions(self.time, self.delta_t)
#                if Parallel.get_rank() == 0:
#                    n_par_0 = inlet.get_number_of_insertions(self.time,
#                                                           self.delta_t)
#                    for i in range(n_par_0):
#                        prob = numpy.random.random()
            else:
                n_par = inlet.get_number_of_insertions(self.time, self.delta_t)
            if n_par == 0:
                continue
            weights = inlet.cum_weight(self.time + 0.5 * self.delta_t,
                                       self.system.boundary.bnd,
                                       self.system.temporal_cache)
            if weights:
                for i in range(n_par):
                    prob = numpy.random.random()
                    time = self.time + prob * self.delta_t
                    pos = inlet.select_point(time, weights,
                                             self.system.boundary.bnd)
                    if inlet.velocity:
                        vel = numpy.array(inlet.velocity(pos, time))
                    else:
                        vel = numpy.zeros(3)
                        fvel = self.system.temporal_cache.get_velocity(
                            pos, time)
                        vel[:len(fvel)] = fvel

                    # update position by fractional timestep
                    pos = pos + vel * (1 - prob) * self.delta_t

                    data, alpha, names = self.system.temporal_cache(time)
                    cell_id, pcoords = vtk_extras.FindCell(data[0][3], pos)

                    if cell_id == -1:
                        continue

                    par = Particle(
                        (pos, vel, time, (1.0 - prob) * self.delta_t),
                        system=self.system,
                        parameters=self.parameters.randomize(),
                        **inlet.kwargs)

                    par.delta_t = self.delta_t

                    par.fields["InsertionTime"] = time
                    self.particles.append(par)
Code example #13
    def get_time_from_vtk(self, filename):
        """ Get the time from a vtk XML formatted file."""

        parallel_files = ('pvtu', 'pvtp', 'pvtm', 'vtm', 'pvts', 'pvtr')
        parallel = filename.split('.')[-1] in parallel_files

        etree = element_tree(file=filename).getroot()
        assert etree.tag == 'VTKFile'
        if parallel:
            parallel_name = etree[0].findall('Piece')[Parallel.get_rank()].get(
                'Source')
            return self.get_time_from_vtk(parallel_name)
        else:
            for piece in etree[0]:
                for data in piece[0]:
                    if data.get('Name') != self.field_names["Time"]:
                        continue
                    return float(data.get('RangeMin'))
Code example #14
File: TemporalCache.py Project: jrper/ParticleModule
    def get_time_from_vtk(self, filename):
        """ Get the time from a vtk XML formatted file."""

        PARALLEL_FILES = ('pvtu', 'pvtp', 'pvtm', 'vtm')
        parallel = filename.split('.')[-1] in PARALLEL_FILES


        e = etree.ElementTree(file=filename,
                              parser=etree.XMLParser(recover=True)).getroot()
        assert e.tag == 'VTKFile'
        if parallel:
            piece = e[0].findall('Piece')[Parallel.get_rank()].get('Source')
            return self.get_time_from_vtk(piece)
        else:
            for piece in e[0]:
                for data in piece[0]:
                    if data.get('Name') != 'Time':
                        continue
                    return float(data.get('RangeMin'))
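
For reference, a short sketch of the lookup this performs (the XML fragment below is hand-written and illustrative, not output from the module): in a serial file the method scans the PointData arrays for one named 'Time' and returns its RangeMin attribute, while for a parallel summary it recurses into this rank's Piece entry.

from io import BytesIO
from lxml import etree

# Illustrative fragment with the structure the method expects.
SNIPPET = b"""<VTKFile><UnstructuredGrid><Piece><PointData>
<DataArray Name="Time" RangeMin="0.5" RangeMax="0.5"/>
</PointData></Piece></UnstructuredGrid></VTKFile>"""

root = etree.ElementTree(file=BytesIO(SNIPPET)).getroot()
for piece in root[0]:
    for data in piece[0]:
        if data.get('Name') == 'Time':
            print(float(data.get('RangeMin')))  # prints 0.5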
Code example #15
File: System.py Project: daveb-dev/ParticleModule
    def in_system(self, points, size, time):
        """ Check that the points of X are inside the system data """

        out = empty(size, bool)

        if self.temporal_cache is None or Parallel.is_parallel():
            out[:] = True
            return out

        obj = self.temporal_cache(time)[0][0][2]
        loc = vtk.vtkCellLocator()

        if obj.IsA('vtkUnstructuredGrid'):
            loc.SetDataSet(obj)
        else:
            loc.SetDataSet(obj.GetBlock(0))
        loc.BuildLocator()

        for k, point in enumerate(points):
            out[k] = loc.FindCell(point) > -1

        return out
Code example #16
File: IO.py Project: daveb-dev/ParticleModule
    def __init__(self, filename, fields=None):
        """ Initialize the PolyData instance"""

        if filename.rsplit('.', 1)[-1] in ('pvtp', 'vtp'):
            self.filename = filename
        else:
            if Parallel.is_parallel():
                self.filename = filename + '.pvtp'
            else:
                self.filename = filename + '.vtp'
        self.cell_ids = {}
        self.poly_data = vtk.vtkPolyData()
        self.pnts = vtk.vtkPoints()
        self.pnts.Allocate(0)

        self.fields = fields or {}
        self.fields["Time"] = 1

        self.arrays = {}
        for name, num_comps in self.fields.items():
            array = vtk.vtkDoubleArray()
            array.SetName(name)
            array.SetNumberOfComponents(num_comps)
            self.poly_data.GetPointData().AddArray(array)
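
A brief usage sketch for the constructor above (the file name and field are hypothetical): fields maps an array name to its number of components, and a bare base name gains the '.vtp' or '.pvtp' extension automatically.

# Hypothetical: stage a writer with one extra 3-component point field.
poly = PolyData('trajectories', fields={'Particle Velocity': 3})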
Code example #17
File: System.py Project: jrper/ParticleModule
    def in_system(self, points, time):
        """ Check that the points of X are inside the system data """

        out = empty(points.shape[0], bool)

        if self.temporal_cache is None or Parallel.is_parallel():
            out[:] = True
            return out

        obj = self.temporal_cache(time)[0][0][2]
        loc = vtk.vtkCellLocator()

        if obj.IsA('vtkUnstructuredGrid'):
            loc.SetDataSet(obj)
        else:
            loc.SetDataSet(obj.GetBlock(0))
        loc.BuildLocator()

        for k, point in enumerate(points):
            out[k] = loc.FindCell(point) > -1

        return out
Code example #18
File: IO.py Project: daveb-dev/ParticleModule
def write_to_file(vtk_data, outfile):
    """ Wrapper around the various VTK writer routines"""

    writer = WRITER[vtk_data.GetDataObjectType()]()
    writer.SetFileName(outfile)
    if Parallel.is_parallel():
        writer.SetNumberOfPieces(Parallel.get_size())
        writer.SetStartPiece(Parallel.get_rank())
        writer.SetEndPiece(Parallel.get_rank())
        if vtk.vtkVersion.GetVTKMajorVersion() <= 6:
            writer.SetWriteSummaryFile(Parallel.get_rank() == 0)
        else:
            controller = vtk.vtkMPIController()
            controller.SetCommunicator(
                vtk.vtkMPICommunicator.GetWorldCommunicator())
            writer.SetController(controller)
    if vtk.vtkVersion.GetVTKMajorVersion() < 6:
        writer.SetInput(vtk_data)
    else:
        writer.SetInputData(vtk_data)
    writer.Write()

    if Parallel.is_parallel():
        make_subdirectory(outfile)
Code example #19
def test_serial():

    assert not Parallel.is_parallel()
Code example #20
File: IO.py Project: daveb-dev/ParticleModule
def write_level_to_polydata(bucket,
                            level,
                            basename=None,
                            do_average=False,
                            field_data=None,
                            **kwargs):
    """Output a time level of a particle bucket to a vtkPolyData (.vtp) files.

    Each file contains one time level of the data, and are numbered sequentially.
    Within each file, each particle is written to seperate pixel.

    Args:
         bucket   (ParticleBucket):
        level    (int):
        basename (str): String used in the construction of the file series.
        The formula is of the form basename_0.vtp, basename_1.vtp,..."""

    del kwargs
    field_data = field_data or {}

    poly_data = vtk.vtkPolyData()
    pnts = vtk.vtkPoints()
    pnts.Allocate(0)
    poly_data.SetPoints(pnts)
    poly_data.Allocate(len(bucket))

    outtime = vtk.vtkDoubleArray()
    outtime.SetName('Time')
    outtime.Allocate(1)

    particle_id = vtk.vtkDoubleArray()
    particle_id.SetName('ParticleID')
    particle_id.Allocate(len(bucket))

    live = vtk.vtkDoubleArray()
    live.SetName('Live')
    live.Allocate(len(bucket))

    plive = bucket.system.in_system(bucket.pos(), len(bucket), bucket.time)

    for is_live, par in zip(plive, bucket):
        particle_id.InsertNextValue(hash(par))
        if is_live:
            live.InsertNextValue(1.0)
        else:
            live.InsertNextValue(0.0)

    velocity = vtk.vtkDoubleArray()
    velocity.SetNumberOfComponents(3)
    velocity.Allocate(len(bucket))
    velocity.SetName('Particle Velocity')

    for positions, vel in zip(bucket.pos(), bucket.vel()):
        pixel = vtk.vtkPixel()
        pixel.GetPointIds().InsertId(
            0,
            poly_data.GetPoints().InsertNextPoint(positions[0], positions[1],
                                                  positions[2]))
        velocity.InsertNextTuple3(vel[0], vel[1], vel[2])
        poly_data.InsertNextCell(pixel.GetCellType(), pixel.GetPointIds())

    outtime.InsertNextValue(bucket.time)

    poly_data.GetFieldData().AddArray(outtime)
    poly_data.GetPointData().AddArray(velocity)
    poly_data.GetPointData().AddArray(particle_id)
    poly_data.GetCellData().AddArray(live)

    for name, num_comps in field_data.items():
        array = vtk.vtkDoubleArray()
        array.SetName(name)
        array.SetNumberOfComponents(num_comps)
        array.Allocate(len(bucket))
        for particle in bucket:
            array.InsertNextValue(particle.fields[name])
        poly_data.GetPointData().AddArray(array)

    if do_average:
        gsp = calculate_averaged_properties_cpp(poly_data)

    if Parallel.is_parallel():
        file_ext = 'pvtp'
    else:
        file_ext = 'vtp'

    write_to_file(poly_data, "%s_%d.%s" % (basename, level, file_ext))

    if do_average:
        return gsp
Code example #21
def test_paralle():

    assert Parallel.is_parallel()
Code example #22
File: IO.py Project: daveb-dev/ParticleModule
ARGI = vtk.mutable(0)
WEIGHTS = numpy.zeros(10)
SUB_ID = vtk.mutable(0)
CELL = vtk.vtkGenericCell()

TYPE_DICT = {
    1: vtk.VTK_LINE,
    2: vtk.VTK_TRIANGLE,
    4: vtk.VTK_TETRA,
    15: vtk.VTK_PIXEL
}

WRITER = {
    vtk.VTK_UNSTRUCTURED_GRID:
    (vtk.vtkXMLPUnstructuredGridWriter
     if Parallel.is_parallel() else vtk.vtkXMLUnstructuredGridWriter),
    vtk.VTK_POLY_DATA: (vtk.vtkXMLPPolyDataWriter if Parallel.is_parallel()
                        else vtk.vtkXMLPolyDataWriter),
    vtk.VTK_TABLE:
    (None if Parallel.is_parallel() else vtk.vtkDelimitedTextWriter)
}


class PolyData(object):
    """ Class storing a living vtkPolyData construction"""
    def __init__(self, filename, fields=None):
        """ Initialize the PolyData instance"""

        if filename.rsplit('.', 1)[-1] in ('pvtp', 'vtp'):
            self.filename = filename
        else:
Code example #23
File: TemporalCache.py Project: jrper/ParticleModule
    def get_piece_filename_from_vtk(self, filename, piece=Parallel.get_rank()):
        """Get the filename of an individual VTK file piece."""
        e = etree.ElementTree(file=filename,
                              parser=etree.XMLParser(recover=True)).getroot()
        return e[0].findall('Piece')[piece].get('Source')
Code example #24
File: test_parallel.py Project: jrper/ParticleModule
def test_serial():

    assert not Parallel.is_parallel()
Code example #25
def get_piece_filename_from_vtk(filename, piece=Parallel.get_rank()):
    """Get the filename of individual VTK file piece."""

    etree = element_tree(file=filename).getroot()
    return etree[0].findall('Piece')[piece].get('Source')
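
A hedged usage sketch (the summary file name is hypothetical): given a parallel summary file, each rank resolves the serial piece it owns and can then open that file directly.

# Hypothetical: 'output_0.pvtu' is a parallel summary whose <Piece>
# entries name one serial .vtu file per rank.
piece_name = get_piece_filename_from_vtk('output_0.pvtu')
print(piece_name)  # e.g. 'output_0_3.vtu' on rank 3 (illustrative)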
Code example #26
File: IO.py Project: jrper/ParticleModule
import copy
import os
import os.path

import numpy
import scipy
import vtk
from scipy.interpolate import griddata

from particle_model import Parallel

TYPES_3D = [vtk.VTK_TETRA, vtk.VTK_QUADRATIC_TETRA]
TYPES_2D = [vtk.VTK_TRIANGLE, vtk.VTK_QUADRATIC_TRIANGLE]
TYPES_1D = [vtk.VTK_LINE]

TYPE_DICT = {1 : vtk.VTK_LINE, 2 : vtk.VTK_TRIANGLE, 4 : vtk.VTK_TETRA,
             15 : vtk.VTK_PIXEL}

WRITER = {vtk.VTK_UNSTRUCTURED_GRID: (vtk.vtkXMLPUnstructuredGridWriter
                                      if Parallel.is_parallel()
                                      else vtk.vtkXMLUnstructuredGridWriter),
          vtk.VTK_POLY_DATA: (vtk.vtkXMLPPolyDataWriter
                              if Parallel.is_parallel()
                              else vtk.vtkXMLPolyDataWriter)}

class GmshMesh(object):
    """This is a class for storing nodes and elements.

    Members:
    nodes -- A dict of the form { nodeID: [ xcoord, ycoord, zcoord] }
    elements -- A dict of the form { elemID: (type, [tags], [nodeIDs]) }

    Methods:
    read(file) -- Parse a Gmsh version 1.0 or 2.0 mesh file
    write(file) -- Output a Gmsh version 2.0 mesh file
Code example #27
""" Test parallel execution."""
from particle_model import Parallel
import pytest


@pytest.mark.skipif(Parallel.get_size() > 1, reason='Serial test')
def test_serial():

    assert not Parallel.is_parallel()


@pytest.mark.skipif(Parallel.get_size() == 1, reason='Parallel test')
def test_paralle():

    assert Parallel.is_parallel()
Code example #28
File: test_parallel.py Project: jrper/ParticleModule
def test_paralle():

    assert Parallel.is_parallel()
Code example #29
File: Particles.py Project: jrper/ParticleModule
    def redistribute(self):
        if Parallel.is_parallel():
            self.particles = Parallel.distribute_particles(self.particles,
                                                           self.system)
Code example #30
File: test_parallel.py Project: jrper/ParticleModule
""" Test parallel execution."""
from particle_model import Parallel
import pytest


@pytest.mark.skipif(Parallel.get_size() > 1, reason="Serial test")
def test_serial():

    assert not Parallel.is_parallel()


@pytest.mark.skipif(Parallel.get_size() == 1, reason="Parallel test")
def test_paralle():

    assert Parallel.is_parallel()
Code example #31
File: IO.py Project: jrper/ParticleModule
def write_level_to_polydata(bucket, level, basename=None, do_average=False,
                            **kwargs):
    """Output a time level of a particle bucket to vtkPolyData (.vtp) files.

    Each file contains one time level of the data, and the files are
    numbered sequentially. Within each file, each particle is written to
    a separate pixel.

    Args:
        bucket   (ParticleBucket): Bucket of particles to output.
        level    (int): Index of this time level in the file series.
        basename (str): String used in the construction of the file series.
            The file names are of the form basename_0.vtp, basename_1.vtp, ...
    """

    del kwargs

    poly_data = vtk.vtkPolyData()
    pnts = vtk.vtkPoints()
    pnts.Allocate(0)
    poly_data.SetPoints(pnts)
    poly_data.Allocate(bucket.pos.shape[0])

    outtime = vtk.vtkDoubleArray()
    outtime.SetName('Time')
    outtime.Allocate(bucket.pos.shape[0])

    particle_id = vtk.vtkDoubleArray()
    particle_id.SetName('ParticleID')
    particle_id.Allocate(bucket.pos.shape[0])

    for par in bucket.particles:
        particle_id.InsertNextValue(par.id())

    velocity = vtk.vtkDoubleArray()
    velocity.SetNumberOfComponents(3)
    velocity.Allocate(bucket.pos.shape[0])
    velocity.SetName('Particle Velocity')

    for positions, vel in zip(bucket.pos, bucket.vel):
        pixel = vtk.vtkPixel()
        pixel.GetPointIds().InsertId(0,
                                     poly_data.GetPoints().InsertNextPoint(positions[0],
                                                                           positions[1],
                                                                           positions[2]))
        outtime.InsertNextValue(bucket.time)
        velocity.InsertNextTuple3(vel[0], vel[1], vel[2])
        poly_data.InsertNextCell(pixel.GetCellType(), pixel.GetPointIds())

    poly_data.GetPointData().AddArray(outtime)
    poly_data.GetPointData().AddArray(velocity)
    poly_data.GetPointData().AddArray(particle_id)

    if do_average:
        gsp = calculate_averaged_properties_cpp(poly_data)

    if Parallel.is_parallel():
        file_ext = 'pvtp'
    else:
        file_ext = 'vtp'

    write_to_file(poly_data, "%s_%d.%s" % (basename, level, file_ext))

    if do_average:
        return gsp