Example #1
def _global_func(impl, array, axis, controller):
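    """Reduce array along axis. Composite arrays are reduced block by
    block; when an MPI controller is available, the partial results are
    then merged across ranks with an Allreduce using the operation
    described by impl."""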
    if type(array) == dsa.VTKCompositeDataArray:
        if axis is None or axis == 0:
            res = impl.serial_composite(array, axis)
        else:
            res = apply_ufunc(impl.op(), array, (axis, ))
    else:
        res = impl.op()(array, axis)
        if res is not dsa.NoneArray:
            res = res.astype(numpy.float64)

    if axis is None or axis == 0:
        if controller is None and vtkMultiProcessController is not None:
            controller = vtkMultiProcessController.GetGlobalController()
        if controller and controller.IsA("vtkMPIController"):
            from mpi4py import MPI
            comm = vtkMPI4PyCommunicator.ConvertToPython(
                controller.GetCommunicator())

            max_dims, size = _reduce_dims(res, comm)

            # All NoneArrays
            if size == 0:
                return dsa.NoneArray

            if res is dsa.NoneArray:
                if max_dims == 1:
                    # An empty shape tuple makes numpy.empty return a
                    # 0-d array that behaves like a scalar.
                    max_dims = ()
                res = numpy.empty(max_dims)
                res.fill(impl.default())

            res_recv = numpy.array(res)
            mpi_type = _lookup_mpi_type(res.dtype)
            comm.Allreduce([res, mpi_type], [res_recv, mpi_type],
                           impl.mpi_op())
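            # Even a rank whose local array is NoneArray must take part in
            # the collective Allreduce above before returning below.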
            if array is dsa.NoneArray:
                return dsa.NoneArray
            res = res_recv

    return res
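The reduction object impl is not shown above; here is a minimal sketch of the protocol _global_func appears to expect, inferred from the calls it makes (op(), mpi_op(), default(), serial_composite()). SumImpl is a hypothetical name used for illustration, not a class from VTK.

from mpi4py import MPI
from vtk.numpy_interface import dataset_adapter as dsa
import numpy

class SumImpl(object):
    def op(self):
        # a callable applied as op()(array, axis)
        return numpy.sum

    def mpi_op(self):
        # the MPI reduction matching op()
        return MPI.SUM

    def default(self):
        # identity value: a rank with no data contributes this
        return numpy.float64(0)

    def serial_composite(self, array, axis):
        # reduce each block locally, then combine the per-block results
        res = None
        for a in array.Arrays:
            if a is not dsa.NoneArray:
                partial = numpy.sum(a, axis).astype(numpy.float64)
                res = partial if res is None else res + partial
        return dsa.NoneArray if res is None else res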
Example #2
def _array_count(array, axis, controller):
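    """Return the global number of elements in array (the number of
    tuples along the first axis when an axis is given), summed over all
    MPI ranks when running in parallel."""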

    if array is dsa.NoneArray:
        size = numpy.int64(0)
    elif axis is None:
        size = numpy.int64(array.size)
    else:
        size = numpy.int64(shape(array)[0])

    if controller is None and vtkMultiProcessController is not None:
        controller = vtkMultiProcessController.GetGlobalController()

    if controller and controller.IsA("vtkMPIController"):
        from mpi4py import MPI
        comm = vtkMPI4PyCommunicator.ConvertToPython(controller.GetCommunicator())

        total_size = numpy.array(size, dtype=numpy.int64)
        mpitype = _lookup_mpi_type(numpy.int64)
        comm.Allreduce([size, mpitype], [total_size, mpitype], MPI.SUM)
        size = total_size

    return size
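In serial, _array_count simply returns the local count; under MPI a single Allreduce sums the counts so every rank sees the same total. The same pattern in standalone mpi4py, with an illustrative local count:

from mpi4py import MPI
import numpy

comm = MPI.COMM_WORLD
local_count = numpy.array(1000, dtype=numpy.int64)  # this rank's element count
total = numpy.array(0, dtype=numpy.int64)           # receive buffer
comm.Allreduce([local_count, MPI.INT64_T], [total, MPI.INT64_T], MPI.SUM)
# every rank now holds the same global count in total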
Example #3
def unstructured_from_composite_arrays(points, arrays, controller=None):
    """Given a set of VTKCompositeDataArrays, creates a vtkUnstructuredGrid.
    The main goal of this function is to transform the output of XXX_per_block()
    methods to a single dataset that can be visualized and further processed.
    Here, arrays is an iterable (e.g. a list) of (array, name) pairs. For
    example:

    centroid = mean_per_block(composite_data.Points)
    T = mean_per_block(composite_data.PointData['Temperature'])
    ug = unstructured_from_composite_arrays(centroid, [(T, 'Temperature')])

    When called in parallel, this function makes sure that each array in
    the input dataset is represented on only one process. This matters
    because methods like mean_per_block() return the same value for blocks
    that are partitioned across the participating processes. If the same
    point were created on multiple processes in the output, filters like
    histogram would erroneously report duplicate values.
    """

    try:
        dataset = points.DataSet
    except AttributeError:
        dataset = None

    if dataset is None and points is not dsa.NoneArray:
        raise ValueError(
            "Expecting a points arrays with an associated dataset.")

    if points is dsa.NoneArray:
        cpts = []
    else:
        cpts = points.Arrays
    ownership = numpy.zeros(len(cpts), dtype=numpy.int32)
    rank = 0

    # Let's first create a map of array index to composite ids.
    if dataset is None:
        ids = []
    else:
        it = dataset.NewIterator()
        it.UnRegister(None)
        ids = numpy.empty(len(cpts), dtype=numpy.int32)
        counter = 0
        while not it.IsDoneWithTraversal():
            _id = it.GetCurrentFlatIndex()
            ids[counter] = _id
            counter += 1
            it.GoToNextItem()

    if controller is None and vtkMultiProcessController is not None:
        controller = vtkMultiProcessController.GetGlobalController()
    if controller and controller.IsA("vtkMPIController"):
        from mpi4py import MPI
        comm = vtkMPI4PyCommunicator.ConvertToPython(
            controller.GetCommunicator())
        rank = comm.Get_rank()

        # Determine the max id to use for reduction
        # operations

        # Get all ids from dataset, including empty ones.
        lmax_id = numpy.int32(0)
        if dataset is not None:
            it = dataset.NewIterator()
            it.UnRegister(None)
            it.SetSkipEmptyNodes(False)
            while not it.IsDoneWithTraversal():
                _id = it.GetCurrentFlatIndex()
                lmax_id = numpy.max((lmax_id, _id)).astype(numpy.int32)
                it.GoToNextItem()
        max_id = numpy.array(0, dtype=numpy.int32)
        mpitype = _lookup_mpi_type(numpy.int32)
        comm.Allreduce([lmax_id, mpitype], [max_id, mpitype], MPI.MAX)

        # Now we figure out which processes have which ids
        # Flat indices run from 0 up to and including max_id, so size
        # the ownership arrays with max_id + 1 entries.
        lownership = numpy.empty(max_id + 1, dtype=numpy.int32)
        lownership.fill(numpy.iinfo(numpy.int32).max)

        ownership = numpy.empty(max_id + 1, dtype=numpy.int32)

        if dataset is not None:
            it = dataset.NewIterator()
            it.UnRegister(None)
            it.InitTraversal()
            itr = iter(cpts)
            while not it.IsDoneWithTraversal():
                _id = it.GetCurrentFlatIndex()
                if next(itr) is not dsa.NoneArray:
                    lownership[_id] = rank
                it.GoToNextItem()
        mpitype = _lookup_mpi_type(numpy.int32)
        # The process with the lowest id containing a block will
        # produce the output for that block.
        comm.Allreduce([lownership, mpitype], [ownership, mpitype], MPI.MIN)

    # Iterate over blocks to produce points and arrays
    from vtk.vtkCommonDataModel import vtkUnstructuredGrid
    from vtk.vtkCommonCore import vtkDoubleArray, vtkPoints
    ugrid = vtkUnstructuredGrid()
    da = vtkDoubleArray()
    da.SetNumberOfComponents(3)
    pts = vtkPoints()
    pts.SetData(da)
    counter = 0
    for pt in cpts:
        if ownership[ids[counter]] == rank:
            pts.InsertNextPoint(tuple(pt))
        counter += 1
    ugrid.SetPoints(pts)

    for ca, name in arrays:
        if ca is not dsa.NoneArray:
            da = vtkDoubleArray()
            ncomps = ca.Arrays[0].flatten().shape[0]
            da.SetNumberOfComponents(ncomps)
            counter = 0
            for a in ca.Arrays:
                if ownership[ids[counter]] == rank:
                    a = a.flatten()
                    for i in range(ncomps):
                        da.InsertNextValue(a[i])
                counter += 1
            if len(a) > 0:
                da.SetName(name)
                ugrid.GetPointData().AddArray(da)
    return ugrid
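A usage sketch following the docstring's example; it assumes composite_data is a composite dataset wrapped with dsa.WrapDataObject and that mean_per_block comes from the same algorithms module:

from vtk.numpy_interface import dataset_adapter as dsa

composite_data = dsa.WrapDataObject(multiblock)  # hypothetical multiblock input
centroid = mean_per_block(composite_data.Points)
T = mean_per_block(composite_data.PointData['Temperature'])
ug = unstructured_from_composite_arrays(centroid, [(T, 'Temperature')])
# ug is a vtkUnstructuredGrid with one point per block and a
# 'Temperature' point-data array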
Example #4
def _global_per_block(impl, array, axis=None, controller=None):
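    """Apply impl's reduction to each block of a composite array
    independently. Blocks that are partitioned across several MPI ranks
    are merged with an Allreduce so that every rank ends up with the
    same per-block result."""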
    if axis is not None and axis > 0:
        return impl.op()(array, axis=axis, controller=controller)

    try:
        dataset = array.DataSet
    except AttributeError:
        dataset = None

    t = type(array)
    if t == dsa.VTKArray or t == numpy.ndarray:
        from vtk.vtkCommonDataModel import vtkMultiBlockDataSet
        array = dsa.VTKCompositeDataArray([array])
        ds = vtkMultiBlockDataSet()
        ds.SetBlock(0, dataset.VTKObject)
        dataset = ds

    results = _apply_func2(impl.op2(), array, (axis, ))

    if controller is None and vtkMultiProcessController is not None:
        controller = vtkMultiProcessController.GetGlobalController()
    if controller and controller.IsA("vtkMPIController"):
        from mpi4py import MPI
        comm = vtkMPI4PyCommunicator.ConvertToPython(
            controller.GetCommunicator())

        # First determine the number of components to use
        # for reduction
        res = dsa.NoneArray
        for res in results:
            if res is not dsa.NoneArray:
                break

        max_dims, size = _reduce_dims(res, comm)

        # All NoneArrays
        if size == 0:
            return dsa.NoneArray

        # Next determine the max id to use for reduction
        # operations

        # Get all ids from dataset, including empty ones.
        ids = []
        lmax_id = numpy.int32(0)
        if dataset is not None:
            it = dataset.NewIterator()
            it.UnRegister(None)
            it.SetSkipEmptyNodes(False)
            while not it.IsDoneWithTraversal():
                _id = it.GetCurrentFlatIndex()
                lmax_id = numpy.max((lmax_id, _id)).astype(numpy.int32)
                if it.GetCurrentDataObject() is not None:
                    ids.append(_id)
                it.GoToNextItem()
        max_id = numpy.array(0, dtype=numpy.int32)
        mpitype = _lookup_mpi_type(numpy.int32)
        comm.Allreduce([lmax_id, mpitype], [max_id, mpitype], MPI.MAX)

        has_ids = numpy.zeros(max_id + 1, dtype=numpy.int32)
        for _id in ids:
            has_ids[_id] = 1
        id_count = numpy.array(has_ids)
        comm.Allreduce([has_ids, mpitype], [id_count, mpitype], MPI.SUM)

        if numpy.all(id_count <= 1):
            return dsa.VTKCompositeDataArray(results, dataset=dataset)

        # Now we know which blocks are shared by more than one
        # rank: the ones with an id count of 2 or more.
        reduce_ids = []
        for _id in range(len(id_count)):
            if id_count[_id] > 1:
                reduce_ids.append(_id)

        to_reduce = len(reduce_ids)
        # If no block is shared, short-circuit: there is no
        # need to communicate any further.
        if to_reduce == 0:
            return dsa.VTKCompositeDataArray(results, dataset=dataset)

        # Create the local array that will be used for
        # reduction. Fill it with a value that won't affect
        # the reduction.
        lresults = numpy.empty(size * to_reduce)
        lresults.fill(impl.default())

        # Just get non-empty ids. Doing this again in case
        # the traversal above results in a different order.
        # We need the same order since we'll use zip below.
        if dataset is not None:
            it = dataset.NewIterator()
            it.UnRegister(None)
            ids = []
            while not it.IsDoneWithTraversal():
                ids.append(it.GetCurrentFlatIndex())
                it.GoToNextItem()

        # Fill the local array with available values.
        for _id, _res in zip(ids, results):
            try:
                loc = reduce_ids.index(_id)
            except ValueError:
                continue
            if _res is not dsa.NoneArray:
                lresults[loc * size:(loc + 1) * size] = _res.flatten()

        # Now do the MPI reduction.
        rresults = numpy.array(lresults)
        mpitype = _lookup_mpi_type(numpy.double)
        comm.Allreduce([lresults, mpitype], [rresults, mpitype], impl.mpi_op())

        if array is dsa.NoneArray:
            return dsa.NoneArray

        # Fill in the reduced values.
        for i in range(to_reduce):
            _id = reduce_ids[i]
            try:
                loc = ids.index(_id)
            except ValueError:
                continue
            if size == 1:
                results[loc] = dsa.VTKArray(rresults[i])
            else:
                results[loc] = rresults[i * size:(i + 1) *
                                        size].reshape(max_dims)

    return dsa.VTKCompositeDataArray(results, dataset=dataset)
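The heart of the parallel branch is a pack/reduce/unpack pattern: every shared block gets a fixed slot in a flat buffer pre-filled with the reduction's identity value, the buffer is reduced with a single Allreduce, and the slots are copied back. A standalone sketch of that pattern (the block ids, values, and MAX operation are made up for illustration):

from mpi4py import MPI
import numpy

comm = MPI.COMM_WORLD
reduce_ids = [2, 5, 7]   # ids of blocks shared by more than one rank
size = 3                 # number of values per block result
local = numpy.full(size * len(reduce_ids), -numpy.inf)  # identity for MAX

# fill in only the slots for blocks this rank actually owns
my_blocks = {5: numpy.array([1.0, 2.0, 3.0])}  # hypothetical local results
for _id, values in my_blocks.items():
    loc = reduce_ids.index(_id)
    local[loc * size:(loc + 1) * size] = values

result = numpy.empty_like(local)
comm.Allreduce([local, MPI.DOUBLE], [result, MPI.DOUBLE], MPI.MAX)
# every rank now sees the element-wise maximum over ranks for each block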
Example #5
def RequestData():
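    """Programmable Filter entry point: bins particles into a 2-D
    position/velocity histogram (the PDF), writes it to an image-data
    output, and reduces the bins across MPI ranks when run in parallel."""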
    import math
    import numpy
    import paraview
    import vtk.numpy_interface.dataset_adapter
    import vtk.numpy_interface.algorithms
    try:
        from vtk.vtkParallelCore import vtkMultiProcessController
        from vtk.vtkParallelMPI4Py import vtkMPI4PyCommunicator
    except ImportError:
        vtkMultiProcessController = None
        vtkMPI4PyCommunicator = None
    controller = vtkMultiProcessController.GetGlobalController()
    nProcs = controller.GetNumberOfProcesses()
    print(' nProcs: ', nProcs)
    if controller and controller.IsA(
            "vtkMPIController") and controller.GetNumberOfProcesses() > 1:
        from mpi4py import MPI
        comm = vtkMPI4PyCommunicator.ConvertToPython(
            controller.GetCommunicator())
        rank = comm.Get_rank()
    else:
        rank = 0

    # This script computes the particle distribution function. Missing:
    # selection of spatial coordinate and velocity.
    deltaX = float(xMax - xMin) / float(NumberOfSpaceBins)
    deltaV = float(maxVelo - minVelo) / float(NumberOfVeloBins)
    # input (avoid shadowing the built-ins `input` and `iter`)
    inp = self.GetInputDataObject(0, 0)
    if inp.IsA("vtkMultiBlockDataSet"):
        # here: new format with vtk-multiblock
        print(" vtkMultiBlockDataSet")
        it = inp.NewIterator()
        it.UnRegister(None)
        it.InitTraversal()
        pdi = it.GetCurrentDataObject()
    else:
        # old format without multiblock
        pdi = inp.GetInput()
    nParts = pdi.GetNumberOfPoints()
    if nProcs > 1:
        nTotalParts = comm.allreduce(nParts, op=MPI.SUM)
    else:
        nTotalParts = nParts
    if rank == 0:
        print(' nTotalParts:', nTotalParts)
    # output
    pdo = self.GetOutput()
    # generate 2d grid
    pdo.SetDimensions(NumberOfSpaceBins + 1, NumberOfVeloBins + 1, 0)
    deltaXplot = 1. / float(NumberOfSpaceBins)
    deltaVplot = 1. / float(NumberOfVeloBins)
    if (maxVelo == -minVelo):
        pdo.SetOrigin(0, -0.5, 0.)
    else:
        pdo.SetOrigin(0, 0.0, 0.)
    pdo.SetSpacing(deltaXplot, deltaVplot, 1.)
    # On ParaView 3.98, 4.0 and 4.1
    pdo.SetExtent(0, NumberOfSpaceBins, 0, NumberOfVeloBins, 0, 1)
    PDF = numpy.zeros((NumberOfSpaceBins, NumberOfVeloBins), dtype='float')
    # generate array
    # loop over all particles
    nPartsMin = 0
    nPartsMax = 0
    nPartsIn = 0
    nXmin = 0
    nXmax = 0
    velocity = pdi.GetPointData().GetArray("Velocity")
    for i in range(0, nParts):
        coord = pdi.GetPoint(i)
        pos = coord[iDirect]
        if (iVelocity == 3):
            # vabs
            vx = velocity.GetValue(3 * i)
            vy = velocity.GetValue(3 * i + 1)
            vz = velocity.GetValue(3 * i + 2)
            velo = math.sqrt(vx**2 + vy**2 + vz**2)
        elif (iVelocity == 4):
            # v-tang to x
            vy = velocity.GetValue(3 * i + 1)
            vz = velocity.GetValue(3 * i + 2)
            velo = math.sqrt(vy**2 + vz**2)
        elif (iVelocity == 5):
            # v-tang to y
            vx = velocity.GetValue(3 * i)
            vz = velocity.GetValue(3 * i + 2)
            velo = math.sqrt(vx**2 + vz**2)
        elif (iVelocity == 6):
            # v-tang to z
            vx = velocity.GetValue(3 * i)
            vy = velocity.GetValue(3 * i + 1)
            velo = math.sqrt(vx**2 + vy**2)
        else:
            # velocity in x, y or z
            velo = velocity.GetValue(3 * i + iVelocity)

        # check x range
        if (xMin > pos):
            nXmin = nXmin + 1
        elif (xMax < pos):
            nXmax = nXmax + 1
        else:
            # particle in x-range
            if (minVelo > velo):
                nPartsMin = nPartsMin + 1
            elif (maxVelo < velo):
                nPartsMax = nPartsMax + 1
            else:
                # compute position in the 2-D pos/velo array
                ipos = int((pos - xMin) / deltaX)
                ivelo = int((velo - minVelo) / deltaV)
                nPartsIn = nPartsIn + 1
                PDF[ipos, ivelo] = PDF[ipos, ivelo] + 1.0
    if nProcs > 1:
        # sum the local counts over all ranks, then normalize
        nTotalPartsIn = comm.allreduce(nPartsIn, op=MPI.SUM)
        totalPDF = comm.allreduce(PDF, op=MPI.SUM) / float(nTotalPartsIn)
    else:
        nTotalPartsIn = nPartsIn
        totalPDF = PDF / float(nTotalPartsIn)
    array = vtk.vtkFloatArray()
    if nProcs > 1:
        # sum up to total
        nTotalXmin = comm.reduce(nXmin, op=MPI.SUM)
        nTotalXmax = comm.reduce(nXmax, op=MPI.SUM)
        nTotalPartsMin = comm.reduce(nPartsMin, op=MPI.SUM)
        nTotalPartsMax = comm.reduce(nPartsMax, op=MPI.SUM)
    else:
        nTotalXmin = nXmin
        nTotalXmax = nXmax
        nTotalPartsMin = nPartsMin
        nTotalPartsMax = nPartsMax
    nTotalPartsOut = nTotalParts - nTotalPartsIn
    if rank == 0:
        # output
        if (nTotalXmin > 0) or (nTotalXmax > 0):
            print " Particles out of coordinate range."
            print " nMinOut: ", nTotalXmin
            print " nMaxOut: ", nTotalXmax
            print " Percent coord out: ", float(
                nTotalXmin + nTotalXmax) / float(nTotalParts) * 100.0
        if (nTotalPartsMin > 0) or (nTotalPartsMax > 0):
            print " Particles out of velocity range. Velocity truncated!!!"
            print " nPartsMin: ", nTotalPartsMin
            print " nPartsMax: ", nTotalPartsMax
            print " Percent velo out of nPartsIn:     ", float(
                nTotalPartsMin + nTotalPartsMax) / float(nTotalPartsIn) * 100.0
            print " Percent velo out of nParts:       ", float(
                nTotalPartsMin + nTotalPartsMax) / float(nTotalParts) * 100.0
        if (nTotalPartsOut > 0):
            print " nPartsIn:  ", nTotalPartsIn
            print " total out: ", nTotalPartsOut
            print " Percent nPartIn:  ", float(nTotalPartsIn) / float(
                nTotalParts) * 100.0
            print " Percent nPartOut: ", float(nTotalPartsOut) / float(
                nTotalParts) * 100.0
    array.SetName("PDF")
    array.SetNumberOfComponents(1)
    ncells = NumberOfSpaceBins * NumberOfVeloBins
    array.SetNumberOfTuples(ncells)
    pdo.GetCellData().AddArray(array)
    ipos = 0
    for j in range(0, NumberOfVeloBins):
        for i in range(0, NumberOfSpaceBins):
            # caution: transposed index because of storage order
            array.SetValue(ipos, totalPDF[i, j])
            ipos = ipos + 1
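As a side note, the per-particle binning loop above could be vectorized; a sketch with numpy.histogram2d, assuming the selected coordinate and velocity component are available as 1-D arrays (pos and velo are illustrative names):

import numpy

# pos, velo: 1-D arrays of particle positions and velocity components
PDF, _, _ = numpy.histogram2d(
    pos, velo,
    bins=(NumberOfSpaceBins, NumberOfVeloBins),
    range=((xMin, xMax), (minVelo, maxVelo)))
# particles outside range are simply not counted, matching the explicit
# range checks in the loop above (up to edge handling in the last bin)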