コード例 #1
0
def run_pvbatch_output(params_dict):
  """Process this MPI rank's share of grid chunks and emit output.

  Block-distributes the chunk list across the MPI ranks, merges this rank's
  chunks into a single vtkUnstructuredGrid, then either drives Catalyst
  co-processing over every time step (when a Catalyst script is given) or
  writes grid files directly.

  params_dict keys used:
    rank                 -- this process's MPI rank
    size                 -- total number of MPI ranks
    chunking             -- list of grid chunks to distribute over ranks
    grid_desc            -- grid description passed through to helpers
    time_steps_dict      -- mapping of time value -> per-step data
    paraview_output_file -- base name for output files
    catalystscript       -- Catalyst script path, or None to write files
  """
  rank = params_dict["rank"]
  size = params_dict["size"]
  chunking = params_dict["chunking"]
  grid_desc = params_dict["grid_desc"]
  time_steps_dict = params_dict["time_steps_dict"]
  paraview_output_file = params_dict["paraview_output_file"]
  catalystscript = params_dict["catalystscript"]

  if rank == 0:
    print("Processing grid chunk(s) on " + str(size) + " MPI ranks")

  # Block-distribute len(chunking) chunks over `size` ranks: the first r
  # ranks get c+1 chunks each, the remaining ranks get c.  Floor division
  # (//) keeps the arithmetic correct on both Python 2 and Python 3 --
  # true division in Python 3 would produce floats and break the slice
  # bounds below.
  c = len(chunking) // size
  r = len(chunking) % size
  if rank < r:
    start = rank * (c + 1)
    stop = start + c          # inclusive upper bound
  else:
    start = rank * c + r
    stop = start + (c - 1)    # inclusive upper bound

  # Merge this rank's chunks into one unstructured grid.
  append = vtk.vtkAppendFilter()
  append.MergePointsOn()
  for idx, chunk in enumerate(chunking[start:stop + 1]):
    g = process_grid_chunk(idx, chunk, len(chunking),
                           grid_desc, time_steps_dict, paraview_output_file)
    if g:
      append.AddInputData(g)
  ug = None
  if append.GetInputList().GetNumberOfItems():
    append.Update()
    ug = append.GetOutput()

  # Ranks that received no data still need a (empty) grid so they can
  # participate in the collective calls below.
  if ug is None:
    ug = vtk.vtkUnstructuredGrid()

  report_collective_grid_sizes(ug)

  if catalystscript is not None:
    # In-situ path: feed each time step through Catalyst co-processing.
    if rank == 0:
      print("Calling Catalyst over " + str(len(time_steps_dict)) + " time step(s) ...")
    import coprocessor
    coprocessor.initialize()
    coprocessor.addscript(catalystscript)
    id_map = create_cell_global_id_to_local_id_map(ug)
    for idx, time in enumerate(sorted(time_steps_dict.keys())):
      pt = ParallelTimer()
      read_time_step_data(time_steps_dict[time], ug, id_map)
      coprocessor.coprocess(time, idx, ug, paraview_output_file + '.pvd')
      pt.report_collective_time("catalyst output time %s (wall clock time) for step "
                                + str(idx))
    coprocessor.finalize()
  else:
    # File-output path: each rank writes its own grid chunk files.
    if rank == 0:
      print("Writing grid files over " + str(len(time_steps_dict)) + " time step(s) ...")
    write_grid_chunk(ug, rank, size, grid_desc, time_steps_dict,
                     paraview_output_file)
コード例 #2
0
ファイル: fedriver.py プロジェクト: xj361685640/ParaView
mpirun -np 4 </path/to/pvbatch> --sym fedriver.py cpscript.py
"""
import numpy
import sys
from mpi4py import MPI

# Identify this process within the MPI job; only rank 0 prints status.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()

import fedatastructures

# Build the simulation's distributed grid and its attribute arrays.
grid = fedatastructures.GridClass([10, 12, 10], [2, 2, 2])
attributes = fedatastructures.AttributesClass(grid)
doCoprocessing = True

if doCoprocessing:
    # Register every Catalyst pipeline script passed on the command line.
    import coprocessor
    coprocessor.initialize()
    for script in sys.argv[1:]:
        if rank == 0:
            print('Using Catalyst script', script)
        coprocessor.addscript(script)

# Main simulation loop: advance the attributes and co-process each step.
for step in range(100):
    attributes.Update(step)
    if doCoprocessing:
        coprocessor.coprocess(step, step, grid, attributes)

if doCoprocessing:
    coprocessor.finalize()
コード例 #3
0
ファイル: fedriver.py プロジェクト: EricAlex/ThirdParty-dev
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:<Catalyst build dir>/lib
export PYTHONPATH=<Catalyst build dir>/lib:<Catalyst build dir>/lib/site-packages
"""
import numpy
import sys
from mpi4py import MPI

# Identify this process within the MPI job.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()

import fedatastructures

# Build the simulation's distributed grid and its attribute arrays.
grid = fedatastructures.GridClass([10, 12, 10], [2, 2, 2])
attributes = fedatastructures.AttributesClass(grid)
doCoprocessing = True

if doCoprocessing:
    # Import once here; the uses below are guarded by the same
    # doCoprocessing flag, so the name is always bound when needed.
    # (The original re-imported coprocessor on every loop iteration
    # and again before finalize -- redundant work.)
    import coprocessor
    coprocessor.initialize()
    coprocessor.addscript("cpscript.py")

# Main simulation loop: advance the attributes and co-process each step.
for i in range(100):
    attributes.Update(i)
    if doCoprocessing:
        coprocessor.coprocess(i, i, grid, attributes)

if doCoprocessing:
    coprocessor.finalize()