Example 1
ds2 = dsa.WrapDataObject(w2.GetOutput())
rtData2 = ds2.PointData['RTData']
grad2 = algs.gradient(rtData2)

npts = numpy.array(numpy.int32(ds.GetNumberOfPoints()))
total_npts = numpy.array(npts)
MPI.COMM_WORLD.Allreduce([npts, MPI.INT], [total_npts, MPI.INT], MPI.SUM)

# Test simple distributed data.
testArrays(rtData, rtData2, grad, grad2, total_npts)

# Check that we can disable parallelism by using a dummy controller
# even when a global controller is set
assert algs.sum(rtData / rtData,
                controller=vtk.vtkDummyController()) != total_npts

# Test where arrays are NoneArray on one of the ranks.
if size > 1:
    if rank == 0:
        rtData3 = rtData2
        grad3 = grad2
    else:
        rtData3 = dsa.NoneArray
        grad3 = dsa.NoneArray

    testArrays(rtData3, rtData2, grad3, grad2, total_npts)

# Test composite arrays
rtData3 = dsa.VTKCompositeDataArray([rtData, dsa.NoneArray])
grad3 = dsa.VTKCompositeDataArray([dsa.NoneArray, grad])
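The assert above works because passing controller=vtk.vtkDummyController() to a numpy_interface reduction limits it to the data held by the local process, even when a global MPI controller is installed. Below is a minimal serial sketch of that idea; the wavelet source and variable names are illustrative and not taken from the test above.

import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from vtk.numpy_interface import algorithms as algs

# Generate a small image with the usual 'RTData' point array.
wavelet = vtk.vtkRTAnalyticSource()
wavelet.Update()

data = dsa.WrapDataObject(wavelet.GetOutput())
rtdata = data.PointData['RTData']

# With a vtkDummyController the reduction stays local to this process,
# ignoring any global vtkMultiProcessController that may be set.
local_sum = algs.sum(rtdata, controller=vtk.vtkDummyController())
print(local_sum)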
Example 2
w2.Update()

ds2 = dsa.WrapDataObject(w2.GetOutput())
rtData2 = ds2.PointData['RTData']
grad2 = algs.gradient(rtData2)

npts = numpy.array(numpy.int32(ds.GetNumberOfPoints()))
total_npts = numpy.array(npts)
MPI.COMM_WORLD.Allreduce([npts, MPI.INT], [total_npts, MPI.INT], MPI.SUM)

# Test simple distributed data.
testArrays(rtData, rtData2, grad, grad2, total_npts)

# Check that we can disable parallelism by using a dummy controller
# even when a global controller is set
assert algs.sum(rtData / rtData, controller=vtk.vtkDummyController()) != total_npts

# Test where arrays are NoneArray on one of the ranks.
if size > 1:
    if rank == 0:
        rtData3 = rtData2
        grad3 = grad2
    else:
        rtData3 = dsa.NoneArray
        grad3 = dsa.NoneArray

    testArrays(rtData3, rtData2, grad3, grad2, total_npts)

# Test composite arrays
rtData3 = dsa.VTKCompositeDataArray([rtData, dsa.NoneArray])
grad3 = dsa.VTKCompositeDataArray([dsa.NoneArray, grad])
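The total point count in these examples comes from an mpi4py Allreduce over 0-d NumPy arrays. Here is a standalone sketch of that pattern, assuming mpi4py is available and the script is launched with something like mpiexec -n 4 python script.py; the local count is a placeholder for ds.GetNumberOfPoints().

import numpy
from mpi4py import MPI

# Placeholder for the per-rank point count (ds.GetNumberOfPoints() above).
local_npts = numpy.array(1000, dtype=numpy.int32)
total_npts = numpy.array(0, dtype=numpy.int32)

# Sum the per-rank counts; every rank receives the global total.
MPI.COMM_WORLD.Allreduce([local_npts, MPI.INT], [total_npts, MPI.INT], MPI.SUM)
print(MPI.COMM_WORLD.Get_rank(), int(total_npts))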
Example 4
  # run the external adaptive meshing command assembled above
  os.system(adaptivemeshcmd)

  # load vtk data
  for iii in [2,3,4]:
     vtkReader = vtk.vtkUnstructuredGridReader()
     vtkReader.SetFileName( "%s.%d.vtk" % (options.output,iii) )
     vtkReader.Update()


     ## vtkNew<vtkDummyController> controller;
     ## controller->Initialize(&argc, &argv, 1);
     ## vtkMultiProcessController::SetGlobalController(controller.Get());
     ## HACK for parallel write
     ## https://www.paraview.org/Bug/view.php?id=15813
     controller = vtk.vtkDummyController()
     vtk.vtkMultiProcessController.SetGlobalController(controller)

     # convert to exodus 
     vtkExodusIIWriter = vtk.vtkExodusIIWriter()
     #vtkExodusIIWriter.DebugOn()
     vtkExodusIIWriter.SetFileName("%s.%d.exo" % (options.output,iii))
     vtkExodusIIWriter.SetInputData(vtkReader.GetOutput())
     print(vtkExodusIIWriter)
     vtkExodusIIWriter.Update()

else:
  parser.print_help()
  print(options)
## with open("isosurface.1.node") as myfile:
##     lines = [list(filter(len,line.strip().split(' '))) for line in myfile]
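The commented C++ lines and the bug link in Example 4 describe why the writer needs a controller: vtkExodusIIWriter queries the global vtkMultiProcessController, so installing a vtkDummyController lets it run in a serial script. A minimal, self-contained sketch of the same conversion; "mesh.vtk" and "mesh.exo" are placeholder file names.

import vtk

reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName("mesh.vtk")
reader.Update()

# Workaround for the parallel-write issue referenced above: install a
# dummy (single-process) controller as the global controller.
controller = vtk.vtkDummyController()
vtk.vtkMultiProcessController.SetGlobalController(controller)

writer = vtk.vtkExodusIIWriter()
writer.SetFileName("mesh.exo")
writer.SetInputConnection(reader.GetOutputPort())
writer.Write()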