def test(self):
    """SimpleFilter should stamp each partitioned dataset of the input
    collection with a "counter" field array holding its index."""
    source = ic.vtkRTAnalyticSource()

    def make_image(zmin, zmax):
        # Produce one image slab covering the given z extent.
        source.SetWholeExtent(0, 10, 0, 10, zmin, zmax)
        source.Update()
        img = dm.vtkImageData()
        img.ShallowCopy(source.GetOutput())
        return img

    pds = dm.vtkPartitionedDataSet()
    pds.SetPartition(0, make_image(0, 5))
    pds.SetPartition(1, make_image(5, 10))

    pds_copy = dm.vtkPartitionedDataSet()
    pds_copy.ShallowCopy(pds)

    collection = dm.vtkPartitionedDataSetCollection()
    collection.SetPartitionedDataSet(0, pds)
    collection.SetPartitionedDataSet(1, pds_copy)

    filt = SimpleFilter()
    filt.SetInputDataObject(collection)
    filt.Update()

    result = filt.GetOutputDataObject(0)
    for idx in (0, 1):
        counter = result.GetPartitionedDataSet(
            idx).GetFieldData().GetArray("counter")
        self.assertEqual(counter.GetValue(0), idx)
def test(self):
    """Verify SimpleFilter writes a per-dataset "counter" field array."""
    extents = ((0, 10, 0, 10, 0, 5), (0, 10, 0, 10, 5, 10))
    src = ic.vtkRTAnalyticSource()

    parts = dm.vtkPartitionedDataSet()
    for i, ext in enumerate(extents):
        src.SetWholeExtent(*ext)
        src.Update()
        img = dm.vtkImageData()
        img.ShallowCopy(src.GetOutput())
        parts.SetPartition(i, img)

    parts_copy = dm.vtkPartitionedDataSet()
    parts_copy.ShallowCopy(parts)

    coll = dm.vtkPartitionedDataSetCollection()
    coll.SetPartitionedDataSet(0, parts)
    coll.SetPartitionedDataSet(1, parts_copy)

    f = SimpleFilter()
    f.SetInputDataObject(coll)
    f.Update()

    out = f.GetOutputDataObject(0)
    for i in (0, 1):
        arr = out.GetPartitionedDataSet(i).GetFieldData().GetArray("counter")
        self.assertEqual(arr.GetValue(0), i)
def test(self):
    """Round-trip a vtkPartitionedDataSetCollection through the legacy
    vtkCompositeDataWriter/Reader and check the structure survives."""
    p = dm.vtkPartitionedDataSet()
    s = ic.vtkRTAnalyticSource()
    s.SetWholeExtent(0, 10, 0, 10, 0, 5)
    s.Update()
    p1 = dm.vtkImageData()
    p1.ShallowCopy(s.GetOutput())
    s.SetWholeExtent(0, 10, 0, 10, 5, 10)
    s.Update()
    p2 = dm.vtkImageData()
    p2.ShallowCopy(s.GetOutput())
    p.SetPartition(0, p1)
    p.SetPartition(1, p2)
    p2 = dm.vtkPartitionedDataSet()
    p2.ShallowCopy(p)
    c = dm.vtkPartitionedDataSetCollection()
    c.SetPartitionedDataSet(0, p)
    c.SetPartitionedDataSet(1, p2)

    tmpdir = vtkGetTempDir()
    fname = tmpdir + "/testcompowriread.vtk"
    w = il.vtkCompositeDataWriter()
    w.SetInputData(c)
    w.SetFileName(fname)
    w.Write()

    r = il.vtkCompositeDataReader()
    r.SetFileName(fname)
    r.Update()

    o = r.GetOutputDataObject(0)
    self.assertTrue(o.IsA("vtkPartitionedDataSetCollection"))
    nd = o.GetNumberOfPartitionedDataSets()
    self.assertEqual(nd, 2)

    for i in range(nd):
        p = o.GetPartitionedDataSet(i)
        p2 = c.GetPartitionedDataSet(i)
        self.assertTrue(p.IsA("vtkPartitionedDataSet"))
        self.assertEqual(p.GetNumberOfPartitions(), 2)
        # BUG FIX: previously compared p's partition against itself, which
        # trivially passes; compare against the original collection (p2).
        self.assertEqual(p.GetPartition(0).GetNumberOfCells(),
                         p2.GetPartition(0).GetNumberOfCells())

    # Drop the reader so the file handle is released before removal.
    del r
    import gc
    gc.collect()
    os.remove(fname)
def test(self):
    """Write a vtkPartitionedDataSetCollection with the legacy composite
    writer, read it back, and verify types, counts and cell totals."""
    p = dm.vtkPartitionedDataSet()
    s = ic.vtkRTAnalyticSource()
    s.SetWholeExtent(0, 10, 0, 10, 0, 5)
    s.Update()
    p1 = dm.vtkImageData()
    p1.ShallowCopy(s.GetOutput())
    s.SetWholeExtent(0, 10, 0, 10, 5, 10)
    s.Update()
    p2 = dm.vtkImageData()
    p2.ShallowCopy(s.GetOutput())
    p.SetPartition(0, p1)
    p.SetPartition(1, p2)
    p2 = dm.vtkPartitionedDataSet()
    p2.ShallowCopy(p)
    c = dm.vtkPartitionedDataSetCollection()
    c.SetPartitionedDataSet(0, p)
    c.SetPartitionedDataSet(1, p2)

    tmpdir = vtkGetTempDir()
    fname = tmpdir + "/testcompowriread.vtk"
    w = il.vtkCompositeDataWriter()
    w.SetInputData(c)
    w.SetFileName(fname)
    w.Write()

    r = il.vtkCompositeDataReader()
    r.SetFileName(fname)
    r.Update()

    o = r.GetOutputDataObject(0)
    self.assertTrue(o.IsA("vtkPartitionedDataSetCollection"))
    nd = o.GetNumberOfPartitionedDataSets()
    self.assertEqual(nd, 2)

    for i in range(nd):
        p = o.GetPartitionedDataSet(i)
        p2 = c.GetPartitionedDataSet(i)
        self.assertTrue(p.IsA("vtkPartitionedDataSet"))
        self.assertEqual(p.GetNumberOfPartitions(), 2)
        # BUG FIX: the round-tripped cell count must be checked against
        # the source collection's partition (was p vs. p itself).
        self.assertEqual(p.GetPartition(0).GetNumberOfCells(),
                         p2.GetPartition(0).GetNumberOfCells())

    # Release the reader before deleting the file it has open.
    del r
    import gc
    gc.collect()
    os.remove(fname)
def _RequestParticleData(self, executive, poutput, outInfo):
    """Fill *poutput* with one vtkPartitionedDataSet per enabled particle
    species, each holding a single unstructured grid of loaded arrays."""
    from vtkmodules.numpy_interface import dataset_adapter as dsa
    from vtkmodules.vtkCommonDataModel import vtkUnstructuredGrid, vtkPartitionedDataSet

    piece = outInfo.Get(executive.UPDATE_PIECE_NUMBER())
    npieces = outInfo.Get(executive.UPDATE_NUMBER_OF_PIECES())

    # Resolve the requested time step to a series iteration.
    data_time = self._get_update_time(outInfo)
    iteration = self._series.iterations[self._timemap[data_time]]

    # Group the enabled particle arrays by their owning species.
    selection = self._particlearrayselection
    array_by_species = {}
    for i in range(selection.GetNumberOfArrays()):
        if not selection.GetArraySetting(i):
            continue
        names = self._get_particle_array_and_component(
            iteration, selection.GetArrayName(i))
        if names[0] and self._speciesselection.ArrayIsEnabled(names[0]):
            array_by_species.setdefault(names[0], []).append(names)

    # One partitioned dataset (wrapping one grid) per species, in order.
    for ids, (species, arrays) in enumerate(array_by_species.items()):
        ugrid = vtkUnstructuredGrid()
        pds = vtkPartitionedDataSet()
        pds.SetPartition(0, ugrid)
        poutput.SetPartitionedDataSet(ids, pds)
        self._load_species(iteration, species, arrays, piece, npieces,
                           dsa.WrapDataObject(ugrid))
def test(self):
    """Round-trip a vtkPartitionedDataSet through the XML writer/reader
    (.vtpd) and verify partition types and cell counts."""
    p = dm.vtkPartitionedDataSet()
    s = ic.vtkRTAnalyticSource()
    s.SetWholeExtent(0, 10, 0, 10, 0, 5)
    s.Update()
    p1 = dm.vtkImageData()
    p1.ShallowCopy(s.GetOutput())
    s.SetWholeExtent(0, 10, 0, 10, 5, 10)
    s.Update()
    p2 = dm.vtkImageData()
    p2.ShallowCopy(s.GetOutput())
    p.SetPartition(0, p1)
    p.SetPartition(1, p2)

    tmpdir = vtkGetTempDir()
    fname = tmpdir + "/testxmlpartds.vtpd"
    w = ixml.vtkXMLPartitionedDataSetWriter()
    w.SetInputData(p)
    w.SetFileName(fname)
    w.Write()

    r = ixml.vtkXMLPartitionedDataSetReader()
    r.SetFileName(fname)
    r.Update()

    o = r.GetOutputDataObject(0)
    # BUG FIX: this was a leftover debug print() of the IsA() result,
    # so a type regression would never fail the test; assert it instead.
    self.assertTrue(o.IsA("vtkPartitionedDataSet"))
    # Renamed from `np` to avoid shadowing the conventional numpy alias.
    num_partitions = o.GetNumberOfPartitions()
    self.assertEqual(num_partitions, 2)
    for i in range(num_partitions):
        d = o.GetPartition(i)
        d2 = p.GetPartition(i)
        self.assertTrue(d.IsA("vtkImageData"))
        self.assertEqual(d.GetNumberOfCells(), d2.GetNumberOfCells())
    os.remove(fname)
def RequestData(self, request, inInfoVec, outInfoVec):
    """Parse the SAVG file named by ``self._filename`` and fill the output
    vtkPartitionedDataSetCollection with up to three partitioned datasets
    named "Lines", "Points" and "Polygons".

    Returns 1 on success, 0 when no file name has been set.
    """
    from vtkmodules.vtkCommonCore import vtkFloatArray, vtkPoints
    from vtkmodules.vtkCommonDataModel import (
        vtkCellArray, vtkCompositeDataSet, vtkPartitionedDataSet,
        vtkPartitionedDataSetCollection, vtkPolyData)
    output = vtkPartitionedDataSetCollection.GetData(outInfoVec)
    partitioned_datasets = []
    partitioned_dataset_names = []

    # Parse line file
    if not self._filename:
        print_error("SAVGReader requires a FileName")
        return 0

    # Stores lines of text from the file associated with each group of
    # geometries encountered.
    geometries = {
        "lines": [],
        "points": [],
        "poly": []
    }

    # Read the file and build up data structure to hold the primitives
    with open(self._filename, "r") as file:
        current = None
        for line in file:
            # Strip trailing "#" comments and normalize to lower case.
            parts = line.split("#")
            line = parts[0].strip().lower()

            if len(line) < 1:
                continue

            if not_supported(line):
                continue

            if line.startswith("lin"):
                # Start a new line batch; optional trailing "r g b a".
                geometries["lines"].append({
                    "rgba": None,
                    "values": []
                })
                current = geometries["lines"][-1]
                line_parts = line.split(" ")
                if len(line_parts) == 5:
                    current["rgba"] = [float(n) for n in line_parts[1:]]
            elif line.startswith("point"):
                # Start a new point batch; optional trailing "r g b a".
                geometries["points"].append({
                    "rgba": None,
                    "values": [],
                })
                current = geometries["points"][-1]
                line_parts = line.split(" ")
                if len(line_parts) == 5:
                    current["rgba"] = [float(n) for n in line_parts[1:]]
            elif line.startswith("poly"):
                # Start a new polygon batch; either "poly npts" or
                # "poly r g b a npts".
                geometries["poly"].append({
                    "rgba": None,
                    "npts": None,
                    "values": [],
                })
                current = geometries["poly"][-1]
                line_parts = line.split(" ")
                if len(line_parts) == 2:
                    current["npts"] = int(line_parts[1])
                elif len(line_parts) == 6:
                    current["rgba"] = [float(n) for n in line_parts[1:5]]
                    current["npts"] = int(line_parts[5])
            elif line.startswith("end"):
                current = None
            else:
                # Coordinate data line for whichever batch is open.
                if current is not None:
                    if "npts" in current and current["npts"] is not None:
                        # polygon, known num pts per poly
                        if len(current["values"]) == current["npts"]:
                            # Reached the number of points for the current one,
                            # start a new one.
                            geometries["poly"].append({
                                "npts": current["npts"],
                                "rgba": current["rgba"],
                                "values": []
                            })
                            current = geometries["poly"][-1]
                    pt, pt_col, pt_n = get_coords_from_line(line)
                    if pt:
                        current["values"].append({
                            "pos": pt,
                        })
                        # Per-point color overrides the batch color.
                        color = pt_col or current["rgba"]
                        if color:
                            current["values"][-1]["col"] = color
                        if pt_n:
                            current["values"][-1]["norm"] = pt_n

    # Build lines polydata if there were any lines
    if geometries["lines"]:
        line_points = vtkPoints()
        line_cells = vtkCellArray()
        line_point_colors = vtkFloatArray()
        line_point_colors.SetNumberOfComponents(4)
        line_point_colors.SetName("rgba_colors")
        line_point_normals = vtkFloatArray()
        line_point_normals.SetNumberOfComponents(3)
        line_point_normals.SetName("vertex_normals")

        pt_count = 0
        for batch in geometries["lines"]:
            num_in_batch = len(batch["values"])
            first_in_batch = True
            for coord in batch["values"]:
                if "pos" in coord:
                    line_points.InsertNextPoint(coord["pos"])
                    if "norm" in coord:
                        line_point_normals.InsertNextTuple(coord["norm"])
                    if "col" in coord:
                        line_point_colors.InsertNextTuple(coord["col"])
                    if first_in_batch:
                        # One polyline cell per batch of points.
                        line_cells.InsertNextCell(num_in_batch)
                        first_in_batch = False
                    line_cells.InsertCellPoint(pt_count)
                    pt_count += 1

        output_lines = vtkPolyData()
        output_lines.SetPoints(line_points)
        output_lines.SetLines(line_cells)
        # Only attach the optional arrays if any tuples were collected.
        if line_point_colors.GetNumberOfTuples() > 0:
            output_lines.GetPointData().AddArray(line_point_colors)
        if line_point_normals.GetNumberOfTuples() > 0:
            output_lines.GetPointData().AddArray(line_point_normals)

        ds = vtkPartitionedDataSet()
        ds.SetNumberOfPartitions(1)
        ds.SetPartition(0, output_lines)

        partitioned_datasets.append(ds)
        partitioned_dataset_names.append("Lines")

    # Build the points polydata if we found points
    if geometries["points"]:
        p_points = vtkPoints()
        p_cells = vtkCellArray()
        p_point_colors = vtkFloatArray()
        p_point_colors.SetNumberOfComponents(4)
        p_point_colors.SetName("rgba_colors")
        p_point_normals = vtkFloatArray()
        p_point_normals.SetNumberOfComponents(3)
        p_point_normals.SetName("vertex_normals")

        p_count = 0
        for batch in geometries["points"]:
            num_in_batch = len(batch["values"])
            first_in_batch = True
            for coord in batch["values"]:
                if "pos" in coord:
                    p_points.InsertNextPoint(coord["pos"])
                    if "norm" in coord:
                        p_point_normals.InsertNextTuple(coord["norm"])
                    if "col" in coord:
                        p_point_colors.InsertNextTuple(coord["col"])
                    if first_in_batch:
                        # One vertex cell spanning the whole batch.
                        p_cells.InsertNextCell(num_in_batch)
                        first_in_batch = False
                    p_cells.InsertCellPoint(p_count)
                    p_count += 1

        output_points = vtkPolyData()
        output_points.SetPoints(p_points)
        output_points.SetVerts(p_cells)
        if p_point_colors.GetNumberOfTuples() > 0:
            output_points.GetPointData().AddArray(p_point_colors)
        if p_point_normals.GetNumberOfTuples() > 0:
            output_points.GetPointData().AddArray(p_point_normals)

        ds = vtkPartitionedDataSet()
        ds.SetNumberOfPartitions(1)
        ds.SetPartition(0, output_points)

        partitioned_datasets.append(ds)
        partitioned_dataset_names.append("Points")

    # Build the polygons if there were any
    if geometries["poly"]:
        poly_points = vtkPoints()
        poly_cells = vtkCellArray()
        poly_point_colors = vtkFloatArray()
        poly_point_colors.SetNumberOfComponents(4)
        poly_point_colors.SetName("rgba_colors")
        poly_point_normals = vtkFloatArray()
        poly_point_normals.SetNumberOfComponents(3)
        poly_point_normals.SetName("vertex_normals")

        pt_count = 0
        for batch in geometries["poly"]:
            num_in_batch = len(batch["values"])
            # Skip empty trailing batches (e.g. from the npts rollover).
            if num_in_batch < 1:
                continue
            first_in_batch = True
            for coord in batch["values"]:
                if "pos" in coord:
                    poly_points.InsertNextPoint(coord["pos"])
                    if "norm" in coord:
                        poly_point_normals.InsertNextTuple(coord["norm"])
                    if "col" in coord:
                        poly_point_colors.InsertNextTuple(coord["col"])
                    if first_in_batch:
                        np_in_cell = num_in_batch
                        poly_cells.InsertNextCell(np_in_cell)
                        first_in_batch = False
                    poly_cells.InsertCellPoint(pt_count)
                    pt_count += 1

        output_polys = vtkPolyData()
        output_polys.SetPoints(poly_points)
        output_polys.SetPolys(poly_cells)
        if poly_point_colors.GetNumberOfTuples() > 0:
            output_polys.GetPointData().AddArray(poly_point_colors)
        if poly_point_normals.GetNumberOfTuples() > 0:
            output_polys.GetPointData().AddArray(poly_point_normals)

        ds = vtkPartitionedDataSet()
        ds.SetNumberOfPartitions(1)
        ds.SetPartition(0, output_polys)

        partitioned_datasets.append(ds)
        partitioned_dataset_names.append("Polygons")

    # Add any partitioned datasets we created
    output.SetNumberOfPartitionedDataSets(len(partitioned_datasets))

    for idx, pds in enumerate(partitioned_datasets):
        output.SetPartitionedDataSet(idx, pds)
        output.GetMetaData(idx).Set(vtkCompositeDataSet.NAME(),
                                    partitioned_dataset_names[idx])

    return 1
def RequestData(self, request, inInfoVec, outInfoVec):
    """Produce the mesh output (vtkPartitionedDataSet, port 0) and the
    particle output (vtkPartitionedDataSetCollection, port 1) for the
    current update time step of the openPMD series.

    Returns 1 on success, 0 when the openpmd_api module is missing or
    the selected meshes have inconsistent shape/spacing/offset.
    """
    global _has_openpmd
    if not _has_openpmd:
        print_error("Required Python module 'openpmd_api' missing!")
        return 0

    from vtkmodules.vtkCommonDataModel import vtkImageData, vtkUnstructuredGrid
    from vtkmodules.vtkCommonDataModel import vtkPartitionedDataSet, vtkPartitionedDataSetCollection
    from vtkmodules.vtkCommonExecutionModel import vtkExtentTranslator, vtkStreamingDemandDrivenPipeline
    from vtkmodules.numpy_interface import dataset_adapter as dsa

    executive = vtkStreamingDemandDrivenPipeline
    output = vtkPartitionedDataSet.GetData(outInfoVec, 0)
    poutput = vtkPartitionedDataSetCollection.GetData(outInfoVec, 1)
    outInfo = outInfoVec.GetInformationObject(0)
    piece = outInfo.Get(executive.UPDATE_PIECE_NUMBER())
    npieces = outInfo.Get(executive.UPDATE_NUMBER_OF_PIECES())
    nghosts = outInfo.Get(executive.UPDATE_NUMBER_OF_GHOST_LEVELS())
    et = vtkExtentTranslator()

    # Map the requested time to an iteration of the series.
    data_time = self._get_update_time(outInfo)
    idx = self._timemap[data_time]
    itr = self._series.iterations[idx]

    # Collect the enabled mesh arrays.
    arrays = []
    narrays = self._arrayselection.GetNumberOfArrays()
    for i in range(narrays):
        if self._arrayselection.GetArraySetting(i):
            name = self._arrayselection.GetArrayName(i)
            arrays.append((name, self._find_array(itr, name)))

    # All selected meshes must agree on shape, spacing and grid offset;
    # bail out (return 0) on the first mismatch.
    shp = None
    spacing = None
    theta_modes = None
    grid_offset = None
    for _, ary in arrays:
        var = ary[0]
        for name, scalar in var.items():
            shape = scalar.shape
            break
        spc = list(ary[1])
        if not spacing:
            spacing = spc
        elif spacing != spc:
            # all meshes need to have the same spacing
            return 0
        offset = list(ary[2])
        if not grid_offset:
            grid_offset = offset
        elif grid_offset != offset:
            # BUG FIX (comment only): this check is about the grid offset,
            # not the spacing; all meshes need to have the same grid offset.
            return 0
        if not shp:
            shp = shape
        elif shape != shp:
            # all arrays need to have the same shape
            return 0
        if not theta_modes:
            theta_modes = ary[3]

    if theta_modes:
        # thetaMode (cylindrical) data: expand mode 0 onto a regular
        # (z, r, theta) grid.
        et.SetWholeExtent(0, shp[0] - 1, 0, shp[1] - 1, 0, shp[2] - 1)
        et.SetSplitModeToZSlab()  # note: Y and Z are both fine
        et.SetPiece(piece)
        et.SetNumberOfPieces(npieces)
        et.SetGhostLevel(nghosts)
        et.PieceToExtentByPoints()
        ext = et.GetExtent()

        chunk_offset = [ext[0], ext[2], ext[4]]
        chunk_extent = [
            ext[1] - ext[0] + 1, ext[3] - ext[2] + 1, ext[5] - ext[4] + 1
        ]

        data = []
        nthetas = 100  # user parameter
        thetas = np.linspace(0., 2. * np.pi, nthetas)
        chunk_cyl_shape = (chunk_extent[1], chunk_extent[2], nthetas)  # z, r, theta
        for name, var in arrays:
            cyl_values = np.zeros(chunk_cyl_shape)
            values = self._load_array(var[0], chunk_offset, chunk_extent)
            self._series.flush()
            # (debug print statements removed here)
            # Only mode m=0 is replicated around theta for now.
            for ntheta in range(nthetas):
                cyl_values[:, :, ntheta] += values[0, :, :]
            data.append((name, cyl_values))
            # add all other modes via loop
            # for m in range(theta_modes):

        cyl_spacing = [spacing[0], spacing[1], thetas[1] - thetas[0]]
        z_coord = np.linspace(0., cyl_spacing[0] * chunk_cyl_shape[0],
                              chunk_cyl_shape[0])
        r_coord = np.linspace(0., cyl_spacing[1] * chunk_cyl_shape[1],
                              chunk_cyl_shape[1])
        t_coord = thetas

        # to cartesian
        # NOTE(review): x_coord/y_coord/z_coord below are currently unused
        # (the output is a vtkImageData in (z, r, theta) index space);
        # kept for a future point-mesh output — confirm before removing.
        cyl_coords = np.meshgrid(r_coord, z_coord, t_coord)
        rs = cyl_coords[1]
        zs = cyl_coords[0]
        thetas = cyl_coords[2]
        y_coord = rs * np.sin(thetas)
        x_coord = rs * np.cos(thetas)
        z_coord = zs
        # mesh_pts = np.zeros((chunk_cyl_shape[0], chunk_cyl_shape[1], chunk_cyl_shape[2], 3))
        # mesh_pts[:, :, :, 0] = z_coord

        img = vtkImageData()
        img.SetExtent(
            chunk_offset[1], chunk_offset[1] + chunk_cyl_shape[0] - 1,
            chunk_offset[2], chunk_offset[2] + chunk_cyl_shape[1] - 1,
            0, nthetas - 1)
        img.SetSpacing(cyl_spacing)

        imgw = dsa.WrapDataObject(img)
        output.SetPartition(0, img)
        for name, array in data:
            # Reorder (z, r, theta) into VTK's x-fastest point ordering.
            imgw.PointData.append(
                array.transpose(2, 1, 0).flatten(order='C'), name)
    else:
        # Cartesian data: chunked read straight into a vtkImageData.
        et.SetWholeExtent(0, shp[0] - 1, 0, shp[1] - 1, 0, shp[2] - 1)
        et.SetPiece(piece)
        et.SetNumberOfPieces(npieces)
        et.SetGhostLevel(nghosts)
        et.PieceToExtent()
        ext = et.GetExtent()
        chunk_offset = [ext[0], ext[2], ext[4]]
        chunk_extent = [
            ext[1] - ext[0] + 1, ext[3] - ext[2] + 1, ext[5] - ext[4] + 1
        ]

        data = []
        for name, var in arrays:
            values = self._load_array(var[0], chunk_offset, chunk_extent)
            self._series.flush()
            data.append((name, values))

        img = vtkImageData()
        img.SetExtent(ext[0], ext[1], ext[2], ext[3], ext[4], ext[5])
        img.SetSpacing(spacing)
        img.SetOrigin(grid_offset)

        # Recompute the unghosted extent to mark ghost points.
        et.SetGhostLevel(0)
        et.PieceToExtent()
        ext = et.GetExtent()
        ext = [ext[0], ext[1], ext[2], ext[3], ext[4], ext[5]]
        img.GenerateGhostArray(ext)
        imgw = dsa.WrapDataObject(img)
        output.SetPartition(0, img)
        for name, array in data:
            imgw.PointData.append(array, name)

    # Particles: group enabled particle arrays by species and load each
    # species into its own vtkPartitionedDataSet holding one grid.
    itr = self._series.iterations[idx]
    array_by_species = {}
    narrays = self._particlearrayselection.GetNumberOfArrays()
    for i in range(narrays):
        if self._particlearrayselection.GetArraySetting(i):
            name = self._particlearrayselection.GetArrayName(i)
            names = self._get_particle_array_and_component(itr, name)
            if names[0] and self._speciesselection.ArrayIsEnabled(names[0]):
                if not names[0] in array_by_species:
                    array_by_species[names[0]] = []
                array_by_species[names[0]].append(names)
    ids = 0
    for species, arrays in array_by_species.items():
        pds = vtkPartitionedDataSet()
        ugrid = vtkUnstructuredGrid()
        pds.SetPartition(0, ugrid)
        poutput.SetPartitionedDataSet(ids, pds)
        ids += 1
        self._load_species(itr, species, arrays, piece, npieces,
                           dsa.WrapDataObject(ugrid))
    return 1
def test(self):
    """Exercise SimpleFilter, PartitionAwareFilter,
    PartitionCollectionAwareFilter and CompositeAwareFilter on the same
    vtkPartitionedDataSetCollection and check each filter's traversal
    granularity via the "counter" field arrays it produces."""
    p = dm.vtkPartitionedDataSet()
    s = ic.vtkRTAnalyticSource()
    s.SetWholeExtent(0, 10, 0, 10, 0, 5)
    s.Update()
    p1 = dm.vtkImageData()
    p1.ShallowCopy(s.GetOutput())
    s.SetWholeExtent(0, 10, 0, 10, 5, 10)
    s.Update()
    p2 = dm.vtkImageData()
    p2.ShallowCopy(s.GetOutput())
    p.SetPartition(0, p1)
    p.SetPartition(1, p2)
    p2 = dm.vtkPartitionedDataSet()
    p2.ShallowCopy(p)
    c = dm.vtkPartitionedDataSetCollection()
    c.SetPartitionedDataSet(0, p)
    c.SetPartitionedDataSet(1, p2)

    # SimpleFilter: counts every leaf dataset.
    sf = SimpleFilter()
    sf.SetInputDataObject(c)
    sf.Update()
    self.assertEqual(
        sf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 2)
    for i in (0, 1):
        pdsc = sf.GetOutputDataObject(0)
        self.assertEqual(pdsc.GetClassName(),
                         "vtkPartitionedDataSetCollection")
        pds = pdsc.GetPartitionedDataSet(i)
        self.assertEqual(pds.GetClassName(), "vtkPartitionedDataSet")
        self.assertEqual(pds.GetNumberOfPartitions(), 2)
        for j in (0, 1):
            part = pds.GetPartition(j)
            countArray = part.GetFieldData().GetArray("counter")
            info = countArray.GetInformation()
            self.assertEqual(countArray.GetValue(0), i * 2 + j)
            self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                             "vtkImageData")

    # PartitionAwareFilter: counts each vtkPartitionedDataSet, not leaves.
    pf = PartitionAwareFilter()
    pf.SetInputDataObject(c)
    pf.Update()
    self.assertEqual(
        pf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 2)
    for i in (0, 1):
        pdsc = pf.GetOutputDataObject(0)
        self.assertEqual(pdsc.GetClassName(),
                         "vtkPartitionedDataSetCollection")
        pds = pdsc.GetPartitionedDataSet(i)
        self.assertEqual(pds.GetClassName(), "vtkPartitionedDataSet")
        self.assertEqual(pds.GetNumberOfPartitions(), 0)
        countArray = pds.GetFieldData().GetArray("counter")
        info = countArray.GetInformation()
        self.assertEqual(countArray.GetValue(0), i)
        self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                         "vtkPartitionedDataSet")

    # PartitionCollectionAwareFilter: counts the collection as a whole.
    pcf = PartitionCollectionAwareFilter()
    pcf.SetInputDataObject(c)
    pcf.Update()
    self.assertEqual(
        pcf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 0)
    pdsc = pcf.GetOutputDataObject(0)
    self.assertEqual(pdsc.GetClassName(), "vtkPartitionedDataSetCollection")
    countArray = pdsc.GetFieldData().GetArray("counter")
    info = countArray.GetInformation()
    self.assertEqual(countArray.GetValue(0), 0)
    self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                     "vtkPartitionedDataSetCollection")

    # CompositeAwareFilter: also treats the collection as one object.
    cf = CompositeAwareFilter()
    cf.SetInputDataObject(c)
    cf.Update()
    # BUG FIX: these assertions previously re-checked pcf's output, so
    # CompositeAwareFilter was never actually verified; check cf instead.
    self.assertEqual(
        cf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 0)
    pdsc = cf.GetOutputDataObject(0)
    self.assertEqual(pdsc.GetClassName(), "vtkPartitionedDataSetCollection")
    countArray = pdsc.GetFieldData().GetArray("counter")
    info = countArray.GetInformation()
    self.assertEqual(countArray.GetValue(0), 0)
    self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                     "vtkPartitionedDataSetCollection")
def test(self):
    """Run the four composite-aware test filters over one
    vtkPartitionedDataSetCollection and verify, per filter, how many
    data objects it visited (via the "counter" field arrays)."""
    p = dm.vtkPartitionedDataSet()
    s = ic.vtkRTAnalyticSource()
    s.SetWholeExtent(0, 10, 0, 10, 0, 5)
    s.Update()
    p1 = dm.vtkImageData()
    p1.ShallowCopy(s.GetOutput())
    s.SetWholeExtent(0, 10, 0, 10, 5, 10)
    s.Update()
    p2 = dm.vtkImageData()
    p2.ShallowCopy(s.GetOutput())
    p.SetPartition(0, p1)
    p.SetPartition(1, p2)
    p2 = dm.vtkPartitionedDataSet()
    p2.ShallowCopy(p)
    c = dm.vtkPartitionedDataSetCollection()
    c.SetPartitionedDataSet(0, p)
    c.SetPartitionedDataSet(1, p2)

    # SimpleFilter: one counter per leaf dataset.
    sf = SimpleFilter()
    sf.SetInputDataObject(c)
    sf.Update()
    self.assertEqual(
        sf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 2)
    for i in (0, 1):
        pdsc = sf.GetOutputDataObject(0)
        self.assertEqual(pdsc.GetClassName(),
                         "vtkPartitionedDataSetCollection")
        pds = pdsc.GetPartitionedDataSet(i)
        self.assertEqual(pds.GetClassName(), "vtkPartitionedDataSet")
        self.assertEqual(pds.GetNumberOfPartitions(), 2)
        for j in (0, 1):
            part = pds.GetPartition(j)
            countArray = part.GetFieldData().GetArray("counter")
            info = countArray.GetInformation()
            self.assertEqual(countArray.GetValue(0), i * 2 + j)
            self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                             "vtkImageData")

    # PartitionAwareFilter: one counter per vtkPartitionedDataSet.
    pf = PartitionAwareFilter()
    pf.SetInputDataObject(c)
    pf.Update()
    self.assertEqual(
        pf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 2)
    for i in (0, 1):
        pdsc = pf.GetOutputDataObject(0)
        self.assertEqual(pdsc.GetClassName(),
                         "vtkPartitionedDataSetCollection")
        pds = pdsc.GetPartitionedDataSet(i)
        self.assertEqual(pds.GetClassName(), "vtkPartitionedDataSet")
        self.assertEqual(pds.GetNumberOfPartitions(), 0)
        countArray = pds.GetFieldData().GetArray("counter")
        info = countArray.GetInformation()
        self.assertEqual(countArray.GetValue(0), i)
        self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                         "vtkPartitionedDataSet")

    # PartitionCollectionAwareFilter: one counter for the collection.
    pcf = PartitionCollectionAwareFilter()
    pcf.SetInputDataObject(c)
    pcf.Update()
    self.assertEqual(
        pcf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 0)
    pdsc = pcf.GetOutputDataObject(0)
    self.assertEqual(pdsc.GetClassName(), "vtkPartitionedDataSetCollection")
    countArray = pdsc.GetFieldData().GetArray("counter")
    info = countArray.GetInformation()
    self.assertEqual(countArray.GetValue(0), 0)
    self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                     "vtkPartitionedDataSetCollection")

    # CompositeAwareFilter: one counter for the collection.
    cf = CompositeAwareFilter()
    cf.SetInputDataObject(c)
    cf.Update()
    # BUG FIX: the assertions below used pcf (the previous filter) again,
    # leaving CompositeAwareFilter untested; they now check cf's output.
    self.assertEqual(
        cf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 0)
    pdsc = cf.GetOutputDataObject(0)
    self.assertEqual(pdsc.GetClassName(), "vtkPartitionedDataSetCollection")
    countArray = pdsc.GetFieldData().GetArray("counter")
    info = countArray.GetInformation()
    self.assertEqual(countArray.GetValue(0), 0)
    self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                     "vtkPartitionedDataSetCollection")