def extractVTK(vtk_path, clipVal):
    vol = XMLImageDataReader(FileName=vtk_path)
    vol.UpdatePipeline()

    # Get extents (voxels) and bounds (mm)
    extents = vol.GetDataInformation().GetExtent()
    bounds = vol.GetDataInformation().GetBounds()
    volShape = np.array(extents[1::2]) + 1

    # Get slices
    slice_jets = getSlice(vol, volShape, y=-3)
    slice_impingement = getSlice(vol, volShape, y=1)
    slice_near = getSlice(vol, volShape, y=5)
    slice_far = getSlice(vol, volShape, y=9)
    slices = [slice_jets, slice_impingement, slice_near, slice_far]

    # Get smoothed surface
    surface = getSurface(vol, clipVal)

    # Get grid information as vectors for X/Y/Z (mm)
    grid = [None] * 3
    grid[0] = np.linspace(bounds[0], bounds[1], volShape[0])
    grid[1] = np.linspace(bounds[2], bounds[3], volShape[1])
    grid[2] = np.linspace(bounds[4], bounds[5], volShape[2])

    # Get dx/dy/dz
    dx = np.abs(grid[0][1] - grid[0][0])
    dy = np.abs(grid[1][1] - grid[1][0])
    dz = np.abs(grid[2][1] - grid[2][0])

    # Get volume mass
    mass = dsa.WrapDataObject(sm.Fetch(vol)).PointData[0].sum()
    mass *= (dx * dy * dz)

    return slices, grid, surface, mass
def getSlice(vol, volShape, y):
    # Create slice
    slicer = Slice(Input=vol)
    slicer.SliceType = 'Plane'
    slicer.SliceOffsetValues = [0.0]
    slicer.Crinkleslice = 0
    slicer.Triangulatetheslice = 1
    slicer.Mergeduplicatedpointsintheslice = 1

    # Slice through the chosen Y value with a Y-normal plane
    slicer.SliceType.Origin = [0, y, 0]
    slicer.SliceType.Normal = [0, 1, 0]

    # Update pipeline
    slicer.UpdatePipeline()

    # Convert slice to numpy array
    slice_numpy = sm.Fetch(slicer)
    slice_numpy = dsa.WrapDataObject(slice_numpy)
    slice_numpy = slice_numpy.PointData[0]

    # Re-shape vector into a 2D slice. VTK orders point data with X varying
    # fastest, so the flat array is reshaped to (Z, X) and then transposed
    # to get an (X, Z) layout.
    slice_numpy = np.array(slice_numpy).reshape((volShape[2], volShape[0])).T

    # Convert from int16 back to LVF as float
    slice_numpy = slice_numpy.astype('float32') / 1000

    return slice_numpy
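# `getSurface` is called from extractVTK above but is not defined in these
# excerpts. A minimal sketch of a compatible helper, assuming the same
# imports as getSlice and that the surface is an iso-contour of the scalar
# field at clipVal followed by smoothing. The array name 'LVF' and the
# smoothing settings are assumptions, not the original implementation:
def getSurface(vol, clipVal):
    # Iso-surface the scalar field at the clip value
    contour = Contour(Input=vol)
    contour.ContourBy = ['POINTS', 'LVF']  # placeholder array name
    contour.Isosurfaces = [clipVal]

    # Smooth the extracted iso-surface
    smoother = Smooth(Input=contour)
    smoother.NumberofIterations = 50  # assumed smoothing strength
    smoother.UpdatePipeline()

    return dsa.WrapDataObject(sm.Fetch(smoother))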
def DoCoProcessing(datadescription):
    "Callback to do co-processing for current timestep"
    global coprocessor
    timestep = datadescription.GetTimeStep()
    print("Timestep: %d Time: %f" % (timestep, datadescription.GetTime()))

    # Update the coprocessor by providing it the newly generated simulation
    # data. If the pipeline hasn't been set up yet, this will set it up.
    coprocessor.UpdateProducers(datadescription)

    pipeline = coprocessor.Pipeline
    grid = servermanager.Fetch(pipeline.Wavelet1)
    array = grid.GetPointData().GetArray("RTData")
    array_range = array.GetRange()

    if timestep == 0:
        if array_range[0] < 37 or array_range[0] > 38 or array_range[1] < 276 or array_range[1] > 277:
            print('ERROR: bad range of (%f, %f) for step 0' % array_range)
            sys.exit(1)
    if timestep == 1:
        if array_range[0] < 74 or array_range[0] > 76 or array_range[1] < 443 or array_range[1] > 445:
            print('ERROR: bad range of (%f, %f) for step 1' % array_range)
            sys.exit(1)
    if timestep == 2:
        if array_range[0] < 77 or array_range[0] > 79 or array_range[1] < 357 or array_range[1] > 458:
            print('ERROR: bad range of (%f, %f) for step 2' % array_range)
            sys.exit(1)
    if timestep == 3:
        if array_range[0] < -43 or array_range[0] > 44 or array_range[1] < 304 or array_range[1] > 305:
            print('ERROR: bad range of (%f, %f) for step 3' % array_range)
            sys.exit(1)
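# The four nearly identical range checks above invite a data-driven helper.
# A minimal sketch: the helper name `check_range` and the `expected` table
# (which just restates the constants used in DoCoProcessing) are assumptions,
# not part of the original test:
import sys

def check_range(timestep, array_range):
    # expected (min_lo, min_hi, max_lo, max_hi) per timestep
    expected = {
        0: (37, 38, 276, 277),
        1: (74, 76, 443, 445),
        2: (77, 79, 357, 458),
        3: (-43, 44, 304, 305),
    }
    if timestep not in expected:
        return
    min_lo, min_hi, max_lo, max_hi = expected[timestep]
    if not (min_lo <= array_range[0] <= min_hi
            and max_lo <= array_range[1] <= max_hi):
        print('ERROR: bad range of (%f, %f) for step %d'
              % (array_range[0], array_range[1], timestep))
        sys.exit(1)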
def point_data(self):
    vtk_object = sm.Fetch(self.filter)
    vtk_object = dsa.WrapDataObject(vtk_object)
    pd_df = pd.DataFrame()
    for key in self.point_keys:
        temp_dataset = np.array(vtk_object.PointData[key]).transpose()
        if len(temp_dataset.shape) != 1:
            # The dataset is a vector: store one column per component
            for idx, vector_element in enumerate(temp_dataset):
                new_key = f"{key}{self.vector_keys[idx]}"
                pd_df[new_key] = vector_element
        else:
            pd_df[key] = temp_dataset
    return pd_df.dropna()
def setAxisLabelsFromBounds(name, num=(10, 10, 5)):
    """Sets the axis labels from a given input data source. Use the num
    argument to control the number of labels along each axis. If num is a
    scalar, then a uniform number of labels is used on each axis.

    Args:
        name (str): The string name of the input source on the data pipeline
        num (tuple(int) or int): the number of labels for each axis

    Example:
        >>> import pvmacros as pvm
        >>> pvm.vis.setAxisLabelsFromBounds('TableToPoints1', num=(5, 10, 2))

    """
    import paraview.simple as pvs
    import paraview.servermanager as sm
    import numpy as np

    # Get the input data
    src = pvs.FindSource(name)
    data = sm.Fetch(src)
    xmin, xmax, ymin, ymax, zmin, zmax = data.GetBounds()
    if not isinstance(num, (tuple, list)):
        # A scalar was given: use the same number of labels on every axis
        num = [num] * 3
    # Cast as ints if needed
    num = [int(val) for val in num]
    # Calculate ranges for each axis
    xrng = np.linspace(xmin, xmax, num=num[0])
    yrng = np.linspace(ymin, ymax, num=num[1])
    zrng = np.linspace(zmin, zmax, num=num[2])
    # Set the axis labels
    customAxisTicks(xrng, axis=0, uniform=False)
    customAxisTicks(yrng, axis=1, uniform=False)
    customAxisTicks(zrng, axis=2, uniform=False)
    return
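# `customAxisTicks` is referenced above but not defined in these excerpts.
# A minimal sketch of a compatible implementation, assuming it pushes the
# tick values onto the active render view's AxesGrid; the body is an
# assumption, not the original pvmacros implementation:
def customAxisTicks(rng, axis=0, uniform=False):
    import paraview.simple as pvs
    view = pvs.GetActiveViewOrCreate('RenderView')
    ticks = list(rng)
    if axis == 0 or uniform:
        view.AxesGrid.XAxisUseCustomLabels = 1
        view.AxesGrid.XAxisLabels = ticks
    if axis == 1 or uniform:
        view.AxesGrid.YAxisUseCustomLabels = 1
        view.AxesGrid.YAxisLabels = ticks
    if axis == 2 or uniform:
        view.AxesGrid.ZAxisUseCustomLabels = 1
        view.AxesGrid.ZAxisLabels = ticks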
def field_keys(self):
    vtk_object = sm.Fetch(self.filter)
    vtk_object = dsa.WrapDataObject(vtk_object)
    return vtk_object.FieldData.keys()
point = PointSource(Center=[0.0, 0.5, 0.0], NumberOfPoints=1)
pprobe = ProbePoint(Source=point, Input=reader)

outfile = open("height.txt", 'w')
for time in timesteps:
    print("Time =" + str(time))
    outfile.write(str(time))
    phi_old = 99.99
    height = 0.0  # Column height
    probes[0].UpdatePipeline(time)
    fp = servermanager.Fetch(probes[0])
    pdata = fp.GetPointData()
    for i in range(opts.resolution + 1):
        phi = pdata.GetArray("phid").GetTuple1(i)
        print(phi)
        if (phi > 0.0) and (phi_old < 0.0):
            height = 0.35 * (float(i - 1) + (phi_old / (phi_old - phi))) / float(opts.resolution)
        phi_old = phi
    if height > 0.0:
        phi = 111.0
        while abs(phi) > opts.accuracy:
            point.Center = [opts.eps, 0.073, height]
            fp2 = servermanager.Fetch(pprobe)
            pdata2 = fp2.GetPointData()
            phi = pdata2.GetArray("phid").GetTuple1(0)
import os
import os.path
import sys

import paraview
paraview.compatibility.major = 3
paraview.compatibility.minor = 4
from paraview import servermanager
from paraview import smtesting
from paraview import util

smtesting.ProcessCommandLineArguments()
servermanager.Connect()

file1 = os.path.join(smtesting.DataDir, "Data/quadraticTetra01.vtu")
reader1 = servermanager.sources.XMLUnstructuredGridReader(FileName=file1)
reader1Output = servermanager.Fetch(reader1)

# General 3D cell
integVal = util.IntegrateCell(reader1Output, 0)
if integVal < 0.128 or integVal > 0.1285:
    print("ERROR: incorrect result for cell 0 of 1st dataset")
    sys.exit(1)

# General 2D cell
if util.IntegrateCell(reader1Output, 1) != 0.625:
    print("ERROR: incorrect result for cell 1 of 1st dataset")
    sys.exit(1)

file2 = os.path.join(smtesting.DataDir, "Data/elements.vtu")
reader2 = servermanager.sources.XMLUnstructuredGridReader(FileName=file2)
reader2Output = servermanager.Fetch(reader2)
from paraview.simple import *
from paraview import servermanager as sm
from paraview import smtesting
import os

# This test makes sure that the GMV reader can read vertices and lines.
smtesting.ProcessCommandLineArguments()
GMVDir = os.path.join(smtesting.DataDir, "Plugins", "GMVReader",
                      "Testing", "Data", "GMV")

# load plugin
LoadDistributedPlugin('GMVReader', ns=globals())

# create a new 'GMV Reader'
one_vertexgmv = GMVReader(registrationName='one_vertex.gmv',
                          FileNames=[os.path.join(GMVDir, "one_vertex.gmv")])
one_vertexgmv.CellArrayStatus = ['material id']
assert sm.Fetch(one_vertexgmv).GetBlock(1).GetNumberOfVerts() == 3

# create a new 'GMV Reader'
two_vertexgmv = GMVReader(registrationName='two_vertex.gmv',
                          FileNames=[os.path.join(GMVDir, "two_vertex.gmv")])
two_vertexgmv.CellArrayStatus = ['material id']
assert sm.Fetch(two_vertexgmv).GetBlock(1).GetNumberOfLines() == 2
def data_keys(self):
    _vtk_object = sm.Fetch(self.vtk_file)
    _vtk_object = dsa.WrapDataObject(_vtk_object)
    return _vtk_object.PointData.keys()
import os
import math

import numpy
import vtk
from paraview.simple import *
from paraview import servermanager

S = GetActiveSource()
FileName = os.path.basename(GetActiveSource().FileNames[0])
FileName = os.path.splitext(FileName)[0]

Normals = GenerateSurfaceNormals(Input=S, ComputeCellNormals=1)
Triangulate1 = Triangulate(Normals)

Surface = servermanager.Fetch(Triangulate1)
Points = Surface.GetPoints()
Cells = Surface.GetCellData()
Vectors = Cells.GetArray(0)

L = Triangulate1.GetDataInformation().GetBounds()
Lx = L[1] - L[0]
Ly = L[3] - L[2]
Lz = L[5] - L[4]

clip1 = Clip(Input=Triangulate1)
clip1.ClipType = 'Box'
clip1.Scalars = ['POINTS', '']
#extractSurface1Display = Show(extractSurface1, renderView1)
#extractSurface1Display.ColorArrayName = [None, '']
#Hide(S, renderView1)

# Cleaning
clean1 = Clean(Input=extractSurface1)

# Show
#clean1Display = Show(clean1, renderView1)
#clean1Display.ColorArrayName = [None, '']
#Hide(S, renderView1)
#renderView1.ResetCamera()

# Getting PolyData points
Points = servermanager.Fetch(clean1).GetPoints()

# Finding ids of extreme points
zmax = 0
zmin = Lz
ymin = Ly
ymin1 = Ly
ymin2 = Ly
if fold_on_right_side:
    for m in range(0, Points.GetNumberOfPoints()):
        r = Points.GetPoint(m)
        if r[0] > L[1] - 0.2 * Lx:
            if r[1] < ymin:
                k = m
                ymin = r[1]
            if (r[0] > 0.75 * L[1]) and (r[2] < (L[4] + 0.25 * Lz + 0.5)) and (r[1] < ymin1):
                # (excerpt truncated; presumably records this point,
                # mirroring the ymin update above)
                k1 = m
                ymin1 = r[1]
#!/usr/bin/env python
import os
import math

import numpy
import vtk
from paraview.simple import *
import paraview.servermanager

#RootFolder = '/Volumes/WAILERS/UCI/Collaborators/Marcos/Data/NoFoldExperiment-TimeVaryingData/18C/Control'
#File = 'CA_9_18C_Ctrl_A1'
FilePath = input("prompt")

S = ExtractSurface(Input=GetActiveSource())
T = Triangulate(Input=S)

conn = vtk.vtkPolyDataConnectivityFilter()
conn.SetInputData(servermanager.Fetch(T))
conn.SetExtractionModeToSpecifiedRegions()
conn.AddSpecifiedRegion(1)
conn.Update()
S = conn.GetOutput()

writer = vtk.vtkPolyDataWriter()
writer.SetFileName(FilePath + '-DiskDec.vtk')
writer.SetInputData(S)
writer.Write()
Hide(DecSurface, renderView1)

# Smooth the resulting mesh
SmoothSurface = Smooth(Input=DecSurface)
SmoothSurface.NumberofIterations = 50

# Make sure we only have triangles
TriSurface = Triangulate(SmoothSurface)
Show(TriSurface, renderView1)

# Exporting as VTK
SaveData(Path + '/' + FileName + '.vtk', proxy=TriSurface)

# Exporting as XML
PolyData = servermanager.Fetch(TriSurface)
Writer = vtk.vtkXMLPolyDataWriter()
Writer.SetInputData(PolyData)
Writer.SetFileName(Path + '/' + FileName + '.xml')
Writer.Write()

# Cleaning memory
Delete(TriSurface)
del TriSurface
Delete(SmoothSurface)
del SmoothSurface
Delete(DecSurface)
del DecSurface
Delete(ScaleSurface)
del ScaleSurface
def render_paraview_scene(
        pvd_file,
        outfile,
        field_name='m',
        timesteps=None,
        camera_position=[0, -200, +200],
        camera_focal_point=[0, 0, 0],
        camera_view_up=[0, 0, 1],
        view_size=(800, 600),
        magnification=1,
        fit_view_to_scene=True,
        color_by_axis=0,
        colormap='coolwarm',
        rescale_colormap_to_data_range=True,
        show_colorbar=True,
        colorbar_label_format='%-#5.2g',
        add_glyphs=True,
        glyph_type='cones',
        glyph_scale_factor=1.0,
        glyph_random_mode=True,
        glyph_mask_points=True,
        glyph_max_number_of_points=10000,
        show_orientation_axes=False,
        show_center_axes=False,
        representation="Surface With Edges",
        palette='screen',
        use_parallel_projection=False,
        trim_border=True,
        rescale=None,
        diffuse_color=None):
    """
    Load a *.pvd file, render the scene in it and save the result to an
    image file.

    *Returns*

    An IPython.core.display.Image object containing the output image.

    *Arguments*

    pvd_file:
        Input filename (must be in *.pvd format).

    outfile:
        Name of the output image file (may be None, which is the default).
        The image type (e.g. PNG) is derived from the file extension. If
        multiple timesteps are to be animated, the output files will have
        additional suffixes of the form '_N_TIMESTEP', where N represents
        the index of the timestep (in the array passed as the argument
        `timesteps`) and TIMESTEP is the actual timestep itself.

    field_name:
        The field to plot. Default: 'm' (= the normalised magnetisation).
        Note that this field must of course have been saved in the .pvd
        file.

    timesteps:
        The timesteps for which to render the scene. The default is None,
        which means to animate all timesteps (and save them as a sequence
        of images if `outfile` is specified). Other valid values are
        either a single number or a list of numbers.

    camera_position:  3-vector
    camera_focal_point:  3-vector
    camera_view_up:  3-vector
        These variables control the position and orientation of the
        camera.

    view_size: pair of int
        Controls the size of the view. This can be used to adjust the
        size and aspect ratio of the visible scene (useful for example if
        a colorbar is present). Default: (800, 600).

    magnification: int
        Magnification factor which controls the size of the saved image.
        Note that due to limitations in Paraview this must be an integer.

    fit_view_to_scene: True | False
        If True (the default), the view is automatically adjusted so that
        the entire scene is visible. In this case the exact location of
        the camera is ignored and only its relative position w.r.t. the
        focal point is taken into account.

    color_by_axis: integer or string (allowed values: 0, 1, 2, -1,
                   or 'x', 'y', 'z', 'magnitude')
        The vector components in the direction of this axis are used to
        color the plot. If '-1' is given, the vector magnitudes are used
        instead of any vector components.

    colormap:
        The colormap to use. Supported values: {}.

    rescale_colormap_to_data_range: True | False
        If False (default: True), the colormap corresponds to the data
        range [-1.0, +1.0]. If set to True, the colormap is rescaled so
        that it corresponds to the minimum/maximum data values *over all
        specified timesteps*.

    show_colorbar: True | False
        If True (the default), a colorbar is added to the plot.

    colorbar_label_format: string
        Controls how colorbar labels are formatted (e.g., how many digits
        are displayed, etc.). This can be any formatting string for
        floating point numbers as understood by Python's 'print'
        statement. Default: '%-#5.2g'.

    add_glyphs: True | False
        If True (the default), little glyphs are added at the mesh
        vertices to indicate the direction of the field vectors.

    glyph_type: string
        Type of glyphs to use. The only currently supported glyph type
        is 'cones'.

    glyph_scale_factor: float
        Controls the glyph size. The default value of 1.0 corresponds to
        a value automatically determined by the plotting function which
        makes the glyphs visible but keeps them small enough so that
        glyphs at different vertices don't overlap. This argument can be
        used to tweak that size (e.g. '0.5' means to use half the
        automatically determined size, and '3.0' three times that size,
        etc.).

    glyph_mask_points: True | False
        If True (the default), limit the maximum number of glyphs to the
        value indicated by glyph_max_number_of_points.

    glyph_max_number_of_points: int
        Specifies the maximum number of glyphs that should appear in the
        output dataset if glyph_mask_points is True.

    glyph_random_mode: True | False
        If True (the default), the glyph positions are chosen randomly.
        Otherwise the point IDs to which glyphs are attached are evenly
        spaced. This setting only has an effect if glyph_mask_points is
        True.

    show_orientation_axes: False | True
        If True (default: False), a set of three small axes is added to
        the scene to indicate the directions of the coordinate axes.

    show_center_axes: False | True
        If True (default: False), a set of three axes is plotted at the
        center of rotation.

    representation: string
        Controls the visual representation of the bodies in the scene.
        Allowed values: {}

    palette: 'print' | 'screen'
        The color scheme to be used. The main difference is that 'print'
        uses white as the background color whereas 'screen' uses dark
        grey.

    use_parallel_projection: True | False
        If False (the default), perspective projection is used to render
        the scene. Otherwise parallel projection is used.

    trim_border: True | False
        If True (the default), any superfluous space around the scene
        will be trimmed from the saved image. This requires imagemagick
        to be installed.

    rescale: float | None
        Factor by which the output image will be rescaled. For example,
        using 'rescale=0.4' will rescale the image by 40%.

    diffuse_color: 3-tuple of RGB values
        The solid color of the body. If given, this overrides any
        colormap-related values.

    """
    from paraview import servermanager
    import paraview.simple as pv

    # Paraview crashes if there is no X server running, so we check
    # whether this is the case.
    if 'DISPLAY' not in os.environ:
        logger.warning(
            "Could not detect a running X server (this may happen, for "
            "example, if you are on a ssh connection without X forwarding; "
            "use 'ssh -X' in this case). Aborting because Paraview is "
            "likely to crash.")

    if representation not in _representations:
        raise ValueError("Unsupported representation: '{}'. Allowed values: "
                         "{}".format(representation, _representations))

    if abs(magnification - int(magnification)) > 1e-6:
        logger.warning("Due to limitations in Paraview, the 'magnification' "
                       "argument must be an integer (got: {}). Using nearest "
                       "integer value.".format(magnification))
        magnification = int(round(magnification))

    if not os.path.exists(pvd_file):
        raise IOError("File does not exist: '{}'.".format(pvd_file))

    servermanager.Disconnect()
    servermanager.Connect()

    reader = servermanager.sources.PVDReader(FileName=pvd_file)
    reader.UpdatePipeline()

    view = servermanager.CreateRenderView()
    repr = servermanager.CreateRepresentation(reader, view)
    repr.Representation = representation

    view.CameraPosition = camera_position
    view.CameraFocalPoint = camera_focal_point
    view.CameraViewUp = camera_view_up

    if fit_view_to_scene:
        # N.B.: this email describes a more sophisticated (= proper?)
        # way of doing this, but it's probably overkill for now:
        #
        # http://www.paraview.org/pipermail/paraview/2012-March/024352.html
        #
        view.ResetCamera()

    view.OrientationAxesVisibility = (1 if show_orientation_axes else 0)
    view.CenterAxesVisibility = (1 if show_center_axes else 0)

    if palette == 'print':
        view.Background = [1.0, 1.0, 1.0]
        view.OrientationAxesLabelColor = [0.0, 0.0, 0.0]
        repr.AmbientColor = [0.0, 0.0, 0.0]
    elif palette == 'screen':
        view.Background = [0.32, 0.34, 0.43]
        view.OrientationAxesLabelColor = [1.0, 1.0, 1.0]
        repr.AmbientColor = [1.0, 1.0, 1.0]
    else:
        raise ValueError("Palette argument must be either 'print' "
                         "or 'screen'. Got: {}".format(palette))

    view.CameraParallelProjection = 1 if use_parallel_projection else 0

    # Convert color_by_axis to integer and store the name separately
    try:
        color_by_axis = _axes[color_by_axis.lower()]
    except AttributeError:
        if color_by_axis not in [0, 1, 2, -1]:
            raise ValueError("color_by_axis must have one of the values "
                             "[0, 1, 2, -1] or ['x', 'y', 'z', 'magnitude']. "
                             "Got: {}".format(color_by_axis))
    color_by_axis_name = _axes_names[color_by_axis]

    if timesteps is None:
        timesteps = reader.TimestepValues
    elif not isinstance(timesteps, (list, tuple, np.ndarray)):
        if not isinstance(timesteps, numbers.Number):
            raise TypeError(
                "Argument 'timesteps' must be either None or a number or "
                "a list of numbers. Got: '{}'".format(timesteps))
        timesteps = [timesteps]

    data_range = (-1.0, 1.0)
    if rescale_colormap_to_data_range:
        dmin, dmax = np.inf, -np.inf
        for t in timesteps:
            reader.UpdatePipeline(t)
            dataInfo = reader.GetDataInformation()
            pointDataInfo = dataInfo.GetPointDataInformation()
            arrayInfo = pointDataInfo.GetArrayInformation(field_name)
            cur_data_range = arrayInfo.GetComponentRange(color_by_axis)
            dmin = min(cur_data_range[0], dmin)
            dmax = max(cur_data_range[1], dmax)
        data_range = (dmin, dmax)
        logger.debug("Rescaling colormap to data range: {}".format(data_range))

    # Set the correct colormap and rescale it if necessary.
    try:
        cmap = _color_maps[colormap]
        if colormap == 'blue_to_red_rainbow':
            print(textwrap.dedent("""
                Use of the 'rainbow' color map is discouraged as it has a number of
                distinct disadvantages. Use at your own risk! For details see, e.g.,
                [1], [2].

                [1] K. Moreland, "Diverging Color Maps for Scientific Visualization"
                    http://www.sandia.gov/~kmorel/documents/ColorMaps/ColorMapsExpanded.pdf
                [2] http://www.paraview.org/ParaView3/index.php/Default_Color_Map
                """))
    except KeyError:
        raise ValueError("Unsupported colormap: '{}'. Allowed values: "
                         "{}".format(colormap, _color_maps.keys()))

    lut = servermanager.rendering.PVLookupTable()
    lut.ColorSpace = cmap.color_space
    rgb_points = cmap.rgb_points
    dmin, dmax = data_range
    cmin = rgb_points[0]
    cmax = rgb_points[-4]
    if cmin == cmax:
        # workaround for the case that the data range only
        # contains a single value
        cmax += 1e-8
    for i in range(0, len(rgb_points), 4):
        rgb_points[i] = (rgb_points[i] - cmin) / \
            (cmax - cmin) * (dmax - dmin) + dmin
    lut.RGBPoints = rgb_points
    lut.NanColor = cmap.nan_color

    if color_by_axis in [0, 1, 2]:
        lut.VectorMode = "Component"
        lut.VectorComponent = color_by_axis
    elif color_by_axis == -1:
        lut.VectorMode = "Magnitude"
        lut.VectorComponent = color_by_axis

    if diffuse_color is not None:
        print("diffuse_color: {} ({})".format(diffuse_color,
                                              type(diffuse_color)))
        repr.DiffuseColor = diffuse_color
    else:
        repr.LookupTable = lut
    if field_name is not None:
        repr.ColorArrayName = ("POINT_DATA", field_name)

    if add_glyphs:
        logger.debug("Adding cone glyphs.")
        glyph = pv.servermanager.filters.Glyph(Input=reader)

        # Try to determine an appropriate scale_factor automatically
        import vtk.util.numpy_support as VN
        grid = servermanager.Fetch(reader)

        # Determine approximate mesh spacing
        def mesh_spacing_for_cell(cell):
            cell_bounds = np.array(cell.GetBounds()).reshape((3, 2))
            return float(min(filter(lambda x: x != 0.0,
                                    cell_bounds[:, 1] - cell_bounds[:, 0])))

        mesh_spacing = np.average(
            [mesh_spacing_for_cell(grid.GetCell(i))
             for i in range(grid.GetNumberOfCells())])

        # Determine maximum field magnitude
        m = VN.vtk_to_numpy(grid.GetPointData().GetArray(field_name))
        max_field_magnitude = float(max(map(np.linalg.norm, m)))

        glyph_scale_factor_internal = mesh_spacing / max_field_magnitude
        logger.debug(
            "Using automatically determined glyph_scale_factor_internal = "
            "{:.2g} (determined from approximate mesh spacing {:.2g} and "
            "maximum field magnitude {:.2g}). This may need manual tweaking "
            "in case glyphs appear very large or very small.".format(
                glyph_scale_factor_internal, mesh_spacing,
                max_field_magnitude))

        glyph.ScaleFactor = glyph_scale_factor * glyph_scale_factor_internal
        glyph.ScaleMode = 'vector'
        glyph.Vectors = ['POINTS', field_name]
        try:
            # only relevant for animation IIUC, but can't hurt setting it
            glyph.KeepRandomPoints = 1
        except AttributeError:
            # Older version of Paraview which doesn't support this setting.
            # Ignoring for now.
            pass
        #glyph.MaskPoints = glyph_mask_points
        #glyph.MaximumNumberofPoints = glyph_max_number_of_points

        if glyph_type != 'cones':
            logger.warning("Unsupported glyph type: '{}'. "
                           "Falling back to 'cones'.".format(glyph_type))
            glyph_type = 'cones'

        if glyph_type == 'cones':
            cone = servermanager.sources.Cone()
            cone.Resolution = 20
            cone.Radius = 0.2
        else:
            # This should not happen as we're catching it above.
            raise NotImplementedError()

        glyph.SetPropertyWithName('Source', cone)
        glyph_repr = servermanager.CreateRepresentation(glyph, view)
        glyph_repr.LookupTable = lut
        glyph_repr.ColorArrayName = ("POINT_DATA", 'GlyphVector')

    if show_colorbar:
        # XXX TODO: Remove the import of paraview.simple once I know why
        # it is needed here.
        from paraview.simple import CreateScalarBar
        scalarbar = CreateScalarBar(
            Title=field_name, ComponentTitle=color_by_axis_name.capitalize(),
            Enabled=1, LabelFontSize=12, TitleFontSize=12)
        scalarbar.LabelFormat = colorbar_label_format
        if palette == 'print':
            scalarbar.LabelColor = [0.0, 0.0, 0.0]  # black labels for print
        else:
            scalarbar.LabelColor = [1.0, 1.0, 1.0]  # white labels for screen
        view.Representations.append(scalarbar)
        scalarbar.LookupTable = lut

    reader.UpdatePipelineInformation()

    if outfile is None:
        _, outfile = tempfile.mkstemp(suffix='.png')

    view.ViewSize = view_size

    def write_image(outfilename):
        _, suffix = os.path.splitext(outfilename)
        if suffix == '.png':
            view.WriteImage(outfilename, "vtkPNGWriter", magnification)
        elif suffix in ['.jpg', '.jpeg']:
            view.WriteImage(outfilename, "vtkJPEGWriter", magnification)
        else:
            raise ValueError("Output image must have extension '.png' or "
                             "'.jpg'. Got: {}".format(suffix))
        if trim_border:
            if palette == 'print':
                bordercolor = '"rgb(255,255,255)"'
            else:
                # Strangely, we get a slightly different background
                # color for PNG than for JPG.
                bordercolor = ('"rgb(82,87,110)"' if (suffix == '.png')
                               else '"rgb(82,87,109)"')
            cmd = 'mogrify -bordercolor {} -border 1x1 -trim {}'.format(
                bordercolor, outfilename)
            try:
                sp.check_output(cmd, stderr=sp.STDOUT, shell=True)
                logger.debug("Trimming border from rendered scene.")
            except OSError:
                logger.warning("Using the 'trim' argument requires "
                               "ImageMagick to be installed.")
            except sp.CalledProcessError as ex:
                logger.warning("Could not trim border from image. "
                               "The error message was: {}".format(ex.output))

        if rescale:
            rescale_factor = int(rescale * 100.0)
            cmd = 'mogrify -resize {:d}% {}'.format(rescale_factor,
                                                    outfilename)
            try:
                sp.check_output(cmd, stderr=sp.STDOUT, shell=True)
                logger.debug(
                    "Resizing output image by {:d}%".format(rescale_factor))
            except OSError:
                logger.warning("Using the 'rescale' argument requires "
                               "ImageMagick to be installed.")
            except sp.CalledProcessError as ex:
                logger.warning("Could not rescale image. The error message "
                               "was: {}".format(ex.output))

    if len(timesteps) == 1:
        # If a single timestep is rendered, we return the resulting image.
        view.ViewTime = timesteps[0]
        write_image(outfile)
        res = IPython.core.display.Image(filename=outfile)
    else:
        # Otherwise we export a bunch of images with sequentially
        # numbered suffixes.
        #
        # TODO: What should we return? Just the image for the first
        #       timestep as we currently do? Or can we somehow create
        #       a video and return that?
        outbasename, outsuffix = os.path.splitext(outfile)

        def generate_outfilename(i, t):
            return outbasename + '_{:04d}_'.format(i) + str(t) + outsuffix

        for (i, t) in enumerate(timesteps):
            view.ViewTime = t
            cur_outfilename = generate_outfilename(i, t)
            logger.debug(
                "Saving timestep {} to file '{}'.".format(t, cur_outfilename))
            write_image(cur_outfilename)
        res = IPython.core.display.Image(
            filename=generate_outfilename(0, timesteps[0]))

    servermanager.Disconnect()

    return res
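# A minimal usage sketch for render_paraview_scene; the .pvd filename and
# output name are illustrative assumptions:
#
#   img = render_paraview_scene('simulation.pvd', 'scene.png',
#                               field_name='m', timesteps=[0.0],
#                               colormap='coolwarm', add_glyphs=True)
#   # `img` is an IPython.core.display.Image suitable for notebook display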
point = PointSource(Center=[0.0, 0.5, 0.0], NumberOfPoints=1)
pprobe = ProbeLocation(ProbeType=point, Input=reader)

outfile = open("height.txt", 'w')
for time in timesteps:
    print("Time =" + str(time))
    outfile.write(str(time))
    phi_old = 99.99
    height = 0.0
    p = -1
    for probe in probes:
        p = p + 1
        probe.UpdatePipeline(time)
        fp = servermanager.Fetch(probe)
        pdata = fp.GetPointData()
        for i in range(opts.resolution + 1):
            phi = pdata.GetArray("phid").GetTuple1(i)
            if (phi > 0.0) and (phi_old < 0.0):
                height = (float(i - 1) + (phi_old / (phi_old - phi))) / float(opts.resolution)
            phi_old = phi
        if height > 0.0:
            phi = 111.0
            while abs(phi) > opts.accuracy:
                point.Center = [xloc[p], 0.5, height]
                fp2 = servermanager.Fetch(pprobe)
                pdata2 = fp2.GetPointData()
def values(self):
    _vtk_object = sm.Fetch(self.current_array)
    _vtk_object = dsa.WrapDataObject(_vtk_object)
    return _vtk_object.PointData
# the programmable filter input ...
#===================================
filter.Input = reader
pxm.RegisterProxy("sources", "my programmable filter", filter)

# Perform a sum operation
#=========================
sum = servermanager.filters.MinMax(Operation="SUM")

# Reduce the programmable filter output data using our reduction
# algorithm, returning just the reduced value (instead of transferring
# the entire dataset to the client)
#=======================================
myoutput = servermanager.Fetch(filter, sum, sum)

cellData = myoutput.GetCellData()
if cellData.GetArray("ObjectId").GetValue(0) != 7472:
    print("ERROR: Wrong value returned from cell %s array."
          % cellData.GetArray(0).GetName())
    sys.exit(1)
if cellData.GetArray("GlobalElementId").GetValue(0) != 27919128:
    print("ERROR: Wrong value returned from cell %s array."
          % cellData.GetArray(1).GetName())
    sys.exit(1)
if cellData.GetArray("PedigreeElementId").GetValue(0) != 27919128:
    print("ERROR: Wrong value returned from cell %s array."
          % cellData.GetArray(2).GetName())
    sys.exit(1)
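# Note on the three-argument form used above: servermanager.Fetch(proxy, f1, f2)
# applies f1 to the data on each processor and f2 to combine the per-processor
# results on the root node, so only the reduced result crosses the wire.
# For example, fetching only the per-array maxima instead of the full dataset:
#
#   mm = servermanager.filters.MinMax(Operation="MAX")
#   reduced = servermanager.Fetch(filter, mm, mm)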
def data_values(self):
    _vtk_object = sm.Fetch(self.vtk_file)
    _vtk_object = dsa.WrapDataObject(_vtk_object)
    return _vtk_object.PointData
def mesh_points(self) -> pd.DataFrame:
    vtk_object = sm.Fetch(self.filter)
    return pd.DataFrame(vtk_to_numpy(vtk_object.GetPoints().GetData()),
                        columns=["x", "y", "z"])
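# The accessor methods in these excerpts (point_data, point_keys, cell_keys,
# field_keys, data_values, mesh_points, ...) all share one pattern: Fetch the
# pipeline output to the client, wrap it with the numpy dataset adaptor, and
# expose the arrays as numpy/pandas objects. A minimal self-contained sketch
# of that pattern; the class name `FetchedDataSet` is an assumption:
import pandas as pd
from paraview import servermanager as sm
from paraview.vtk.numpy_interface import dataset_adapter as dsa
from vtk.util.numpy_support import vtk_to_numpy

class FetchedDataSet:
    def __init__(self, filter):
        self.filter = filter

    @property
    def _wrapped(self):
        # Fetch moves the server-side output to the client as a VTK object
        return dsa.WrapDataObject(sm.Fetch(self.filter))

    @property
    def point_keys(self):
        return self._wrapped.PointData.keys()

    @property
    def mesh_points(self):
        vtk_object = sm.Fetch(self.filter)
        return pd.DataFrame(vtk_to_numpy(vtk_object.GetPoints().GetData()),
                            columns=["x", "y", "z"])

# Example usage (assuming an active pipeline source):
#   ds = FetchedDataSet(GetActiveSource())
#   print(ds.point_keys)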
cell_sol000010_xmf.CellArrays = [
    'PROC_COLOR', 'SKEWNESS', 'TENSION', 'VOLUME_FLAG'
]
cell_sol000010_xmf.Sets = []
cell_sol000010_xmf.PointArrays = [
    'BND_FLAG', 'CURVATURE', 'CURVATURE_FORCE', 'ELASTIC_FORCE',
    'EXTERNAL_FORCE', 'GAUSSIAN_CURVATURE', 'LAPL_CURV', 'MULTIPLICITY',
    'SOLID_FORCE', 'SPONTANEOUS_CURVATURE', 'STRETCHING', 'TOTAL_FORCE',
    'U', 'X_NODE_REF'
]
cell_sol000010_xmf.Grids = ['mesh']

# Restrict data to the RBC surface
surf = servermanager.filters.ExtractSurface(Input=cell_sol000010_xmf)
datasurf = servermanager.Fetch(surf)

# Store CURVATURE / GAUSSIAN_CURVATURE in curvature arrays
datacurv1 = datasurf.GetPointData().GetArray("CURVATURE")
datacurv2 = datasurf.GetPointData().GetArray("GAUSSIAN_CURVATURE")
gauss_curv, curv = [], []
for i in range(datacurv1.GetNumberOfTuples()):
    curv.append(datacurv1.GetTuple(i)[0])
for i in range(datacurv2.GetNumberOfTuples()):
    gauss_curv.append(datacurv2.GetTuple(i)[0])
print('CURVATURE data read')
def cell_keys(self):
    vtk_object = sm.Fetch(self.filter)
    vtk_object = dsa.WrapDataObject(vtk_object)
    return vtk_object.CellData.keys()
import os
import os.path
import sys

import paraview
paraview.compatibility.major = 3
paraview.compatibility.minor = 4
from paraview import servermanager
from paraview import smtesting

smtesting.ProcessCommandLineArguments()
servermanager.Connect()

file1 = os.path.join(smtesting.DataDir, "bot2.wrl")
reader = servermanager.sources.vrmlreader(FileName=file1)
readerOutput = servermanager.Fetch(reader)

if readerOutput.GetClassName() != "vtkMultiBlockDataSet":
    print("ERROR: Wrong dataset type returned:", readerOutput.GetClassName())
    sys.exit(1)
if readerOutput.GetNumberOfPoints() != 337:
    print("ERROR: Wrong number of points returned.")
    sys.exit(1)
if readerOutput.GetNumberOfBlocks() != 1:
    print("ERROR: Wrong number of blocks returned.")
    sys.exit(1)

ds0 = readerOutput.GetBlock(0)
if ds0.GetClassName() != "vtkPolyData":
    # (excerpt truncated; presumably mirrors the checks above)
    print("ERROR: Wrong block type returned:", ds0.GetClassName())
    sys.exit(1)
def point_keys(self):
    vtk_object = sm.Fetch(self.filter)
    vtk_object = dsa.WrapDataObject(vtk_object)
    return vtk_object.PointData.keys()
if not paraview.servermanager.ActiveConnection:
    connection = paraview.servermanager.Connect()

reader = servermanager.sources.XDMFReader(FileNames=opts.filename)
reader.UpdatePipeline()
timesteps = reader.TimestepValues

points = []
points.append(PointSource(Center=[2.3950, 0.4745, 0.020], NumberOfPoints=1))
points.append(PointSource(Center=[2.3950, 0.4745, 0.100], NumberOfPoints=1))
points.append(PointSource(Center=[2.4195, 0.5255, 0.161], NumberOfPoints=1))
points.append(PointSource(Center=[2.4995, 0.5255, 0.161], NumberOfPoints=1))

probes = []
for point in points:
    probes.append(ProbeLocation(ProbeType=point, Input=reader))

outfile = open("pressure.txt", 'w')
for time in timesteps:
    outfile.write(str(time))
    for probe in probes:
        probe.UpdatePipeline(time)
        fp = servermanager.Fetch(probe)
        pdata = fp.GetPointData()
        pressure = pdata.GetArray("p").GetTuple1(0)
        outfile.write(" " + str(pressure))
    outfile.write("\n")
outfile.close()
def get_mean_component(array, component_index):
    array_length = array.GetNumberOfTuples()
    return sum(array.GetComponent(i, component_index)
               for i in range(array_length)) / array_length


def get_max_component(array, component_index):
    array_length = array.GetNumberOfTuples()
    return max(array.GetComponent(i, component_index)
               for i in range(array_length))


reader = PVDReader(FileName=r"C:\Projects\effective_yield_surface\calc.pvd")
data = servermanager.Fetch(reader)
point_data = data.GetPointData()
stress = point_data.GetArray("Stress")

max_mises_stress = get_max_component(stress, 6)
scale_coefficient = YIELD_STRENGTH / max_mises_stress

# In-plane averaged stress tensor [[s_xx, s_xy], [s_xy, s_yy]]
averaged_stress = [
    [get_mean_component(stress, 0), get_mean_component(stress, 3)],
    [get_mean_component(stress, 3), get_mean_component(stress, 1)],
]

# Principal stresses: (s_xx + s_yy)/2 +/- sqrt(((s_xx - s_yy)/2)^2 + s_xy^2).
# The excerpt breaks off mid-expression here; the closing terms below are
# completed from the standard 2x2 symmetric eigenvalue formula.
principial_averaged_stress = [
    (averaged_stress[0][0] + averaged_stress[1][1]) / 2
    + sqrt(((averaged_stress[0][0] - averaged_stress[1][1]) / 2)**2
           + averaged_stress[0][1]**2),
    (averaged_stress[0][0] + averaged_stress[1][1]) / 2
    - sqrt(((averaged_stress[0][0] - averaged_stress[1][1]) / 2)**2
           + averaged_stress[0][1]**2),
]
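# A quick cross-check of the completed principal-stress formula above,
# using numpy's symmetric eigensolver. This is a standalone sketch with
# made-up numbers, independent of the ParaView pipeline:
import numpy as np
from math import sqrt

s_xx, s_yy, s_xy = 120.0, 80.0, 30.0  # arbitrary test values
analytic = [
    (s_xx + s_yy) / 2 + sqrt(((s_xx - s_yy) / 2)**2 + s_xy**2),
    (s_xx + s_yy) / 2 - sqrt(((s_xx - s_yy) / 2)**2 + s_xy**2),
]
# eigvalsh returns eigenvalues in ascending order
numeric = np.linalg.eigvalsh([[s_xx, s_xy], [s_xy, s_yy]])
assert np.allclose(sorted(analytic), numeric)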