def gaussianSmooth(self, sigma=(2, 2, 2), radius=None):
    """Perform a convolution of the input Volume with a Gaussian kernel.

    :param float,list sigma: standard deviation(s) in voxel units.
        A list can be given to smooth differently in the three directions.
    :param float,list radius: radius factor(s) determining how far out the Gaussian
        kernel will go before being clamped to zero. A list can be given too.
    """
    gsf = vtk.vtkImageGaussianSmooth()
    gsf.SetDimensionality(3)
    gsf.SetInputData(self.imagedata())
    if utils.isSequence(sigma):
        gsf.SetStandardDeviations(sigma)
    else:
        gsf.SetStandardDeviation(sigma)
    if radius is not None:
        if utils.isSequence(radius):
            gsf.SetRadiusFactors(radius)
        else:
            gsf.SetRadiusFactor(radius)
    gsf.Update()
    return self._update(gsf.GetOutput())
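# Usage sketch (assumption: called on an existing vedo Volume; the 'embryo.tif'
# dataset name follows the examples used elsewhere in this file):
#
#   from vedo import Volume, dataurl
#   vol = Volume(dataurl + 'embryo.tif')
#   vol.gaussianSmooth(sigma=(2, 2, 2))              # isotropic smoothing
#   vol.gaussianSmooth(sigma=(1, 1, 4), radius=3)    # stronger smoothing along z
#   vol.show(axes=1)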
def __init__(self, obj=None): vtk.vtkImageActor.__init__(self) Base3DProp.__init__(self) if utils.isSequence(obj) and len(obj): iac = vtk.vtkImageAppendComponents() for i in range(3): #arr = np.flip(np.flip(array[:,:,i], 0), 0).ravel() arr = np.flip(obj[:, :, i], 0).ravel() varb = numpy_to_vtk(arr, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR) imgb = vtk.vtkImageData() imgb.SetDimensions(obj.shape[1], obj.shape[0], 1) imgb.GetPointData().SetScalars(varb) iac.AddInputData(0, imgb) iac.Update() img = iac.GetOutput() self.SetInputData(img) elif isinstance(obj, vtk.vtkImageData): self.SetInputData(obj) img = obj elif isinstance(obj, str): if "https://" in obj: import vedo.io as io obj = io.download(obj, verbose=False) if ".png" in obj: picr = vtk.vtkPNGReader() elif ".jpg" in obj or ".jpeg" in obj: picr = vtk.vtkJPEGReader() elif ".bmp" in obj: picr = vtk.vtkBMPReader() elif ".tif" in obj: picr = vtk.vtkTIFFReader() else: colors.printc("Cannot understand picture format", obj, c='r') return picr.SetFileName(obj) picr.Update() img = picr.GetOutput() self.SetInputData(img) else: img = vtk.vtkImageData() self.SetInputData(img) self._data = img self._mapper = self.GetMapper()
def _buildtetugrid(self, points, cells): ug = vtk.vtkUnstructuredGrid() if len(points) == 0: return ug if not utils.isSequence(points[0]): return ug if len(cells) == 0: return ug if not utils.isSequence(cells[0]): tets = [] nf = cells[0] + 1 for i, cl in enumerate(cells): if i == nf or i == 0: k = i + 1 nf = cl + k cell = [cells[j + k] for j in range(cl)] tets.append(cell) cells = tets sourcePoints = vtk.vtkPoints() varr = numpy_to_vtk(np.ascontiguousarray(points), deep=True) sourcePoints.SetData(varr) ug.SetPoints(sourcePoints) sourceTets = vtk.vtkCellArray() for f in cells: ele = vtk.vtkTetra() pid = ele.GetPointIds() for i, fi in enumerate(f): pid.SetId(i, fi) sourceTets.InsertNextCell(ele) ug.SetCells(vtk.VTK_TETRA, sourceTets) return ug
def exe_info(args): for i in range(2, len(sys.argv)): file = sys.argv[i] try: A = load(file) if isinstance(A, np.ndarray): printInfo(A) elif isSequence(A): for a in A: printInfo(a) else: printInfo(A) except: printc("Could not load:", file, "skip.", c="r") printc("_" * 65, bold=0) printc("vedo version :", __version__, invert=1, end=' ') printc("https://vedo.embl.es", underline=1, italic=1) printc("vtk version :", vtk.vtkVersion().GetVTKVersion()) printc("python version :", sys.version.replace("\n", "")) printc("python interpreter:", sys.executable) printc("vedo installation :", settings.installdir) try: import platform printc("system :", platform.system(), platform.release(), os.name, platform.machine()) except: pass try: import k3d printc("k3d version :", k3d.__version__, bold=0, dim=1) except: pass try: import ipyvtk_simple printc("ipyvtk version :", ipyvtk_simple.__version__, bold=0, dim=1) except: pass try: import itkwidgets printc("itkwidgets version:", itkwidgets.__version__, bold=0, dim=1) except: pass try: import panel printc("panel version :", panel.__version__, bold=0, dim=1) except: pass
def alphaGradient(self, alphaGrad, vmin=None, vmax=None):
    """
    Assign a set of transparencies to a volume's gradient
    along the range of the scalar value.
    A single constant value can also be assigned.
    The gradient function is used to decrease the opacity
    in the "flat" regions of the volume while maintaining the opacity
    at the boundaries between material types. The gradient is measured
    as the amount by which the intensity changes over unit distance.

    The format for alphaGrad is the same as for method ``volume.alpha()``.

    |read_volume2| |read_volume2.py|_
    """
    if vmin is None:
        vmin, _ = self._data.GetScalarRange()
    if vmax is None:
        _, vmax = self._data.GetScalarRange()
    self._alphaGrad = alphaGrad
    volumeProperty = self.GetProperty()
    if alphaGrad is None:
        volumeProperty.DisableGradientOpacityOn()
        return self
    else:
        volumeProperty.DisableGradientOpacityOff()

    gotf = volumeProperty.GetGradientOpacity()
    if utils.isSequence(alphaGrad):
        alphaGrad = np.array(alphaGrad)
        if len(alphaGrad.shape) == 1:  # user passing a flat list e.g. (0.0, 0.3, 0.9, 1)
            for i, al in enumerate(alphaGrad):
                xalpha = vmin + (vmax - vmin) * i / (len(alphaGrad) - 1)
                # Create transfer mapping scalar value to gradient opacity
                gotf.AddPoint(xalpha, al)
        elif len(alphaGrad.shape) == 2:  # user passing [(x0,alpha0), ...]
            gotf.AddPoint(vmin, alphaGrad[0][1])
            for xalpha, al in alphaGrad:
                # Create transfer mapping scalar value to opacity
                gotf.AddPoint(xalpha, al)
            gotf.AddPoint(vmax, alphaGrad[-1][1])
            #colors.printc("alphaGrad at", round(xalpha, 1), "\tset to", al, c="b", bold=0)
    else:
        gotf.AddPoint(vmin, alphaGrad)  # constant alphaGrad
        gotf.AddPoint(vmax, alphaGrad)
    return self
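# Usage sketch (assumption: a scalar Volume is already loaded; the numeric
# values are illustrative only):
#
#   from vedo import Volume, dataurl
#   vol = Volume(dataurl + 'embryo.tif')
#   vol.alphaGradient(0.5)                                 # constant gradient opacity
#   vol.alphaGradient([0.0, 0.4, 0.9, 1.0])                # ramp over the scalar range
#   vol.alphaGradient([(0, 0.0), (80, 0.6), (255, 1.0)])   # explicit (value, alpha) pairs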
def MeshLines(*inputobj, **options):
    """
    Build the line segments between two lists of points `startPoints` and `endPoints`.
    `startPoints` can also be passed in the form ``[[point1, point2], ...]``.

    A dolfin ``Mesh`` that was deformed/modified by a function can be passed
    together as inputs.

    :param float scale: apply a rescaling factor to the length
    """
    scale = options.pop("scale", 1)
    lw = options.pop("lw", 1)
    c = options.pop("c", 'grey')
    alpha = options.pop("alpha", 1)

    mesh, u = _inputsort(inputobj)
    if not mesh:
        return None

    if hasattr(mesh, "coordinates"):
        startPoints = mesh.coordinates()
    else:
        startPoints = mesh.geometry.points

    u_values = _compute_uvalues(u, mesh)
    if not utils.isSequence(u_values[0]):
        printc("~times Error: cannot show Lines for 1D scalar values!", c=1)
        raise RuntimeError()

    endPoints = startPoints + u_values
    if u_values.shape[1] == 2:  # u_values is 2D
        u_values = np.insert(u_values, 2, 0, axis=1)  # make it 3d
        startPoints = np.insert(startPoints, 2, 0, axis=1)  # make it 3d
        endPoints = np.insert(endPoints, 2, 0, axis=1)  # make it 3d

    actor = shapes.Lines(startPoints, endPoints, scale=scale, lw=lw, c=c, alpha=alpha)

    actor.mesh = mesh
    actor.u = u
    actor.u_values = u_values
    return actor
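# Usage sketch (assumption: FEniCS/dolfin is installed and this helper lives in
# vedo.dolfin; the mesh, function space and expression below are illustrative):
#
#   import dolfin
#   from vedo.dolfin import MeshLines
#   mesh = dolfin.UnitSquareMesh(8, 8)
#   V = dolfin.VectorFunctionSpace(mesh, "CG", 1)
#   u = dolfin.interpolate(dolfin.Expression(("0.1*x[1]", "0"), degree=1), V)
#   lines = MeshLines(mesh, u, scale=2, c='black')
#   lines.show(axes=1)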
def MeshArrows(*inputobj, **options):
    """
    Build arrows representing displacements.

    :param float s: cross-section size of the arrow
    :param float scale: apply a rescaling factor to the length
    """
    s = options.pop("s", None)
    scale = options.pop("scale", 1)
    c = options.pop("c", "gray")
    alpha = options.pop("alpha", 1)
    res = options.pop("res", 12)

    mesh, u = _inputsort(inputobj)
    if not mesh:
        return None

    if hasattr(mesh, "coordinates"):
        startPoints = mesh.coordinates()
    else:
        startPoints = mesh.geometry.points

    u_values = _compute_uvalues(u, mesh)
    if not utils.isSequence(u_values[0]):
        printc("~times Error: cannot show Arrows for 1D scalar values!", c=1)
        raise RuntimeError()

    endPoints = startPoints + u_values
    if u_values.shape[1] == 2:  # u_values is 2D
        u_values = np.insert(u_values, 2, 0, axis=1)  # make it 3d
        startPoints = np.insert(startPoints, 2, 0, axis=1)  # make it 3d
        endPoints = np.insert(endPoints, 2, 0, axis=1)  # make it 3d

    actor = shapes.Arrows(startPoints, endPoints, s=s, scale=scale, alpha=alpha, res=res)
    actor.color(c)
    actor.mesh = mesh
    actor.u = u
    actor.u_values = u_values
    return actor
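# Usage sketch (assumption: same dolfin setup as in the MeshLines example above;
# the keyword values are illustrative):
#
#   arrows = MeshArrows(mesh, u, scale=3, s=0.3, c='tomato', alpha=0.8)
#   arrows.show(axes=1)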
def _parse(self, objs, t, duration):
    if t is None:
        if self._lastT:
            t = self._lastT
        else:
            t = 0.0
    if duration is None:
        if self._lastDuration:
            duration = self._lastDuration
        else:
            duration = 0.0
    if objs is None:
        if self._lastActs:
            objs = self._lastActs
        else:
            printc('Need to specify actors!', c='r')
            raise RuntimeError

    if isSequence(objs):
        objs2 = objs
    else:
        objs2 = [objs]

    # quantize time steps and duration
    t = int(t / self.timeResolution + 0.5) * self.timeResolution
    nsteps = int(duration / self.timeResolution + 0.5)
    duration = nsteps * self.timeResolution

    rng = np.linspace(t, t + duration, nsteps + 1)

    self._lastT = t
    self._lastDuration = duration
    self._lastActs = objs2

    for a in objs2:
        if a not in self.actors:
            self.actors.append(a)

    return objs2, t, duration, rng
def _buildtetugrid(self, points, cells): if len(points) == 0: return None if not utils.isSequence(points[0]): return None ug = vtk.vtkUnstructuredGrid() sourcePoints = vtk.vtkPoints() varr = numpy_to_vtk(np.ascontiguousarray(points), deep=True) sourcePoints.SetData(varr) ug.SetPoints(sourcePoints) sourceTets = vtk.vtkCellArray() for f in cells: ele = vtk.vtkTetra() pid = ele.GetPointIds() for i, fi in enumerate(f): pid.SetId(i, fi) sourceTets.InsertNextCell(ele) ug.SetCells(vtk.VTK_TETRA, sourceTets) return ug
def append(self, volumes, axis='z', preserveExtents=False):
    """
    Take the components from multiple inputs and merge them into one output.
    Except for the append axis, all inputs must have the same extent.
    All inputs must have the same number of scalar components.
    The output has the same origin and spacing as the first input.
    The origin and spacing of all other inputs are ignored.
    All inputs must have the same scalar type.

    :param int,str axis: axis expanded to hold the multiple images.
    :param bool preserveExtents: if True, the extent of the inputs is used to place
        the image in the output. The whole extent of the output is the union of the input
        whole extents. Any portion of the output not covered by the inputs is set to zero.
        The origin and spacing are taken from the first input.

    .. code-block:: python

        from vedo import load, datadir
        vol = load(datadir+'embryo.tif')
        vol.append(vol, axis='x').show()
    """
    ima = vtk.vtkImageAppend()
    ima.SetInputData(self.imagedata())
    if not utils.isSequence(volumes):
        volumes = [volumes]
    for volume in volumes:
        if isinstance(volume, vtk.vtkImageData):
            ima.AddInputData(volume)
        else:
            ima.AddInputData(volume.imagedata())
    ima.SetPreserveExtents(preserveExtents)
    if axis == "x":
        axis = 0
    elif axis == "y":
        axis = 1
    elif axis == "z":
        axis = 2
    ima.SetAppendAxis(axis)
    ima.Update()
    return self._update(ima.GetOutput())
def append(self, pictures, axis='z', preserveExtents=False):
    """
    Append the input images to the current one along the specified axis.
    Except for the append axis, all inputs must have the same extent.
    All inputs must have the same number of scalar components.
    The output has the same origin and spacing as the first input.
    The origin and spacing of all other inputs are ignored.
    All inputs must have the same scalar type.

    :param int,str axis: axis expanded to hold the multiple images.
    :param bool preserveExtents: if True, the extent of the inputs is used to place
        the image in the output. The whole extent of the output is the union of the input
        whole extents. Any portion of the output not covered by the inputs is set to zero.
        The origin and spacing are taken from the first input.

    .. code-block:: python

        from vedo import Picture, dataurl
        pic = Picture(dataurl+'dog.jpg').pad()
        pic.append([pic,pic,pic], axis='y')
        pic.append([pic,pic,pic,pic], axis='x')
        pic.show(axes=1)
    """
    ima = vtk.vtkImageAppend()
    ima.SetInputData(self._data)
    if not utils.isSequence(pictures):
        pictures = [pictures]
    for p in pictures:
        if isinstance(p, vtk.vtkImageData):
            ima.AddInputData(p)
        else:
            ima.AddInputData(p._data)
    ima.SetPreserveExtents(preserveExtents)
    if axis == "x":
        axis = 0
    elif axis == "y":
        axis = 1
    elif axis == "z":  # map the default string value too, SetAppendAxis() expects an int
        axis = 2
    ima.SetAppendAxis(axis)
    ima.Update()
    return self._update(ima.GetOutput())
def delaunay3D(mesh, alphaPar=0, tol=None, boundary=False): """Create 3D Delaunay triangulation of input points.""" deln = vtk.vtkDelaunay3D() if utils.isSequence(mesh): pd = vtk.vtkPolyData() vpts = vtk.vtkPoints() vpts.SetData(numpy_to_vtk(np.ascontiguousarray(mesh), deep=True)) pd.SetPoints(vpts) deln.SetInputData(pd) else: deln.SetInputData(mesh.GetMapper().GetInput()) deln.SetAlpha(alphaPar) deln.AlphaTetsOn() deln.AlphaTrisOff() deln.AlphaLinesOff() deln.AlphaVertsOff() if tol: deln.SetTolerance(tol) deln.SetBoundingTriangulation(boundary) deln.Update() m = TetMesh(deln.GetOutput()) return m
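# Usage sketch (assumption: delaunay3D() is called with a point cloud given as an
# (N,3) array, the first branch handled above; the random points are illustrative):
#
#   import numpy as np
#   pts = np.random.rand(100, 3)
#   tmesh = delaunay3D(pts, alphaPar=0.1)   # returns a TetMesh
#   tmesh.show(axes=1)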
def pad(self, pixels=10, value=255): """ Add the specified number of pixels at the picture borders. Pixels can be a list formatted as [left,right,bottom,top]. Parameters ---------- pixels : int,list , optional number of pixels to be added (or a list of length 4). The default is 10. value : int, optional intensity value (gray-scale color) of the padding. The default is 255. """ x0,x1,y0,y1,_z0,_z1 = self._data.GetExtent() pf = vtk.vtkImageConstantPad() pf.SetInputData(self._data) pf.SetConstant(value) if utils.isSequence(pixels): pf.SetOutputWholeExtent(x0-pixels[0],x1+pixels[1], y0-pixels[2],y1+pixels[3], 0,0) else: pf.SetOutputWholeExtent(x0-pixels,x1+pixels, y0-pixels,y1+pixels, 0,0) pf.Update() img = pf.GetOutput() return self._update(img)
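# Usage sketch (assumption: 'dog.jpg' follows the dataurl examples used elsewhere
# in this file; the padding values are illustrative):
#
#   from vedo import Picture, dataurl
#   pic = Picture(dataurl + 'dog.jpg')
#   pic.pad(20, value=0)                     # 20 black pixels on every side
#   pic.pad([10, 10, 40, 40], value=255)     # [left, right, bottom, top]
#   pic.show(axes=1)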
def __init__( self, inputobj=None, c='RdBu_r', alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0), alphaGradient=None, alphaUnit=1, mode=0, shade=False, spacing=None, dims=None, origin=None, mapper='smart', ): vtk.vtkVolume.__init__(self) BaseGrid.__init__(self) ################### if isinstance(inputobj, str): if "https://" in inputobj: from vedo.io import download inputobj = download(inputobj, verbose=False) # fpath elif os.path.isfile(inputobj): pass else: inputobj = sorted(glob.glob(inputobj)) ################### if 'gpu' in mapper: self._mapper = vtk.vtkGPUVolumeRayCastMapper() elif 'opengl_gpu' in mapper: self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper() elif 'smart' in mapper: self._mapper = vtk.vtkSmartVolumeMapper() elif 'fixed' in mapper: self._mapper = vtk.vtkFixedPointVolumeRayCastMapper() elif isinstance(mapper, vtk.vtkMapper): self._mapper = mapper else: print("Error unknown mapper type", [mapper]) raise RuntimeError() self.SetMapper(self._mapper) ################### inputtype = str(type(inputobj)) #colors.printc('Volume inputtype', inputtype) if inputobj is None: img = vtk.vtkImageData() elif utils.isSequence(inputobj): if isinstance(inputobj[0], str): # scan sequence of BMP files ima = vtk.vtkImageAppend() ima.SetAppendAxis(2) pb = utils.ProgressBar(0, len(inputobj)) for i in pb.range(): f = inputobj[i] picr = vtk.vtkBMPReader() picr.SetFileName(f) picr.Update() mgf = vtk.vtkImageMagnitude() mgf.SetInputData(picr.GetOutput()) mgf.Update() ima.AddInputData(mgf.GetOutput()) pb.print('loading...') ima.Update() img = ima.GetOutput() else: if "ndarray" not in inputtype: inputobj = np.array(inputobj) if len(inputobj.shape) == 1: varr = numpy_to_vtk(inputobj, deep=True, array_type=vtk.VTK_FLOAT) else: if len(inputobj.shape) > 2: inputobj = np.transpose(inputobj, axes=[2, 1, 0]) varr = numpy_to_vtk(inputobj.ravel(order='F'), deep=True, array_type=vtk.VTK_FLOAT) varr.SetName('input_scalars') img = vtk.vtkImageData() if dims is not None: img.SetDimensions(dims) else: if len(inputobj.shape) == 1: colors.printc( "Error: must set dimensions (dims keyword) in Volume.", c='r') raise RuntimeError() img.SetDimensions(inputobj.shape) img.GetPointData().SetScalars(varr) #to convert rgb to numpy # img_scalar = data.GetPointData().GetScalars() # dims = data.GetDimensions() # n_comp = img_scalar.GetNumberOfComponents() # temp = numpy_support.vtk_to_numpy(img_scalar) # numpy_data = temp.reshape(dims[1],dims[0],n_comp) # numpy_data = numpy_data.transpose(0,1,2) # numpy_data = np.flipud(numpy_data) elif "ImageData" in inputtype: img = inputobj elif isinstance(inputobj, Volume): img = inputobj.inputdata() elif "UniformGrid" in inputtype: img = inputobj elif hasattr( inputobj, "GetOutput"): # passing vtk object, try extract imagdedata if hasattr(inputobj, "Update"): inputobj.Update() img = inputobj.GetOutput() elif isinstance(inputobj, str): from vedo.io import loadImageData, download if "https://" in inputobj: inputobj = download(inputobj, verbose=False) img = loadImageData(inputobj) else: colors.printc("Volume(): cannot understand input type:\n", inputtype, c='r') return if dims is not None: img.SetDimensions(dims) if origin is not None: img.SetOrigin(origin) ### DIFFERENT from volume.origin()! 
    if spacing is not None:
        img.SetSpacing(spacing)

    self._data = img
    self._mapper.SetInputData(img)
    self.mode(mode).color(c).alpha(alpha).alphaGradient(alphaGradient)
    self.GetProperty().SetShade(shade)  # honor the 'shade' keyword instead of hardcoding True
    self.GetProperty().SetInterpolationType(1)
    self.GetProperty().SetScalarOpacityUnitDistance(alphaUnit)

    # remember stuff:
    self._mode = mode
    self._color = c
    self._alpha = alpha
    self._alphaGrad = alphaGradient
    self._alphaUnit = alphaUnit
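# Usage sketch (assumption: building a Volume directly from a 3D numpy array;
# the array content, dimensions and keyword values are illustrative):
#
#   import numpy as np
#   from vedo import Volume
#   data = np.random.rand(50, 60, 70)          # 3D scalar field as a numpy array
#   vol = Volume(data, spacing=(1, 1, 2), mode=0, c='RdBu_r')
#   vol.show(axes=1)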
def __init__(self, obj=None, channels=(), flip=False): vtk.vtkImageActor.__init__(self) vedo.base.Base3DProp.__init__(self) if utils.isSequence(obj) and len(obj): # passing array obj = np.asarray(obj) if len(obj.shape) == 3: # has shape (nx,ny, ncolor_alpha_chan) iac = vtk.vtkImageAppendComponents() nchan = obj.shape[ 2] # get number of channels in inputimage (L/LA/RGB/RGBA) for i in range(nchan): if flip: arr = np.flip(np.flip(obj[:, :, i], 0), 0).ravel() else: arr = np.flip(obj[:, :, i], 0).ravel() varb = numpy_to_vtk(arr, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR) varb.SetName("RGBA") imgb = vtk.vtkImageData() imgb.SetDimensions(obj.shape[1], obj.shape[0], 1) imgb.GetPointData().AddArray(varb) imgb.GetPointData().SetActiveScalars("RGBA") iac.AddInputData(imgb) iac.Update() img = iac.GetOutput() elif len(obj.shape) == 2: # black and white if flip: arr = np.flip(obj[:, :], 0).ravel() else: arr = obj.ravel() varb = numpy_to_vtk(arr, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR) varb.SetName("RGBA") img = vtk.vtkImageData() img.SetDimensions(obj.shape[1], obj.shape[0], 1) img.GetPointData().AddArray(varb) img.GetPointData().SetActiveScalars("RGBA") elif isinstance(obj, vtk.vtkImageData): img = obj elif isinstance(obj, str): if "https://" in obj: obj = vedo.io.download(obj, verbose=False) fname = obj.lower() if fname.endswith(".png"): picr = vtk.vtkPNGReader() elif fname.endswith(".jpg") or fname.endswith(".jpeg"): picr = vtk.vtkJPEGReader() elif fname.endswith(".bmp"): picr = vtk.vtkBMPReader() elif fname.endswith(".tif") or fname.endswith(".tiff"): picr = vtk.vtkTIFFReader() picr.SetOrientationType(vedo.settings.tiffOrientationType) else: colors.printc("Cannot understand picture format", obj, c='r') return picr.SetFileName(obj) self.filename = obj picr.Update() img = picr.GetOutput() else: img = vtk.vtkImageData() # select channels nchans = len(channels) if nchans and img.GetPointData().GetScalars().GetNumberOfComponents( ) > nchans: pec = vtk.vtkImageExtractComponents() pec.SetInputData(img) if nchans == 3: pec.SetComponents(channels[0], channels[1], channels[2]) elif nchans == 2: pec.SetComponents(channels[0], channels[1]) elif nchans == 1: pec.SetComponents(channels[0]) pec.Update() img = pec.GetOutput() self._data = img self.SetInputData(img) sx, sy, _ = img.GetDimensions() self.shape = np.array([sx, sy]) self._mapper = self.GetMapper()
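# Usage sketch (assumption: constructing a Picture from an RGB numpy array and
# selecting channels with the 'channels' keyword handled above; the array content
# is illustrative):
#
#   import numpy as np
#   from vedo import Picture
#   rgb = (np.random.rand(100, 200, 3) * 255).astype(np.uint8)
#   pic = Picture(rgb)                       # full RGB image
#   red = Picture(rgb, channels=(0,))        # keep only the red channel
#   pic.show(axes=1)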
def __init__(self, obj=None): vtk.vtkImageActor.__init__(self) vedo.base.Base3DProp.__init__(self) if utils.isSequence(obj) and len(obj): # passing array obj = np.asarray(obj) if len(obj.shape) == 3: # has shape (nx,ny, ncolor_alpha_chan) iac = vtk.vtkImageAppendComponents() nchan = obj.shape[ 2] # get number of channels in inputimage (L/LA/RGB/RGBA) for i in range(nchan): #arr = np.flip(np.flip(array[:,:,i], 0), 0).ravel() arr = np.flip(obj[:, :, i], 0).ravel() varb = numpy_to_vtk(arr, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR) varb.SetName("RGBA") imgb = vtk.vtkImageData() imgb.SetDimensions(obj.shape[1], obj.shape[0], 1) imgb.GetPointData().SetScalars(varb) iac.AddInputData(imgb) iac.Update() img = iac.GetOutput() elif len(obj.shape) == 2: # black and white arr = np.flip(obj[:, :], 0).ravel() varb = numpy_to_vtk(arr, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR) varb.SetName("RGBA") img = vtk.vtkImageData() img.SetDimensions(obj.shape[1], obj.shape[0], 1) img.GetPointData().SetScalars(varb) elif isinstance(obj, vtk.vtkImageData): img = obj elif isinstance(obj, str): if "https://" in obj: obj = vedo.io.download(obj, verbose=False) if ".png" in obj.lower(): picr = vtk.vtkPNGReader() elif ".jpg" in obj.lower() or ".jpeg" in obj.lower(): picr = vtk.vtkJPEGReader() elif ".bmp" in obj.lower(): picr = vtk.vtkBMPReader() elif ".tif" in obj.lower(): picr = vtk.vtkTIFFReader() else: colors.printc("Cannot understand picture format", obj, c='r') return picr.SetFileName(obj) self.filename = obj picr.Update() img = picr.GetOutput() else: img = vtk.vtkImageData() self._data = img self.SetInputData(img) sx, sy, _ = img.GetDimensions() self.shape = np.array([sx, sy]) self._mapper = self.GetMapper()
def printc( *strings, c=None, bc=None, bold=True, italic=False, blink=False, underline=False, strike=False, dim=False, invert=False, box="", dbg=False, end="\n", flush=True, ): """ Print to terminal in color (any color!). :param c: foreground color name or (r,g,b) :param bc: background color name or (r,g,b) :param bool bold: boldface [True] :param bool italic: italic [False] :param bool blink: blinking text [False] :param bool underline: underline text [False] :param bool strike: strike through text [False] :param bool dim: make text look dimmer [False] :param bool invert: invert background and forward colors [False] :param box: print a box with specified text character [''] :param bool flush: flush buffer after printing [True] :param str end: the end character to be printed [newline] :param bool dbg: print debug information about the evironment :Example: .. code-block:: python from vedo.colors import printc printc('anything', c='tomato', bold=False, end='' ) printc('anything', 455.5, vtkObject, c='lightblue') printc(299792.48, c=4) .. hint:: |colorprint.py|_ |colorprint| """ if not settings.enablePrintColor: print(*strings, end=end, flush=flush) return if not settings.notebookBackend: if not _terminal_has_colors: print(*strings, end=end, flush=flush) return try: # ------------------------------------------------------------- txt = str() ns = len(strings) - 1 separator = " " offset = 0 for i, s in enumerate(strings): if i == ns: separator = "" if "\\" in repr(s): # "in" for some reasons changes s from vedo.shapes import _reps for k in emoji.keys(): if k in str(s): s = s.replace(k, emoji[k]) offset += 1 for k, rp in _reps: # check symbols in shapes._reps if k in str(s): s = s.replace(k, rp) offset += 1 txt += str(s) + separator special, cseq, reset = "", "", u"\u001b[0m" oneletter_colors = { 'k': u'\u001b[30;1m', # because these are supported by most terminals 'r': u'\u001b[31;1m', 'g': u'\u001b[32;1m', 'y': u'\u001b[33;1m', 'b': u'\u001b[34;1m', 'm': u'\u001b[35;1m', 'c': u'\u001b[36;1m', 'w': u'\u001b[37;1m', } if c is not None: if c is True: c = "g" elif c is False: c = "r" if isinstance(c, str) and c in oneletter_colors.keys(): cseq += oneletter_colors[c] else: r, g, b = getColor(c) # not all terms support this syntax cseq += f"\x1b[38;2;{int(r*255)};{int(g*255)};{int(b*255)}m" if bc: if bc in oneletter_colors.keys(): cseq += oneletter_colors[bc] else: r, g, b = getColor(bc) cseq += f"\x1b[48;2;{int(r*255)};{int(g*255)};{int(b*255)}m" if box is True: box = '-' if underline and not box: special += "\x1b[4m" if strike and not box: special += "\x1b[9m" if dim: special += "\x1b[2m" if invert: special += "\x1b[7m" if bold: special += "\x1b[1m" if italic: special += "\x1b[3m" if blink: special += "\x1b[5m" if box and not ("\n" in txt): box = box[0] boxv = box if box in ["_", "=", "-", "+", "~"]: boxv = "|" if box == "_" or box == ".": outtxt = special + cseq + " " + box * (len(txt) + offset + 2) + " \n" outtxt += boxv + " " * (len(txt) + 2) + boxv + "\n" else: outtxt = special + cseq + box * (len(txt) + offset + 4) + "\n" outtxt += boxv + " " + txt + " " + boxv + "\n" if box == "_": outtxt += "|" + box * (len(txt) + offset + 2) + "|" + reset + end else: outtxt += box * (len(txt) + offset + 4) + reset + end sys.stdout.write(outtxt) else: out = special + cseq + txt + reset if dbg: from inspect import currentframe, getframeinfo from vedo.utils import isSequence, precision cf = currentframe().f_back cfi = getframeinfo(cf) fname = os.path.basename(getframeinfo(cf).filename) 
print("\x1b[7m\x1b[3m\x1b[37m" + fname + " line:\x1b[1m" + str(cfi.lineno) + reset, end='') print('\x1b[3m\x1b[37m\x1b[2m', "\U00002501" * 30, time.ctime(), reset) if txt: print(" \x1b[37m\x1b[1mMessage : " + out) print(" \x1b[37m\x1b[1mFunction:\x1b[0m\x1b[37m " + str(cfi.function)) print(' \x1b[1mLocals :' + reset) for loc in cf.f_locals.keys(): obj = cf.f_locals[loc] var = repr(obj) if 'module ' in var: continue if 'function ' in var: continue if 'class ' in var: continue if '_' in loc: continue if hasattr(obj, 'name'): if not obj.name: oname = str(type(obj)) else: oname = obj.name var = oname + ', at ' + precision(obj.GetPosition(), 3) print(' \x1b[37m', loc, '=', var[:60].replace('\n', ''), reset) if isSequence(obj) and len(obj) > 4: print(' \x1b[37m\x1b[2m\x1b[3m len:', len(obj), ' min:', precision(min(obj), 4), ' max:', precision(max(obj), 4), reset) print(" \x1b[1m\x1b[37mElapsed time:\x1b[0m\x1b[37m", str(time.time() - _global_start_time)[:6], 's' + reset) else: sys.stdout.write(out + end) except: # ------------------------------------------------------------- fallback print(*strings, end=end) if flush: sys.stdout.flush()
def __init__(self, inputobj=None): vtk.vtkActor.__init__(self) BaseGrid.__init__(self) inputtype = str(type(inputobj)) self._data = None self._polydata = None self.name = "UGrid" ################### if inputobj is None: self._data = vtk.vtkUnstructuredGrid() elif utils.isSequence(inputobj): pts, cells, celltypes = inputobj self._data = vtk.vtkUnstructuredGrid() if not utils.isSequence(cells[0]): tets = [] nf = cells[0] + 1 for i, cl in enumerate(cells): if i == nf or i == 0: k = i + 1 nf = cl + k cell = [cells[j + k] for j in range(cl)] tets.append(cell) cells = tets # This would fill the points and use those to define orientation vpts = utils.numpy2vtk(pts, dtype=float) points = vtk.vtkPoints() points.SetData(vpts) self._data.SetPoints(points) # This fill the points and use cells to define orientation # points = vtk.vtkPoints() # for c in cells: # for pid in c: # points.InsertNextPoint(pts[pid]) # self._data.SetPoints(points) # Fill cells # https://vtk.org/doc/nightly/html/vtkCellType_8h_source.html for i, ct in enumerate(celltypes): cell_conn = cells[i] if ct == vtk.VTK_HEXAHEDRON: cell = vtk.vtkHexahedron() elif ct == vtk.VTK_TETRA: cell = vtk.vtkTetra() elif ct == vtk.VTK_VOXEL: cell = vtk.vtkVoxel() elif ct == vtk.VTK_WEDGE: cell = vtk.vtkWedge() elif ct == vtk.VTK_PYRAMID: cell = vtk.vtkPyramid() elif ct == vtk.VTK_HEXAGONAL_PRISM: cell = vtk.vtkHexagonalPrism() elif ct == vtk.VTK_PENTAGONAL_PRISM: cell = vtk.vtkPentagonalPrism() else: print("UGrid: cell type", ct, "not implemented. Skip.") continue cpids = cell.GetPointIds() for j, pid in enumerate(cell_conn): cpids.SetId(j, pid) self._data.InsertNextCell(ct, cpids) elif "UnstructuredGrid" in inputtype: self._data = inputobj elif isinstance(inputobj, str): from vedo.io import download, loadUnStructuredGrid if "https://" in inputobj: inputobj = download(inputobj, verbose=False) self._data = loadUnStructuredGrid(inputobj) self.filename = inputobj else: colors.printc("UGrid(): cannot understand input type:\n", inputtype, c='r') return # self._mapper = vtk.vtkDataSetMapper() self._mapper = vtk.vtkPolyDataMapper() self._mapper.SetInterpolateScalarsBeforeMapping( settings.interpolateScalarsBeforeMapping) if settings.usePolygonOffset: self._mapper.SetResolveCoincidentTopologyToPolygonOffset() pof, pou = settings.polygonOffsetFactor, settings.polygonOffsetUnits self._mapper.SetResolveCoincidentTopologyPolygonOffsetParameters( pof, pou) self.GetProperty().SetInterpolationToFlat() if not self._data: return # now fill the representation of the vtk unstr grid sf = vtk.vtkShrinkFilter() sf.SetInputData(self._data) sf.SetShrinkFactor(1.0) sf.Update() gf = vtk.vtkGeometryFilter() gf.SetInputData(sf.GetOutput()) gf.Update() self._polydata = gf.GetOutput() self._mapper.SetInputData(self._polydata) sc = None if self.useCells: sc = self._polydata.GetCellData().GetScalars() else: sc = self._polydata.GetPointData().GetScalars() if sc: self._mapper.SetScalarRange(sc.GetRange()) self.SetMapper(self._mapper) self.property = self.GetProperty()
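# Usage sketch (assumption: building an UGrid from explicit points, connectivity
# and VTK cell types, matching the (pts, cells, celltypes) triplet parsed above;
# the single hexahedron is illustrative):
#
#   import vtk
#   from vedo import UGrid
#   pts = [(0,0,0), (1,0,0), (1,1,0), (0,1,0), (0,0,1), (1,0,1), (1,1,1), (0,1,1)]
#   cells = [[0, 1, 2, 3, 4, 5, 6, 7]]
#   celltypes = [vtk.VTK_HEXAHEDRON]
#   ug = UGrid([pts, cells, celltypes])
#   ug.show(axes=1)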
def _inputsort(obj): import dolfin u = None mesh = None if not utils.isSequence(obj): obj = [obj] for ob in obj: inputtype = str(type(ob)) # printc('inputtype is', inputtype, c=2) if "vedo" in inputtype: # skip vtk objects, will be added later continue if "dolfin" in inputtype or "ufl" in inputtype: if "MeshFunction" in inputtype: mesh = ob.mesh() if ob.dim() > 0: printc('MeshFunction of dim>0 not supported.', c=1) printc('Try e.g.: MeshFunction("size_t", mesh, 0)', c=1, italic=1) printc('instead of MeshFunction("size_t", mesh, 1)', c=1, strike=1) else: #printc(ob.dim(), mesh.num_cells(), len(mesh.coordinates()), len(ob.array())) V = dolfin.FunctionSpace(mesh, "CG", 1) u = dolfin.Function(V) v2d = dolfin.vertex_to_dof_map(V) u.vector()[v2d] = ob.array() elif "Function" in inputtype or "Expression" in inputtype: u = ob elif "ufl.mathfunctions" in inputtype: # not working u = ob elif "Mesh" in inputtype: mesh = ob elif "algebra" in inputtype: mesh = ob.ufl_domain() #print('algebra', ob.ufl_domain()) if "str" in inputtype: mesh = dolfin.Mesh(ob) if u and not mesh and hasattr(u, "function_space"): V = u.function_space() if V: mesh = V.mesh() if u and not mesh and hasattr(u, "mesh"): mesh = u.mesh() #printc('------------------------------------') #printc('mesh.topology dim=', mesh.topology().dim()) #printc('mesh.geometry dim=', mesh.geometry().dim()) #if u: printc('u.value_rank()', u.value_rank()) #if u and u.value_rank(): printc('u.value_dimension()', u.value_dimension(0)) # axis=0 ##if u: printc('u.value_shape()', u.value_shape()) return (mesh, u)
def MeshStreamLines(*inputobj, **options):
    """Build a streamplot."""
    from vedo.base import streamLines

    print('Building streamlines...')

    tol = options.pop('tol', 0.02)
    lw = options.pop('lw', 2)
    direction = options.pop('direction', 'forward')
    maxPropagation = options.pop('maxPropagation', None)
    scalarRange = options.pop('scalarRange', None)
    probes = options.pop('probes', None)

    tubes = options.pop('tubes', dict())  # todo
    maxRadiusFactor = options.pop('maxRadiusFactor', 1)
    varyRadius = options.pop('varyRadius', 1)

    mesh, u = _inputsort(inputobj)
    if not mesh:
        return None

    u_values = _compute_uvalues(u, mesh)
    if not utils.isSequence(u_values[0]):
        printc("~times Error: cannot show StreamLines for 1D scalar values!", c=1)
        raise RuntimeError()
    if u_values.shape[1] == 2:  # u_values is 2D
        u_values = np.insert(u_values, 2, 0, axis=1)  # make it 3d

    meshact = MeshActor(u)
    meshact.addPointArray(u_values, 'u_values')

    if utils.isSequence(probes):
        pass  # a list of seed points was passed explicitly
    elif tol:
        print('decimating mesh points to use them as seeds...')
        probes = meshact.clone().clean(tol).points()
    else:
        probes = meshact.points()
    if len(probes) > 500:
        printc('Probing domain with n =', len(probes), 'points')
        printc(' ..this may take time (or choose a larger tol value)')

    if lw:
        tubes = dict()
    else:
        tubes['varyRadius'] = varyRadius
        tubes['maxRadiusFactor'] = maxRadiusFactor

    str_lns = streamLines(meshact, probes,
                          direction=direction,
                          maxPropagation=maxPropagation,
                          tubes=tubes,
                          scalarRange=scalarRange,
                          activeVectors='u_values')
    if lw:
        str_lns.lw(lw)
    return str_lns
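# Usage sketch (assumption: a dolfin vector field 'u' on 'mesh', as in the
# MeshLines example above; the keyword values are illustrative):
#
#   sl = MeshStreamLines(mesh, u, tol=0.05, lw=3)
#   sl.show(axes=1)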
def printc(*strings, **keys): """ Print to terminal in color (any color!). :param c: foreground color name or (r,g,b) :param bc: background color name or (r,g,b) :param bool bold: boldface [True] :param bool italic: italic [False] :param bool blink: blinking text [False] :param bool underline: underline text [False] :param bool strike: strike through text [False] :param bool dim: make text look dimmer [False] :param bool invert: invert background and forward colors [False] :param box: print a box with specified text character [''] :param bool flush: flush buffer after printing [True] :param str end: the end character to be printed [newline] :param bool debug: print debug information about the evironment :Example: .. code-block:: python from vedo.colors import printc printc('anything', c='tomato', bold=False, end='' ) printc('anything', 455.5, vtkObject, c='lightblue') printc(299792.48, c=4) .. hint:: |colorprint.py|_ |colorprint| """ end = keys.pop("end", "\n") flush = keys.pop("flush", True) if not settings.enablePrintColor: print(*strings, end=end) if flush: sys.stdout.flush() return if not settings.notebookBackend: if not _terminal_has_colors: print(*strings, end=end) if flush: sys.stdout.flush() return c = keys.pop("c", None) bc = keys.pop("bc", None) bold = keys.pop("bold", True) italic = keys.pop("italic", False) blink = keys.pop("blink", False) underline = keys.pop("underline", False) strike = keys.pop("strike", False) dim = keys.pop("dim", False) invert = keys.pop("invert", False) box = keys.pop("box", "") dbg = keys.pop("debug", False) if c is True: c = "green" elif c is False: c = "red" if box is True: box = '-' if c is not None: c = getColor(c) if bc is not None: bc = getColor(bc) try: # ------------------------------------------------------------- txt = str() ns = len(strings) - 1 separator = " " offset = 0 for i, s in enumerate(strings): if i == ns: separator = "" if "\\" in repr(s): # "in" for some reasons changes s from vedo.shapes import _reps for k in emoji.keys(): if k in str(s): s = s.replace(k, emoji[k]) offset += 1 for k, rp in _reps: # check symbols in shapes._reps if k in str(s): s = s.replace(k, rp) offset += 1 txt += str(s) + separator special, cseq = "", "" if c is not None: r, g, b = c cseq += "\x1b[38;2;" + str(int(r * 255)) + ";" + str(int( g * 255)) + ";" + str(int(b * 255)) + "m" if bc: r, g, b = bc cseq += "\x1b[48;2;" + str(int(r * 255)) + ";" + str(int( g * 255)) + ";" + str(int(b * 255)) + "m" if underline and not box: special += "\x1b[4m" if strike and not box: special += "\x1b[9m" if dim: special += "\x1b[2m" if invert: special += "\x1b[7m" if bold: special += "\x1b[1m" if italic: special += "\x1b[3m" if blink: special += "\x1b[5m" if box and not ("\n" in txt): if len(box) > 1: box = box[0] if box in ["_", "=", "-", "+", "~"]: boxv = "|" else: boxv = box if box == "_" or box == ".": outtxt = special + cseq + " " + box * (len(txt) + offset + 2) + " \n" outtxt += boxv + " " * (len(txt) + 2) + boxv + "\n" else: outtxt = special + cseq + box * (len(txt) + offset + 4) + "\n" outtxt += boxv + " " + txt + " " + boxv + "\n" if box == "_": outtxt += "|" + box * (len(txt) + offset + 2) + "|" + "\x1b[0m" + end else: outtxt += box * (len(txt) + offset + 4) + "\x1b[0m" + end sys.stdout.write(outtxt) else: out = special + cseq + txt + "\x1b[0m" if dbg: from inspect import currentframe, getframeinfo from vedo.utils import isSequence, precision cf = currentframe().f_back cfi = getframeinfo(cf) fname = os.path.basename(getframeinfo(cf).filename) 
print("\x1b[7m\x1b[3m\x1b[37m" + fname + " line:\x1b[1m" + str(cfi.lineno) + "\x1b[0m", end='') print('\x1b[3m\x1b[37m\x1b[2m', "\U00002501" * 30, time.ctime(), "\x1b[0m") if txt: print(" \x1b[37m\x1b[1mMessage : " + out) print(" \x1b[37m\x1b[1mFunction:\x1b[0m\x1b[37m " + str(cfi.function)) print(' \x1b[1mLocals :\x1b[0m') for loc in cf.f_locals.keys(): obj = cf.f_locals[loc] var = repr(obj) if 'module ' in var: continue if 'function ' in var: continue if 'class ' in var: continue if '_' in loc: continue if hasattr(obj, 'name'): if not obj.name: oname = str(type(obj)) else: oname = obj.name var = oname + ', at ' + precision(obj.GetPosition(), 3) print(' \x1b[37m', loc, '=', var[:60].replace('\n', ''), '\x1b[0m') if isSequence(obj) and len(obj) > 4: print( ' \x1b[37m\x1b[2m\x1b[3m len:', len(obj), ' min:', precision(min(obj), 4), ' max:', precision(max(obj), 4), # ' mean:', np.mean(np.array(obj)), '\x1b[0m') print(" \x1b[1m\x1b[37mElapsed time:\x1b[0m\x1b[37m", str(time.time() - _global_start_time)[:6], 's\x1b[0m') else: sys.stdout.write(out + end) except: # ------------------------------------------------------------- print(*strings, end=end) if flush: sys.stdout.flush()
def __init__( self, inputobj=None, c=('r', 'y', 'lg', 'lb', 'b'), #('b','lb','lg','y','r') alpha=(0.5, 1), alphaUnit=1, mapper='tetra', ): BaseGrid.__init__(self) self.useArray = 0 inputtype = str(type(inputobj)) #printc('TetMesh inputtype', inputtype) ################### if inputobj is None: self._data = vtk.vtkUnstructuredGrid() elif isinstance(inputobj, vtk.vtkUnstructuredGrid): self._data = inputobj elif isinstance(inputobj, vtk.vtkRectilinearGrid): r2t = vtk.vtkRectilinearGridToTetrahedra() r2t.SetInputData(inputobj) r2t.RememberVoxelIdOn() r2t.SetTetraPerCellTo6() r2t.Update() self._data = r2t.GetOutput() elif isinstance(inputobj, vtk.vtkDataSet): r2t = vtk.vtkDataSetTriangleFilter() r2t.SetInputData(inputobj) #r2t.TetrahedraOnlyOn() r2t.Update() self._data = r2t.GetOutput() elif isinstance(inputobj, str): from vedo.io import download, loadUnStructuredGrid if "https://" in inputobj: inputobj = download(inputobj, verbose=False) ug = loadUnStructuredGrid(inputobj) tt = vtk.vtkDataSetTriangleFilter() tt.SetInputData(ug) tt.SetTetrahedraOnly(True) tt.Update() self._data = tt.GetOutput() elif utils.isSequence(inputobj): # if "ndarray" not in inputtype: # inputobj = np.array(inputobj) self._data = self._buildtetugrid(inputobj[0], inputobj[1]) ################### if 'tetra' in mapper: self._mapper = vtk.vtkProjectedTetrahedraMapper() elif 'ray' in mapper: self._mapper = vtk.vtkUnstructuredGridVolumeRayCastMapper() elif 'zs' in mapper: self._mapper = vtk.vtkUnstructuredGridVolumeZSweepMapper() elif isinstance(mapper, vtk.vtkMapper): self._mapper = mapper else: printc('Unknown mapper type', [mapper], c='r') raise RuntimeError() self._mapper.SetInputData(self._data) self.SetMapper(self._mapper) self.color(c).alpha(alpha) if alphaUnit: self.GetProperty().SetScalarOpacityUnitDistance(alphaUnit) # remember stuff: self._color = c self._alpha = alpha self._alphaUnit = alphaUnit
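# Usage sketch (assumption: building a TetMesh from a (points, cells) pair, as
# handled by the isSequence branch above; the single tetrahedron is illustrative):
#
#   from vedo import TetMesh
#   points = [(0,0,0), (1,0,0), (0,1,0), (0,0,1)]
#   cells = [[0, 1, 2, 3]]
#   tm = TetMesh([points, cells], alpha=(0.2, 1))
#   tm.show(axes=1)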
def __init__(self, inputobj=None): vtk.vtkImageSlice.__init__(self) Base3DProp.__init__(self) BaseVolume.__init__(self) self._mapper = vtk.vtkImageResliceMapper() self._mapper.SliceFacesCameraOn() self._mapper.SliceAtFocalPointOn() self._mapper.SetAutoAdjustImageQuality(False) self._mapper.BorderOff() self.lut = None self.property = vtk.vtkImageProperty() self.property.SetInterpolationTypeToLinear() self.SetProperty(self.property) ################### if isinstance(inputobj, str): if "https://" in inputobj: from vedo.io import download inputobj = download(inputobj, verbose=False) # fpath elif os.path.isfile(inputobj): pass else: inputobj = sorted(glob.glob(inputobj)) ################### inputtype = str(type(inputobj)) if inputobj is None: img = vtk.vtkImageData() if isinstance(inputobj, Volume): img = inputobj.imagedata() self.lut = utils.ctf2lut(inputobj) elif utils.isSequence(inputobj): if isinstance(inputobj[0], str): # scan sequence of BMP files ima = vtk.vtkImageAppend() ima.SetAppendAxis(2) pb = utils.ProgressBar(0, len(inputobj)) for i in pb.range(): f = inputobj[i] picr = vtk.vtkBMPReader() picr.SetFileName(f) picr.Update() mgf = vtk.vtkImageMagnitude() mgf.SetInputData(picr.GetOutput()) mgf.Update() ima.AddInputData(mgf.GetOutput()) pb.print('loading...') ima.Update() img = ima.GetOutput() else: if "ndarray" not in inputtype: inputobj = np.array(inputobj) if len(inputobj.shape) == 1: varr = utils.numpy2vtk(inputobj, dtype=float) else: if len(inputobj.shape) > 2: inputobj = np.transpose(inputobj, axes=[2, 1, 0]) varr = utils.numpy2vtk(inputobj.ravel(order='F'), dtype=float) varr.SetName('input_scalars') img = vtk.vtkImageData() img.SetDimensions(inputobj.shape) img.GetPointData().AddArray(varr) img.GetPointData().SetActiveScalars(varr.GetName()) elif "ImageData" in inputtype: img = inputobj elif isinstance(inputobj, Volume): img = inputobj.inputdata() elif "UniformGrid" in inputtype: img = inputobj elif hasattr( inputobj, "GetOutput"): # passing vtk object, try extract imagdedata if hasattr(inputobj, "Update"): inputobj.Update() img = inputobj.GetOutput() elif isinstance(inputobj, str): from vedo.io import loadImageData, download if "https://" in inputobj: inputobj = download(inputobj, verbose=False) img = loadImageData(inputobj) else: colors.printc("VolumeSlice: cannot understand input type:\n", inputtype, c='r') return self._data = img self._mapper.SetInputData(img) self.SetMapper(self._mapper)
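# Usage sketch (assumption: VolumeSlice is exposed by the vedo.volume module and
# accepts a Volume as input, the branch handled above; the dataset name follows
# earlier examples in this file):
#
#   from vedo import Volume, dataurl
#   from vedo.volume import VolumeSlice
#   vol = Volume(dataurl + 'embryo.tif')
#   vslice = VolumeSlice(vol)     # reslices the volume at the camera focal point
#   vslice.show(axes=1)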