def apply_translation(self, *args):
    """Recompute the frame transforms from the model's rotation angles,
    rotation center and offset (all scaled by the current zoom level).

    Reads angle_*, center_* and offset_* from ``self.model`` and updates
    the transforms of ``self.translation_frame`` and ``self.center_frame``.
    """
    if self.view and self.level:
        rmatrix = np.eye(4)
        # Angles come from the model in degrees; rotation_matrix is fed
        # radians, ordered (z, y, x) here.
        r = rotation_matrix((self.model.angle_z.get() * np.pi / 180,
                             self.model.angle_y.get() * np.pi / 180,
                             self.model.angle_x.get() * np.pi / 180))
        # Reverse both axes of the 3x3 block — presumably converting
        # between xyz and zyx axis ordering; confirm against rotation_matrix.
        rmatrix[:3, :3] = r[::-1, ::-1]
        # Rotation center, in level-scaled coordinates.
        cmatrix = np.array([
            self.model.center_x.get() / self.level,
            self.model.center_y.get() / self.level,
            self.model.center_z.get() / self.level
        ])
        # Translation offset, in level-scaled coordinates.
        tmatrix = np.array([
            self.model.offset_x.get() / self.level,
            self.model.offset_y.get() / self.level,
            self.model.offset_z.get() / self.level
        ])
        #
        # The last of 4 columns of the affine transform is the
        # translation. The translation is the rotated center minus the
        # offset plus the center, not rotated.
        #
        rmatrix[3, :3] = rmatrix[:3, :3].dot(tmatrix - cmatrix) + cmatrix
        rotate_transform = MatrixTransform(rmatrix)
        self.translation_frame.transform = rotate_transform
        # Pure translation that positions the center-frame marker.
        self.center_frame.transform = MatrixTransform(
            np.array([[1, 0, 0, cmatrix[0]],
                      [0, 1, 0, cmatrix[1]],
                      [0, 0, 1, cmatrix[2]],
                      [0, 0, 0, 1]]))
        self.scene.update()
def as_matrix_transform(transform):
    """
    Simplify a transform to a single matrix transform, which makes it
    a lot faster to compute transformations.

    Raises a TypeError if the transform cannot be simplified.
    """
    # Guard-clause dispatch over the known transform types.
    if isinstance(transform, ChainTransform):
        combined = np.identity(4)
        for sub in transform.transforms:
            # We need to do the matrix multiplication manually because VisPy
            # somehow doesn't multiply matrices if there is a perspective
            # component. The equation below looks like it's the wrong way
            # around, but the VisPy matrices are transposed.
            combined = np.matmul(as_matrix_transform(sub).matrix, combined)
        return MatrixTransform(combined)
    if isinstance(transform, InverseTransform):
        simplified = as_matrix_transform(transform._inverse)
        return MatrixTransform(simplified.inv_matrix)
    if isinstance(transform, NullTransform):
        return MatrixTransform()
    if isinstance(transform, STTransform):
        return transform.as_matrix()
    if isinstance(transform, MatrixTransform):
        return transform
    raise TypeError("Could not simplify transform of type {0}".format(
        type(transform)))
def __init__(self): vispy.app.Canvas.__init__(self, keys='interactive', size=(800, 800)) # Create 4 copies of an image to be displayed with different transforms image = get_image() self.images = [visuals.ImageVisual(image, method='impostor') for i in range(4)] # Transform all images to a standard size / location (because # get_image() might return unexpected sizes) s = 100. / max(self.images[0].size) tx = 0.5 * (100 - (self.images[0].size[0] * s)) ty = 0.5 * (100 - (self.images[0].size[1] * s)) base_tr = STTransform(scale=(s, s), translate=(tx, ty)) self.images[0].transform = (STTransform(scale=(30, 30), translate=(600, 600)) * SineTransform() * STTransform(scale=(0.1, 0.1), translate=(-5, -5)) * base_tr) tr = MatrixTransform() tr.rotate(40, (0, 0, 1)) tr.rotate(30, (1, 0, 0)) tr.translate((0, -20, -60)) p = MatrixTransform() p.set_perspective(0.5, 1, 0.1, 1000) tr = p * tr tr1 = (STTransform(translate=(200, 600)) * tr * STTransform(translate=(-50, -50)) * base_tr) self.images[1].transform = tr1 tr2 = (STTransform(scale=(3, -100), translate=(200, 50)) * LogTransform((0, 2, 0)) * STTransform(scale=(1, -0.01), translate=(-50, 1.1)) * base_tr) self.images[2].transform = tr2 tr3 = (STTransform(scale=(400, 400), translate=(570, 400)) * PolarTransform() * STTransform(scale=(np.pi/150, -0.005), translate=(-3.3*np.pi/4., 0.7)) * base_tr) self.images[3].transform = tr3 text = visuals.TextVisual( text=['logarithmic', 'polar', 'perspective', 'custom (sine)'], pos=[(100, 20), (500, 20), (100, 410), (500, 410)], color='k', font_size=16) self.visuals = self.images + [text] self.show()
def _niimg_rot(): """Get rotation trnasformations of each slice.""" # Sagittal sg_rot = MatrixTransform() sg_rot.rotate(90., (0, 0, 1)) sg_rot.rotate(180., (0, 1, 0)) # Coronal cr_rot = MatrixTransform() cr_rot.rotate(90., (0, 0, 1)) cr_rot.rotate(180., (0, 1, 0)) # Axial ax_rot = MatrixTransform() ax_rot.rotate(180., (1, 0, 0)) return sg_rot, cr_rot, ax_rot
def __init__(self, _model: AtlasSectionViewModel):
    """Build the 2-D atlas section view and wire it to *_model*."""
    self._model = _model
    self._model.register(self.update)  # repaint when the model changes
    self._canvas = SceneCanvas()
    self._viewbox = ViewBox(parent=self._canvas.scene)
    self._canvas.central_widget.add_widget(self._viewbox)
    self._viewbox.camera = TurntableCamera(
        interactive=False,
        fov=0,  # Makes it an ortho camera.
        azimuth=0,
        elevation=90,
    )
    self._slice = Image(cmap='grays', parent=self._viewbox.scene)
    self._slice.transform = MatrixTransform()
    self._slice.set_data(self._model.atlas_section_image)
    self._slice.clim = self._model.clim
    self._viewbox.camera.center = self._model.camera_center
    # NOTE(review): self-assignment is a no-op — presumably this was meant
    # to read a scale from the model; confirm intended value.
    self._viewbox.camera.scale_factor = self._viewbox.camera.scale_factor
    # Crosshair lines marking the current position within the section.
    self._vertical_line = InfiniteLine(pos=0, vertical=True,
                                       parent=self._viewbox.scene)
    self._horizontal_line = InfiniteLine(pos=0, vertical=False,
                                         parent=self._viewbox.scene)
    self._canvas.events.mouse_press.connect(self.mouse_press)
    self._canvas.events.mouse_move.connect(self.mouse_move)
    self._vertical_line.set_data(color=self._model.vertical_line_color)
    self._horizontal_line.set_data(color=self._model.horizontal_line_color)
def axis_visual(scale=1.0, parent=None):
    """
    Returns a :class:`vispy.scene.visuals.XYZAxis` class instance using given
    scale.

    Parameters
    ----------
    scale : numeric, optional
        Axis visual scale.
    parent : Node, optional
        Parent of the axis visual in the `SceneGraph`.

    Returns
    -------
    XYZAxis
        Axis visual.
    """
    visual = XYZAxis(parent=parent)

    # Uniform scaling applied through a matrix transform.
    scaling = MatrixTransform()
    scaling.scale((scale, scale, scale))
    visual.transform = scaling

    return visual
def _createShapeNode(self, shape):
    """Create and attach the visual node matching *shape*'s dynamic type.

    Unsupported shape types are reported on stdout and leave
    ``self.shapeNode`` unchanged.
    """
    shapeType = shape.getType()
    if shapeType == dynamics.SphereShape.getStaticType():
        self.shapeNode = SphereShapeNode(shape, parent=self)
    elif shapeType == dynamics.BoxShape.getStaticType():
        self.shapeNode = BoxShapeNode(shape, parent=self)
    # elif shapeType == dynamics.EllipsoidShape.getStaticType():
    #     print(shapeType)
    elif shapeType == dynamics.CylinderShape.getStaticType():
        self.shapeNode = CylinderShapeNode(shape, parent=self)
    elif shapeType == dynamics.CapsuleShape.getStaticType():
        self.shapeNode = CapsuleShapeNode(shape, parent=self)
    # elif shapeType == dynamics.ConeShape.getStaticType():
    #     print(shapeType)
    # elif shapeType == dynamics.PlaneShape.getStaticType():
    #     print(shapeType)
    # elif shapeType == dynamics.MultiSphereConvexHullShape.getStaticType():
    #     print(shapeType)
    elif shapeType == dynamics.MeshShape.getStaticType():
        self.shapeNode = MeshShapeNode(shape, parent=self)
    # elif shapeType == dynamics.SoftMeshShape.getStaticType():
    #     print(shapeType)
    # elif shapeType == dynamics.LineSegmentShape.getStaticType():
    #     print(shapeType)
    else:
        # Bug fix: the placeholder was never filled in, so the message
        # printed a literal "{}" instead of the shape type.
        print("{} is an unsupported shape type. Ping JS to implement this."
              .format(shapeType))

    if self.shapeNode is None:
        return

    self.shapeNode.transform = MatrixTransform()
def __init__(self, source=None, **kwargs):
    """Image visual displaying radar data as a PPI (plan position
    indicator), optionally offset by the radar's position."""
    super(PolarImage, self).__init__(**kwargs)
    self.unfreeze()

    # source should be an object, which contains information about
    # a specific radar source
    self.source = source

    # source should contain the radar coordinates in some usable format
    # here I assume offset from lower left (0,0)
    if source is not None:
        xoff = source['X']
        yoff = source['Y']
    else:
        xoff = 0
        yoff = 0

    # this takes the image sizes and uses it for transformation
    self.theta = self._data.shape[0]
    self.range = self._data.shape[1]

    # PTransform takes care of making PPI from data array
    # rot rotates the ppi 180 deg (image origin is upper left)
    # the translation moves the image to center the ppi
    rot = MatrixTransform()
    rot.rotate(180, (0, 0, 1))
    self.transform = (
        STTransform(translate=(self.range + xoff, self.range + yoff, 0)) *
        rot *
        PTransform())
    self.freeze()
def load_maze(self, maze_file, mirror=True, maze_coord_file=None): self.maze = Maze(maze_file, maze_coord_file) #color='gray' self.scale_factor = 100 self.origin = -np.array(self.maze.coord['Origin']).astype( np.float32) * self.scale_factor self.origin_hd = np.arctan2(-self.origin[1], self.origin[0]) / np.pi * 180 self.border = np.array(self.maze.coord['border']).astype(np.float32) self.x_range = (self.origin[0] + self.border[0] * self.scale_factor, self.origin[0] + self.border[2] * self.scale_factor) self.y_range = (self.origin[1] + self.border[1] * self.scale_factor, self.origin[1] + self.border[3] * self.scale_factor) self._arrow_len = (self.x_range[1] - self.x_range[0]) / 10 # self.marker.move(self.origin[:2]) # self.current_pos = self.origin[:2] ### MatrixTransform perform Affine Transform transform = MatrixTransform() # transform.rotate(angle=90, axis=(1, 0, 0)) # rotate around x-axis for 90, the maze lay down if mirror: self.mirror = True transform.matrix[:, 2] = -transform.matrix[:, 2] # reflection matrix, mirror image on x-y plane transform.scale( scale=4 * [self.scale_factor]) # scale at all 4 dim for scale_factor transform.translate(pos=self.origin) # translate to origin self.maze.transform = transform self.view.add(self.maze) self.set_range() print('Origin:', self.origin) print('border:', self.border)
def on_atlas_update(self, volume: ndarray, transform: ndarray):
    """Refresh the displayed atlas volume, its transform, and the camera."""
    intensity_range = (np.min(volume), np.max(volume))
    self._atlas_volume.set_data(volume, clim=intensity_range)
    # The incoming (4, 4) array is transposed before use — presumably to
    # match VisPy's transposed matrix layout.
    self._atlas_volume.transform = MatrixTransform(transform.T)
    camera = self._viewbox.camera
    camera.center = (0, 0, 0)
    camera.scale_factor = transform[0, 0] * volume.shape[0]
    self._canvas.update()
def __init__(self):
    """Demo canvas: a segmented box visual spun by a timer-driven rotation."""
    app.Canvas.__init__(self, keys='interactive', size=(800, 550))

    vertices, faces, outline = create_box(width=1, height=1, depth=1,
                                          width_segments=4,
                                          height_segments=8,
                                          depth_segments=16)

    self.box = visuals.BoxVisual(width=1, height=1, depth=1,
                                 width_segments=4,
                                 height_segments=8,
                                 depth_segments=16,
                                 vertex_colors=vertices['color'],
                                 edge_color='b')

    # Euler angles updated by self.rotate each timer tick.
    self.theta = 0
    self.phi = 0

    self.transform = MatrixTransform()
    self.box.transform = self.transform

    self.show()

    # ~60 fps animation timer.
    self.timer = app.Timer(connect=self.rotate)
    self.timer.start(0.016)
def __init__(self, keys='interactive', size=(640, 480), show=True, **kwargs):
    """3-D scene canvas: a reference axis and grid, a line plot, and a
    cube (with its own axis) that share one movable transform."""
    super().__init__(keys=keys, size=size, show=show, **kwargs)
    self.unfreeze()
    self._viewbox = self.central_widget.add_view(camera='turntable')
    # Static scene furniture: world axis and ground grid.
    self._baseAxis = visuals.XYZAxis(parent=self._viewbox.scene, width=5)
    self._gridLines = visuals.GridLines()
    self._viewbox.add(self._gridLines)
    self._cubeAxis = visuals.XYZAxis(parent=self._viewbox.scene, width=5)
    # Wrap LinePlotVisual so it can be used as a scene node.
    Plot3D = scene.visuals.create_visual_node(LinePlotVisual)
    self._plot = Plot3D(([0], [0], [0]), width=3.0, color='y',
                        edge_color='w', symbol='x',
                        face_color=(0.2, 0.2, 1, 0.8),
                        parent=self._viewbox.scene)
    # Trajectory history, grown as new points arrive.
    self._xPos = np.array([0], dtype=np.float32)
    self._yPos = np.array([0], dtype=np.float32)
    self._zPos = np.array([0], dtype=np.float32)
    self._cube = visuals.Cube(parent=self._viewbox.scene,
                              color=(0.5, 0.5, 1, 0.5),
                              edge_color=(0.6, 0.2, 0.8, 1))
    # Cube and its axis move together via this shared transform.
    self._transform = MatrixTransform()
    self._cube.transform = self._transform
    self._cubeAxis.transform = self._transform
    self.freeze()
def __init__(self):
    """Demo canvas: a 3x3 grid of sphere meshes showing different coloring
    and shading modes, all sharing one timer-driven rotation."""
    app.Canvas.__init__(self, keys='interactive', size=(800, 550))

    self.meshes = []
    self.rotation = MatrixTransform()

    # Generate some data to work with
    global mdata
    mdata = create_sphere(20, 40, 1.0)

    # Mesh with pre-indexed vertices, uniform color
    self.meshes.append(visuals.MeshVisual(meshdata=mdata, color='b'))

    # Mesh with pre-indexed vertices, per-face color
    # Because vertices are pre-indexed, we get a different color
    # every time a vertex is visited, resulting in sharp color
    # differences between edges.
    verts = mdata.get_vertices(indexed='faces')
    nf = verts.size // 9
    fcolor = np.ones((nf, 3, 4), dtype=np.float32)
    fcolor[..., 0] = np.linspace(1, 0, nf)[:, np.newaxis]
    fcolor[..., 1] = np.random.normal(size=nf)[:, np.newaxis]
    fcolor[..., 2] = np.linspace(0, 1, nf)[:, np.newaxis]
    mesh = visuals.MeshVisual(vertices=verts, face_colors=fcolor)
    self.meshes.append(mesh)

    # Mesh with unindexed vertices, per-vertex color
    # Because vertices are unindexed, we get the same color
    # every time a vertex is visited, resulting in no color differences
    # between edges.
    verts = mdata.get_vertices()
    faces = mdata.get_faces()
    nv = verts.size // 3
    vcolor = np.ones((nv, 4), dtype=np.float32)
    vcolor[:, 0] = np.linspace(1, 0, nv)
    vcolor[:, 1] = np.random.normal(size=nv)
    vcolor[:, 2] = np.linspace(0, 1, nv)
    self.meshes.append(visuals.MeshVisual(verts, faces, vcolor))
    self.meshes.append(
        visuals.MeshVisual(verts, faces, vcolor, shading='flat'))
    self.meshes.append(
        visuals.MeshVisual(verts, faces, vcolor, shading='smooth'))

    # Lay out meshes in a grid
    grid = (3, 3)
    s = 300. / max(grid)
    for i, mesh in enumerate(self.meshes):
        x = 800. * (i % grid[0]) / grid[0] + 400. / grid[0] - 2
        y = 800. * (i // grid[1]) / grid[1] + 400. / grid[1] + 2
        # Per-mesh placement chained with the shared rotation.
        transform = ChainTransform([
            STTransform(translate=(x, y), scale=(s, s, s)),
            self.rotation
        ])
        mesh.transform = transform
        # Flatten z so all meshes render in a shallow depth range.
        mesh.transforms.scene_transform = STTransform(scale=(1, 1, 0.01))

    self.show()

    # ~60 fps animation timer.
    self.timer = app.Timer(connect=self.rotate)
    self.timer.start(0.016)
def _colorbar_for_surf(colormap, limits):
    """Build a ColorBar for the surface view, rotated and positioned
    to sit beside the surface."""
    bar = ColorBar(colormap, 'top', (50, 10), clim=limits)
    placement = MatrixTransform()
    placement.rotate(-90, (0, 1, 0))
    placement.translate((0, -100, 50))
    bar.transform = placement
    return bar
def plot_line_vispy(points, clf=True, tube_radius=1.,
                    colour=None, zero_centroid=True,
                    closed=False, mus=None,
                    cmap=None, tube_points=8, **kwargs):
    """Draw *points* as a colored 3-D tube on the shared vispy canvas.

    colour : explicit color(s); if None, points are colored along the
        curve via *cmap* (a matplotlib name, a callable, or default HSV).
    mus : if given, overrides colours with HSV values derived from it.
    zero_centroid : translate the tube so its centroid is at the origin.
    """
    # Add an extra point to fix tube drawing bug
    last_tangent = points[-1] - points[-2]
    points = n.vstack([points, points[-1] + 0.0001 * last_tangent])

    ensure_vispy_canvas()
    if clf:
        clear_vispy_canvas()
    canvas = vispy_canvas
    from vispy import app, scene, color

    # Normalise cmap to a callable mapping [0, 1] -> RGBA.
    if isinstance(cmap, str):
        from matplotlib.cm import get_cmap
        mpl_cmap = get_cmap(cmap)
        cmap = lambda v: n.array(mpl_cmap(v))
    cmap = cmap or (lambda c: hsv_to_rgb(c, 1, 1))

    if colour is None:
        # Color along the curve parameter.
        colours = n.linspace(0, 1, len(points))
        colours = n.array([cmap(c) for c in colours])
    else:
        colours = color.ColorArray(colour)

    if mus is not None:
        # mus takes precedence over both colour and cmap.
        colours = n.array([hsv_to_rgb(c, 1, 1) for c in mus])

    l = scene.visuals.Tube(points, color=colours,
                           shading='smooth',
                           radius=tube_radius,
                           tube_points=tube_points,
                           closed=closed)

    canvas.view.add(l)
    # canvas.view.camera = 'arcball'
    canvas.view.camera = scene.ArcballCamera(fov=30,
                                             distance=7.5 * n.max(
                                                 n.abs(points)))
    #canvas.view.camera = scene.TurntableCamera(fov=30)
    if zero_centroid:
        l.transform = MatrixTransform()
        # l.transform = scene.transforms.AffineTransform()
        l.transform.translate(-1 * n.average(points, axis=0))

    canvas.show()
    # import ipdb
    # ipdb.set_trace()
    return canvas
def _master_transform(self):
    """vispy.visuals.transforms.MatrixTransform:
    Central node's firstmost transform.
    """
    # Whenever a new parent is set, the node's transform is reset to a
    # NullTransform, so ensure callers always get a MatrixTransform back.
    current = self.node.transform
    if not isinstance(current, MatrixTransform):
        current = MatrixTransform()
        self.node.transform = current
    return current
def updateSlice(self, cutAt):
    """Re-cut the volume at *cutAt* and reposition the 2-D section plane."""
    self.sectionTo = cutAt
    # Render only the part of the stack in front of the cut.
    self.volume.set_data(self.vol_data[:,0:self.sectionTo,:])
    self.section2D = self.vol_data[:,self.sectionTo,:]
    self.plane.set_data(self.section2D)
    # Stand the plane upright and move it to the cut position.
    self.plane.transform = MatrixTransform()
    self.plane.transform.rotate(90, (1,0,0))
    self.plane.transform.translate((0,self.sectionTo,0))
def __init__(self, state_vec, p):
    """Initialise playback state for a trajectory of *state_vec* frames."""
    self.state_vec = state_vec
    self.n_max = len(state_vec)
    self.n = 0  # current frame index
    self.p = p
    self.radius = p / 6.0  # display radius derived from the scale parameter
    self.rot = MatrixTransform()
    self.v_orient = None
    # Current position, initially at the origin.
    self.x = 0.
    self.y = 0.
    self.z = 0.
def __init__(self, view, face_color, state_vec, orient_vec):
    """Arrow-like vector visual animated from per-frame state/orientation."""
    # NOTE(review): the positional super() args are base-class geometry
    # parameters (segment counts, radii, lengths, ...) — confirm against
    # the base-class signature before changing.
    super(mbVector, self).__init__(10, 10, 0.05, 1., 0.1, 0.25,
                                   color=face_color, shading="smooth",
                                   parent=view)
    self.unfreeze()
    self.n = 0  # current frame index
    self.n_max = len(state_vec)
    self.trafo = MatrixTransform()
    self.state_vec = state_vec
    self.orient_vec = orient_vec
def plot_lines_vispy(lines, clf=True, tube_radius=1.,
                     colours=None, zero_centroid=True,
                     tube_points=8, closed=False, **kwargs):
    """Draw several 3-D tubes (one per entry of *lines*) on the shared
    vispy canvas as a single mesh collection.

    tube_radius may be a scalar (applied to all lines) or a per-line list;
    colours defaults to purple for every line.
    """
    ensure_vispy_canvas()
    if clf:
        clear_vispy_canvas()
    canvas = vispy_canvas
    from vispy import app, scene, color

    # Broadcast a scalar radius to one radius per line.
    if not isinstance(tube_radius, list):
        tube_radius = [tube_radius for _ in range(len(lines))]

    if colours is None:
        colours = ['purple' for line in lines]

    tubes = []
    for colour, points, radius in zip(colours, lines, tube_radius):
        l = scene.visuals.Tube(points, color=colour,
                               shading='smooth',
                               radius=radius,
                               closed=closed,
                               tube_points=tube_points)
        tubes.append(l)

    from .visualcollection import MeshCollection
    collection = MeshCollection(tubes)
    canvas.view.add(collection)
    canvas.view.camera = 'arcball'
    canvas.view.camera.fov = 30
    # canvas.view.camera = scene.TurntableCamera(
    #     fov=90, up='z', distance=1.2*n.max(n.max(
    #         points, axis=0)))
    if zero_centroid:
        # NOTE(review): `l` and `points` here refer to the *last* tube from
        # the loop above, so only that tube is recentred (and this raises
        # NameError when `lines` is empty) — confirm whether the whole
        # collection was meant to be translated instead.
        l.transform = MatrixTransform()
        # l.transform = scene.transforms.AffineTransform()
        l.transform.translate(-1 * n.average(points, axis=0))

    canvas.show()
    return canvas
def __init__(self):
    """Visualise the MD589 annotation volume: a MIP render of the stack up
    to the cut position plus a textured 2-D section plane at the cut."""
    scene.SceneCanvas.__init__(self, keys='interactive', size=(960, 960),
                               show=True, bgcolor='black', title='MRI',
                               vsync=False)
    self.unfreeze()

    self.view = self.central_widget.add_view()

    # Alternative datasets kept for reference:
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_thumbnail.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_scoreMap_2.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_scoreMap_1.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_scoreMap_9.npz')['arr_0']
    # self.vol_data = self.vol_data / self.vol_data.max()
    # self.vol_data[self.vol_data < .5] = 0

    self.vol_data = bp.unpack_ndarray_file('/home/yuncong/CSHL_volumes/volume_MD589_annotation.bp')

    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_annotationAllClasses.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_labelmap.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD594_predMap.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_predMap.npz')['arr_0']

    # Downsample by 2 on every axis and normalise the 0-9 labels to [0, 1].
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the alias always meant the builtin float.
    self.vol_data = self.vol_data[::2, ::2, ::2].astype(float) / 9.

    # self.vol_data = np.flipud(np.rollaxis(self.vol_data, 1))

    # self.sectionTo = 150
    self.sectionTo = 50

    colors = np.loadtxt('/home/yuncong/Brain/visualization/100colors.txt')

    # self.volume = scene.visuals.Volume(self.vol_data[:,0:self.sectionTo,:], parent=self.view.scene, cmap=get_colormap('coolwarm'))
    # MIP render of the stack up to the cut plane.
    self.volume = scene.visuals.Volume(
        self.vol_data[:, 0:self.sectionTo, :], parent=self.view.scene,
        method='mip',
        cmap=Colormap([(0, 0, 0), (0, 1, 0), (1, 0, 0), (0, 1, 0),
                       (0, 0, 1), (1, 1, 0), (0, 1, 1), (1, 1, 0),
                       (1, 0.5, 0), (0, 0.5, 0), (0, 0, 1)],
                      interpolation='linear'))
    self.volume.transform = scene.STTransform(translate=(0, 0, 0))

    CMAP = self.volume.cmap

    # The 2-D section at the cut plane, sharing the volume's colormap.
    self.section2D = self.vol_data[:, self.sectionTo, :]
    self.plane = scene.visuals.Image(self.section2D, parent=self.view.scene,
                                     cmap=CMAP, relative_step_size=1.5)
    # self.plane.transform = scene.STTransform(translate=(0,self.sectionTo,0))
    # self.plane.transform = scene.STTransform(translate=(0,0,0))
    self.plane.transform = MatrixTransform()
    self.plane.transform.rotate(90, (1, 0, 0))
    self.plane.transform.translate((0, self.sectionTo, 0))
    self.plane.attach(BlackToAlpha())

    self.view.camera = scene.cameras.ArcballCamera(parent=self.view.scene)
def __init__(self, size=(500, 10), cmap='grays', clim=None, label_str="Colorbar", label_color='black', label_size=12, tick_size=10, border_width=1.0, border_color='black', visible=True, parent=None): assert clim is not None, 'clim must be specified explicitly.' # Create a scene.visuals.Image (without parent by default). scene.visuals.Image.__init__(self, parent=None, interpolation='nearest', method='auto') self.unfreeze() self.visible = visible self.canvas_size = None # will be set when parent is linked # Record the important drawing parameters. self.pos = (0, 0) self.bar_size = size # tuple self.cmap = get_colormap(cmap) # vispy Colormap self.clim = clim # tuple # Record the styling parameters. self.label_str = label_str self.label_color = label_color self.label_size = label_size self.tick_size = tick_size self.border_width = border_width self.border_color = border_color # Draw colorbar using Matplotlib. self.set_data(self._draw_colorbar()) # Give a Matrix transform to self in order to move around canvas. self.transform = MatrixTransform() self.freeze()
def __init__(self, loc=(80, 72), size=60, seismic_coord_system=True,
             width=2, antialias=True, visible=True, parent=None):
    """Draggable XYZ axis legend with a selection-highlight circle."""
    # Create a scene.visuals.XYZAxis (without parent by default).
    scene.visuals.XYZAxis.__init__(self, parent=parent,
                                   width=width, antialias=antialias)
    self.interactive = True
    self.unfreeze()
    self.visible = visible
    self.canvas_size = None  # will be set when parent is linked

    # Determine the size and position.
    self.loc = loc
    self.size = size
    # z-axis down seismic coordinate system, or z-axis up normal system.
    self.seismic_coord_system = seismic_coord_system

    # The selection highlight (a Ellipse visual with transparent color).
    # The circle is centered on the axis legend.
    self.highlight = scene.visuals.Ellipse(
        parent=parent, center=self.loc, radius=self.size,
        color=(1, 1, 0, 0.5))  # transparent yellow color
    self.highlight.visible = False  # only show when selected

    # Set the anchor point (2D screen coordinates). The mouse will
    # drag the axis by anchor point to move around the screen.
    self.anchor = None  # None by default
    self.offset = np.array([0, 0])

    # The axis legend is rotated to align with the parent camera. Then put
    # the legend to specified location and scale up to desired size.
    # The location is computed from the top-left corner.
    self.transform = MatrixTransform()
    self.freeze()
def array_to_stt(arr):
    """Turn a 4x4 array into a scale and translate matrix transformation.

    Parameters
    ----------
    arr : array_like
        A 4x4 array.

    Returns
    -------
    transform : VisPy.transform
        The VisPy transformation.
    """
    # Bug fix: work on a copy so the caller's array is not mutated
    # (the original version modified `arr` in place).
    _arr = np.asarray(arr).copy()
    # Move the translation column into the bottom row — VisPy stores
    # matrices transposed relative to the conventional affine layout.
    _arr[-1, 0:-1] = _arr[0:-1, -1]
    _arr[0:-1, -1] = 0.
    transform = MatrixTransform(_arr)
    # transform.scale(np.diag(_arr)[0:-1])
    # transform.translate(_arr[0:-1, -1])
    return transform
def __init__(self): app.Canvas.__init__(self, keys='interactive', size=(800, 800)) # Define several Line visuals that use the same position data # but have different colors and transformations colors = [color, (1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1), (1, 1, 0, 1), (1, 1, 1, 1)] self.lines = [visuals.LineVisual(pos=pos, color=colors[i]) for i in range(6)] center = STTransform(translate=(400, 400)) self.lines[0].transform = center self.lines[1].transform = (center * STTransform(scale=(1, 0.1, 1))) self.lines[2].transform = (center * STTransform(translate=(200, 200, 0)) * STTransform(scale=(0.3, 0.5, 1))) self.lines[3].transform = (center * STTransform(translate=(-200, -200, 0), scale=(200, 1)) * LogTransform(base=(10, 0, 0)) * STTransform(translate=(1, 0, 0))) self.lines[4].transform = MatrixTransform() self.lines[4].transform.rotate(45, (0, 0, 1)) self.lines[4].transform.scale((0.3, 0.3, 1)) self.lines[4].transform.translate((200, 200, 0)) self.lines[5].transform = (STTransform(translate=(200, 600, 0), scale=(5, 5)) * PolarTransform() * LogTransform(base=(2, 0, 0)) * STTransform(scale=(0.01, 0.1), translate=(4, 20))) self.show()
def _updateShapeData(self, firstTime):
    """On first call, build child nodes from the Assimp node's meshes and
    children; on later calls, just refresh the existing children."""
    if firstTime:
        # Copy the Assimp node's local transform onto this node.
        assimpMatrix = self.assimpNode.contents.mTransformation
        self.transform = MatrixTransform()  # TODO(JS): Necessary?
        self.transform.matrix = _fromAssimpMatrixToNumpyMatrix(
            assimpMatrix)

        # One visual node per mesh attached to this Assimp node.
        for mesh in self.assimpNode.meshes:
            self.assimpMeshNodes += [
                AssimpMeshNode(assimpMesh=mesh,
                               assimpScene=self.assimpScene,
                               parent=self)
            ]

        # Recursively wrap the Assimp child nodes.
        for child in self.assimpNode.children:
            self.assimpChildNodes += [
                AssimpNodeNode(child,
                               assimpScene=self.assimpScene,
                               parent=self)
            ]
    else:
        for assimpChildNode in self.assimpChildNodes:
            assimpChildNode.refresh()
def array_to_stt(arr):
    """Turn a (4, 4) array into a scale and translate matrix transformation.

    Parameters
    ----------
    arr : array_like
        A (4, 4) array.

    Returns
    -------
    transform : VisPy.transform
        The VisPy transformation.
    """
    assert isinstance(arr, np.ndarray) and arr.shape == (4, 4)
    # Copy first so the caller's array is left untouched, then swap the
    # translation column into the bottom row (VisPy matrices are transposed).
    work = arr.copy()
    work[-1, 0:-1] = work[0:-1, -1]
    work[0:-1, -1] = 0.
    return MatrixTransform(work)
def _niimg_mat(hdr, idx):
    """Get the transformation of a single slice.

    Parameters
    ----------
    hdr : object
        Header exposing a ``matrix`` attribute holding the (4, 4)
        transformation array.
    idx : tuple
        Slice indices (the two volume axes shown in this slice).

    Returns
    -------
    tf : MatrixTransform
        Image transformation.
    """
    # Transposed here and transposed back at the end — presumably because
    # VisPy stores matrices transposed relative to the header's affine;
    # confirm against the caller.
    hdr_mat = np.array(hdr.matrix).copy().T
    mat = np.identity(4, dtype=np.float32)
    # Fancy-index pairs: copy the 2x2 rotation/scale sub-block for the two
    # displayed axes, then the matching translation entries.
    to_idx = [[idx[0]], [idx[1]]], [idx[0], idx[1]]
    mat[[[0], [1]], [0, 1]] = hdr_mat[to_idx]
    mat[[0, 1], -1] = hdr_mat[[idx[0], idx[1]], -1]
    return MatrixTransform(mat.T)
def __init__(self):
    """Visualise the MD589 thumbnail volume: a volume render of the stack
    up to the cut position plus a 2-D section plane at the cut."""
    scene.SceneCanvas.__init__(self, keys='interactive', size=(960, 960),
                               show=True, bgcolor='black', title='MRI',
                               vsync=False)
    self.unfreeze()

    self.view = self.central_widget.add_view()

    self.vol_data = np.load(
        '/home/yuncong/CSHL_volumes/volume_MD589_thumbnail.npz')['arr_0']
    # self.vol_data = np.flipud(np.rollaxis(self.vol_data, 1))

    # Cut position along the stack (second axis).
    self.sectionTo = 150

    self.volume = scene.visuals.Volume(self.vol_data[:, 0:self.sectionTo, :],
                                       parent=self.view.scene)
    self.volume.transform = scene.STTransform(translate=(0, 0, 0))

    CMAP = self.volume.cmap

    # The 2-D section at the cut plane, sharing the volume's colormap.
    self.section2D = self.vol_data[:, self.sectionTo, :]
    self.plane = scene.visuals.Image(self.section2D, parent=self.view.scene,
                                     cmap=CMAP, relative_step_size=1.5)
    # self.plane.transform = scene.STTransform(translate=(0,self.sectionTo,0))
    # self.plane.transform = scene.STTransform(translate=(0,0,0))
    self.plane.transform = MatrixTransform()
    self.plane.transform.rotate(90, (1, 0, 0))
    self.plane.transform.translate((0, self.sectionTo, 0))
    self.plane.attach(BlackToAlpha())

    self.view.camera = scene.cameras.ArcballCamera(parent=self.view.scene)
def __init__(self, exp_df_path, trial_df_path, rig_leds_path):
    """Build the experiment playback GUI: a vispy 3-D scene (rig LEDs,
    helmet markers, gaze lines, head mesh) plus Qt playback controls."""
    exp_df = pd.read_pickle(exp_df_path)
    trial_df = pd.read_pickle(trial_df_path)
    # Join per-trial metadata onto the per-frame table ('block' would be
    # duplicated, so it is dropped from the trial table first).
    self.df = exp_df.join(trial_df.drop('block', axis=1), on='trial_number')
    self.rig_leds = np.load(rig_leds_path)
    self.precalculate_data()

    verts, faces, normals, nothin = vispy.io.read_mesh(
        os.path.join(fh.PACKAGE_DIR, '../datafiles', 'head.obj'))
    # Center the head mesh and rotate it into the rig's coordinate frame.
    verts = np.einsum('ni,ji->nj', (verts - verts.mean(axis=0)),
                      fh.from_yawpitchroll(180, 90, 0))
    #verts = verts - verts.mean(axis=0)

    # add SceneCanvas first to create QApplication object before widget
    self.vispy_canvas = vispy.scene.SceneCanvas(create_native=True,
                                                vsync=True, show=True,
                                                bgcolor=(0.2, 0.2, 0.2, 0))
    super(ExperimentVisualization, self).__init__()
    #self.setAttribute(Qt.WA_TranslucentBackground)

    # 30 fps playback timer; started/stopped by the animation button.
    self.timer = vispy.app.Timer(1 / 30, start=False,
                                 connect=self.advance_frame)

    self.n_trials = len(self.df)
    self.current_trial = 0
    self.i_frame = 0
    self.current_row = self.df.iloc[self.current_trial]
    self.current_R_helmet = None
    self.current_gaze_normals = None
    self.current_ref_points = None

    self.vispy_view = self.vispy_canvas.central_widget.add_view()
    self.vispy_view.camera = 'turntable'
    # Aim the camera at the rig's far corner: LED 127 plus both edge
    # vectors of the LED array.
    self.vispy_view.camera.center = self.rig_leds[127, :] + (
        self.rig_leds[0, :] - self.rig_leds[127, :]) + (
        self.rig_leds[254, :] - self.rig_leds[127, :])
    self.vispy_view.camera.fov = 40
    self.vispy_view.camera.distance = 1500

    self.main_layout = QtWidgets.QVBoxLayout()
    self.setLayout(self.main_layout)

    # Frame slider and trial picker drive the two custom signals below.
    self.frame_slider = QtWidgets.QSlider(Qt.Horizontal)
    self.frame_slider.setMinimum(0)
    self.frame_slider.valueChanged.connect(self.on_slider_change)

    self.trial_picker = QtWidgets.QSpinBox()
    self.trial_picker.setMaximum(self.n_trials)
    self.trial_picker.valueChanged.connect(self.on_picker_change)

    self.trial_changed.connect(self.load_trial)
    self.frame_changed.connect(self.load_frame)

    self.picker_slider_layout = QtWidgets.QHBoxLayout()
    self.main_layout.addWidget(self.vispy_canvas.native)
    self.main_layout.addLayout(self.picker_slider_layout)

    self.animation_button = QtWidgets.QPushButton('Start Animation')
    self.picker_slider_layout.addWidget(self.animation_button)
    self.animation_button.clicked.connect(self.toggle_animation)

    self.frame_label = QtWidgets.QLabel('Frame')
    self.picker_slider_layout.addWidget(self.frame_label)
    self.picker_slider_layout.addWidget(self.frame_slider)
    self.picker_slider_layout.addWidget(QtWidgets.QLabel('Trial'))
    self.picker_slider_layout.addWidget(self.trial_picker)

    # Rig LED markers (drawn without depth test so they stay visible).
    self.rig_vis = visuals.Markers()
    self.rig_vis.set_gl_state(depth_test=False)
    self.rig_vis.antialias = 0
    self.vispy_view.add(self.rig_vis)

    self.helmet_vis = visuals.Markers()
    self.vispy_view.add(self.helmet_vis)

    self.gaze_vis = visuals.Line()
    self.vispy_view.add(self.gaze_vis)

    # Semi-transparent head mesh, positioned each frame via its transform.
    self.head_mesh = visuals.Mesh(vertices=verts, shading='smooth',
                                  faces=faces, mode='triangles',
                                  color=(0.5, 0.55, 0.7))
    self.head_mesh.shininess = 0
    self.head_mesh.light_dir = [0, 1, 1]
    # self.head_mesh.light_color = np.array((1, 1, 0.95)) * 0.8
    self.head_mesh.ambient_light_color = np.array((0.98, 0.98, 1)) * 0.2
    self.head_mesh.attach(Alpha(0.3))
    self.head_mesh.set_gl_state(depth_test=True, cull_face=True)
    self.head_mesh_transform = MatrixTransform()
    self.head_mesh.transform = self.head_mesh_transform
    self.vispy_view.add(self.head_mesh)

    # Show the first trial/frame, then start the Qt/vispy event loop.
    self.trial_changed.emit(0)
    self.frame_changed.emit(0)

    self.show()
    vispy.app.run()