def put_axes(view):
    size = 20
    domain_x = (118.34 - size / 2, 118.34 + size / 2)
    domain_y = (117.69 - size / 2, 117.69 + size / 2)
    xax = scene.Axis(pos=[[-0.5, -0.5], [0.5, -0.5]], domain=domain_x,
                     tick_direction=(0, -1), font_size=16, axis_color='white',
                     tick_color='white', text_color='white', parent=view)
    xax.transform = scene.STTransform(translate=(0, 0, -0.2))
    yax = scene.Axis(pos=[[-0.5, -0.5], [-0.5, 0.5]], domain=domain_y,
                     tick_direction=(-1, 0), font_size=16, axis_color='white',
                     tick_color='white', text_color='white', parent=view)
    yax.transform = scene.STTransform(translate=(0, 0, -0.2))
def Vispy(matrice):
    def normalize(x, cmin=None, cmax=None, clip=True):
        """Normalize an array from the range [cmin, cmax] to [0, 1],
        with optional clipping."""
        if not isinstance(x, np.ndarray):
            x = np.array(x)
        if cmin is None:
            cmin = x.min()
        if cmax is None:
            cmax = x.max()
        if cmin == cmax:
            return .5 * np.ones(x.shape)
        else:
            cmin, cmax = float(cmin), float(cmax)
            y = (x - cmin) * 1. / (cmax - cmin)
            if clip:
                y = np.clip(y, 0., 1.)
        return y

    canvas = scene.SceneCanvas(keys='interactive', bgcolor='w')
    view = canvas.central_widget.add_view()
    view.camera = scene.TurntableCamera(up='z', fov=60)

    matrice = (matrice + 1) * 127

    # Simple surface plot example
    # x, y values are not specified, so assumed to be 0:50
    couleurs = normalize(CMAPGRAD)
    couleurs = np.flip(couleurs, 0)
    p1 = scene.visuals.SurfacePlot(z=matrice)  # couleurs[0]
    p1.transform = scene.transforms.MatrixTransform()
    p1.transform.scale([1 / 100, 1 / 100, 4 / 100])
    p1.transform.translate([0, 0, 0])
    view.add(p1)
    p1._update_data()  # p1._update_data()  # cheating.
    cf = scene.filters.ZColormapFilter(
        vs.color.Colormap(couleurs, interpolation='linear'),
        zrange=(matrice.max(), matrice.min()))
    p1.attach(cf)

    xax = scene.Axis(pos=[[-0.5, -0.5], [0.5, -0.5]], tick_direction=(0, -1),
                     font_size=16, axis_color='k', tick_color='k',
                     text_color='k', parent=view.scene)
    xax.transform = scene.STTransform(translate=(0, 0, -0.2))
    yax = scene.Axis(pos=[[-0.5, -0.5], [-0.5, 0.5]], tick_direction=(-1, 0),
                     font_size=16, axis_color='k', tick_color='k',
                     text_color='k', parent=view.scene)
    yax.transform = scene.STTransform(translate=(0, 0, -0.2))

    # Add a 3D axis to keep us oriented
    axis = scene.visuals.XYZAxis(parent=view.scene)

    canvas.show()
    app.run()
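# The normalize() helper above is plain min-max scaling with optional
# clipping; a minimal standalone sketch of the same mapping (NumPy only,
# the array values are made up for illustration):
import numpy as np

x = np.array([-2.0, 0.0, 2.0, 6.0])
cmin, cmax = 0.0, 2.0  # an explicit, narrower range than the data
y = np.clip((x - cmin) / (cmax - cmin), 0.0, 1.0)
print(y)  # [0. 0. 1. 1.] -- values outside [cmin, cmax] are clipped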
def test_perspective_render():
    with TestingCanvas(size=(120, 200)) as canvas:
        grid = canvas.central_widget.add_grid()
        imdata = io.load_crate().astype('float32') / 255

        views = []
        images = []
        for i, imethod in enumerate(['impostor', 'subdivide']):
            v = grid.add_view(row=i, col=0, border_color='white')
            v.camera = 'turntable'
            v.camera.fov = 50
            v.camera.distance = 30
            views.append(v)

            image = scene.visuals.Image(imdata, method=imethod, grid=(4, 4))
            image.transform = scene.STTransform(translate=(-12.8, -12.8),
                                                scale=(0.1, 0.1))
            v.add(image)
            images.append(image)

        image = canvas.render()
        canvas.close()

        # Allow many pixels to differ by a small amount--texture sampling and
        # exact triangle position will differ across platforms. However a
        # change in perspective or in the widget borders should trigger a
        # failure.
        assert_image_approved(image, 'scene/cameras/perspective_test.png',
                              'perspective test 1: 2 identical views with '
                              'correct perspective', px_threshold=20,
                              px_count=60, max_px_diff=200)
def test_panzoom_center():
    with TestingCanvas(size=(120, 200)) as canvas:
        grid = canvas.central_widget.add_grid()
        imdata = io.load_crate().astype('float32') / 255

        v = grid.add_view(row=0, col=0)
        v.camera = 'panzoom'
        image = scene.visuals.Image(imdata)
        image.transform = scene.STTransform(translate=(-12.8, -12.8),
                                            scale=(0.1, 0.1))
        v.add(image)

        result1 = canvas.render()[..., :3]
        assert v.camera.center == (0.5, 0.5, 0)
        v.camera.center = (-12.8, -12.8, 0)
        result2 = canvas.render()[..., :3]
        assert not np.allclose(result1, result2)
        # we moved to the lower-left corner of the image, so only the
        # upper-right quadrant should have data; the rest is black background
        np.testing.assert_allclose(result2[100:, :], 0)
        np.testing.assert_allclose(result2[:, :60], 0)
        assert not np.allclose(result2[:100, 60:], 0)
        assert v.camera.center == (-12.8, -12.8, 0)
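# For reference, the STTransform used on the crate image above applies
# scale first, then translate, so a 256x256 texture ends up centred on
# the origin. A minimal sketch (assuming the crate texture is 256x256):
import numpy as np
from vispy.visuals.transforms import STTransform

tr = STTransform(translate=(-12.8, -12.8), scale=(0.1, 0.1))
print(tr.map(np.array([0, 0]))[:2])      # [-12.8 -12.8]
print(tr.map(np.array([256, 256]))[:2])  # [ 12.8  12.8]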
def set_values(self, vol_array):
    # Calculate center and range
    self.calc_bounds()

    # Flip axes on volume array
    vol_flipped = np.flipud(np.swapaxes(vol_array, 0, 2))

    # Draw volumetric plot
    self._volume = scene.visuals.Volume(vol_flipped, clim=self._clim)
    self._nx, self._ny, self._nz = vol_array.shape
    self._volume.transform = scene.STTransform(
        # Off centered but in bounds
        translate=(self._xlim[0] + self._xr / (2 * self._nx),
                   self._ylim[0] + self._yr / (2 * self._ny),
                   self._zlim[0] + self._zr / (2 * self._nz)),
        scale=(self._xr / self._nx, self._yr / self._ny, self._zr / self._nz))
    # Centered and out of bounds?
    # translate=(self._xlim[0] - self._xr / (self._nx - 1),
    #            self._ylim[0] - self._yr / (self._ny - 1),
    #            self._zlim[0] - self._zr / (self._nz - 1)),
    # scale=(self._xr / (self._nx - 1),
    #        self._yr / (self._ny - 1),
    #        self._zr / (self._nz - 1)))
    self._volume.cmap = colormap.CubeHelixColormap(reverse=True)
    self._volume.method = 'mip'
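# Why the half-voxel offset above stays in bounds: with scale = range/n,
# voxel i lands at translate + i * scale, so offsetting by range/(2n)
# places the n voxel centres at the centres of n equal bins spanning the
# axis limits. A quick NumPy check with made-up bounds:
import numpy as np

xlim, nx = (-1.0, 1.0), 4
xr = xlim[1] - xlim[0]
scale = xr / nx
translate = xlim[0] + xr / (2 * nx)
centers = translate + np.arange(nx) * scale
print(centers)  # [-0.75 -0.25  0.25  0.75] -- all inside (-1, 1)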
def preview_shifts(a, b, shifts):
    canvas = scene.SceneCanvas(keys="interactive")
    canvas.size = 1024, 1024
    canvas.show()

    # create view boxes
    vb_xy = scene.widgets.ViewBox(border_color="white", parent=canvas.scene)
    vb_xz = scene.widgets.ViewBox(border_color="white", parent=canvas.scene)
    vb_yz = scene.widgets.ViewBox(border_color="white", parent=canvas.scene)
    vbs = vb_xy, vb_xz, vb_yz

    # put them in a grid
    grid = canvas.central_widget.add_grid()
    grid.padding = 6
    grid.add_widget(vb_xy, 0, 0)
    grid.add_widget(vb_xz, 1, 0)
    grid.add_widget(vb_yz, 0, 1)

    # generate colormaps
    n_colors = 128
    alphas = np.linspace(0.0, 1.0, n_colors)
    color_red = np.c_[np.ones((n_colors, )), np.zeros((n_colors, )),
                      np.zeros((n_colors, )), alphas]
    cmap_red = Colormap(color_red)
    color_blue = np.c_[np.zeros((n_colors, )), np.zeros((n_colors, )),
                       np.ones((n_colors, )), alphas]
    cmap_blue = Colormap(color_blue)

    # build shifts for mips
    sz, sy, sx = shifts
    nz, ny, nx = a.shape
    shifts = ((sx, sy), (sx, sz), (sy, sz))
    print(shifts)

    # create visuals
    i = 0
    for im, cm in zip((a, b), (cmap_red, cmap_blue)):
        mips = [im.max(axis=axis) for axis in range(3)]
        for vb, mip, shift in zip(vbs, mips, shifts):
            image = scene.visuals.Image(mip, cmap=cm, parent=vb.scene)
            image.set_gl_state("translucent", depth_test=False)
            # apply transformation (the first image drawn is the
            # unshifted reference)
            if i > 0:
                image.transform = scene.STTransform(translate=shift)
            else:
                i += 1

    # assign cameras
    for vb in vbs:
        vb.camera = scene.PanZoomCamera(aspect=1)
        vb.camera.set_range()
        vb.camera.flip = (0, 1, 0)

    app.run()
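# The red/blue ramps above are a standard two-channel overlay trick: a
# constant hue with alpha ramping from transparent to opaque. A minimal
# sketch of building and sampling one such colormap with vispy.color
# (the sampled values are illustrative):
import numpy as np
from vispy.color import Colormap

n_colors = 128
alphas = np.linspace(0.0, 1.0, n_colors)
# RGBA rows: pure red, increasingly opaque towards high intensities
color_red = np.c_[np.ones(n_colors), np.zeros(n_colors),
                  np.zeros(n_colors), alphas]
cmap_red = Colormap(color_red)
print(cmap_red.map(np.array([0.0, 0.5, 1.0])))  # RGBA at low/mid/high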
def __init__(self, keys='interactive'):
    super(DemoScene, self).__init__()
    # Layout and canvas creation
    box = QtGui.QVBoxLayout(self)
    self.resize(500, 500)
    self.setLayout(box)
    self.canvas = scene.SceneCanvas(keys=keys)
    box.addWidget(self.canvas.native)

    # Camera
    self.view = self.canvas.central_widget.add_view()
    self.view.camera = scene.cameras.TurntableCamera(elevation=25, azimuth=20,
                                                     distance=2.0,
                                                     center=(0, 0, 0))

    # Data
    fitsdata = pyfits.open('l1448_13co.fits')
    vol1 = np.nan_to_num(fitsdata[0].data)
    self.vol_data = vol1
    """
    The transposes here and below resolve the coordinate mismatch between
    the volume visual's input data and its rendered result. What we see on
    the 2D screen is the display transform (self.tr here) applied to the
    *transposed* input data, so we transpose as well, letting the selection
    operate on what is shown on screen rather than on the raw volume data.
    """
    new_pos = np.transpose(vol1)
    # TODO: replace the min&max threshold with real settings in Glue UI
    min_threshold = np.min(self.vol_data)
    max_threshold = np.max(self.vol_data)
    self.pos_data = np.argwhere(new_pos >= min_threshold)  # get voxel positions

    grays = get_translucent_cmap(1, 1, 1)
    self.volume_pool = [(vol1, (1, 6), grays)]
    self.volume = MultiVolume(self.volume_pool)
    self.trans = [-vol1.shape[2] / 2., -vol1.shape[1] / 2.,
                  -vol1.shape[0] / 2.]
    self.volume.transform = scene.STTransform(translate=self.trans)
    self.view.add(self.volume)

    # Add a 3D axis to keep us oriented
    axis = scene.visuals.XYZAxis(parent=self.view.scene)

    self.selection = SelectionCommon(canvas=self.canvas, view=self.view,
                                     vol_data=self.vol_data,
                                     volume=self.volume,
                                     volume_pool=self.volume_pool,
                                     pos_data=self.pos_data)
def __init__(self, parent=None):
    super(VispyWidget, self).__init__(parent=parent)

    # Prepare Vispy canvas. We set the depth_size to 24 to avoid issues
    # with isosurfaces on MacOS X
    self.canvas = scene.SceneCanvas(keys='interactive', show=False,
                                    config={'depth_size': 24})

    # Set up a viewbox
    self.view = self.canvas.central_widget.add_view()
    self.view.parent = self.canvas.scene

    # Set whether we are emulating a 3D texture. This needs to be enabled
    # as a workaround on Windows otherwise VisPy crashes.
    self.emulate_texture = (sys.platform == 'win32' and
                            sys.version_info[0] < 3)

    self.scene_transform = scene.STTransform()
    self.limit_transforms = {}

    # Add a 3D cube to show us the unit cube. The 1.001 factor is to make
    # sure that the grid lines are not 'hidden' by volume renderings on the
    # front side due to numerical precision.
    vertices, filled_indices, outline_indices = create_cube()
    self.axis = scene.visuals.Mesh(vertices['position'], outline_indices,
                                   color=(1, 1, 1), mode='lines')
    self.axis.transform = self.scene_transform
    self.view.add(self.axis)

    # Create a turntable camera. For now, this is the only camera type we
    # support, but if we add more in the future, this is where to do it.
    # Remove the fov=60 here to solve the mismatch of selection problem
    # self.view.camera = scene.cameras.TurntableCamera(parent=self.view.scene, distance=2)
    self.view.camera = scene.cameras.TurntableCamera(parent=self.view.scene,
                                                     distance=2.0)

    # Add the native canvas widget to this widget
    layout = QtGui.QVBoxLayout()
    layout.setContentsMargins(0, 0, 0, 0)
    layout.addWidget(self.canvas.native)
    self.setLayout(layout)

    # We need to call render here otherwise we'll later encounter an OpenGL
    # program validation error.
    self.canvas.render()

    # Set up callbacks
    add_callback(self, 'visible_axes', nonpartial(self._toggle_axes))
def test_3d_axis_visual():
    canvas = scene.SceneCanvas(keys=None, size=(800, 600), show=True)

    view = canvas.central_widget.add_view()

    scene_transform = scene.STTransform()

    view.camera = scene.cameras.TurntableCamera(parent=view.scene,
                                                fov=0., distance=4.0)

    AxesVisual3D(view=view, axis_color='red', transform=scene_transform)

    canvas.native.show()
    canvas.native.close()
def setVolume(self, vol=None, path=None, sliceobj=None):
    if path is not None and vol is None:
        if '*' in path:
            vol = loader.loadimageseries(path)
        elif os.path.splitext(path)[-1] == '.npy':
            vol = loader.loadimage(path)
        else:
            vol = loader.loadtiffstack(path)
    elif vol is None:
        vol = self.vol
    if vol is None:
        return

    self.vol = vol

    if sliceobj is not None:  # was `if slice is not None`, which tested the builtin
        def intify(a):
            if a is not None:
                return int(a)

        sliceobj = tuple(slice(intify(s.start), intify(s.stop), intify(s.step))
                         for s in sliceobj)
        slicevol = self.vol[sliceobj]
    else:
        slicevol = self.vol

    # Set whether we are emulating a 3D texture
    emulate_texture = False

    # Create the volume visuals
    if self.volume is None:
        self.volume = scene.visuals.Volume(slicevol, parent=self.view.scene,
                                           emulate_texture=emulate_texture)
        self.volume.method = 'translucent'
    else:
        self.volume.set_data(slicevol)
        self.volume._create_vertex_data()  # TODO: Try using this instead of slicing array?

    # Translate the volume into the center of the view
    # (axes are in a strange order for unknown reasons)
    scale = 3 * (2.0 / self.vol.shape[1], )
    translate = list(map(lambda x: -scale[0] * x / 2, reversed(vol.shape)))
    self.volume.transform = scene.STTransform(translate=translate, scale=scale)
def __init__(self, parent=None, viewer_state=None):
    # Prepare Vispy canvas. We set the depth_size to 24 to avoid issues
    # with isosurfaces on MacOS X
    self.canvas = scene.SceneCanvas(keys=None, show=False,
                                    config={'depth_size': 24},
                                    bgcolor=rgb(settings.BACKGROUND_COLOR))

    # Set up a viewbox
    self.view = self.canvas.central_widget.add_view()
    self.view.parent = self.canvas.scene

    # Set whether we are emulating a 3D texture. This needs to be enabled
    # as a workaround on Windows otherwise VisPy crashes.
    self.emulate_texture = (sys.platform == 'win32' and
                            sys.version_info[0] < 3)

    self.scene_transform = scene.STTransform()
    self.limit_transforms = {}

    fc = rgb(settings.FOREGROUND_COLOR)
    self.axis = AxesVisual3D(axis_color=fc, tick_color=fc, text_color=fc,
                             tick_width=1, minor_tick_length=2,
                             major_tick_length=4, axis_width=0,
                             tick_label_margin=10, axis_label_margin=25,
                             tick_font_size=6, axis_font_size=8,
                             view=self.view, transform=self.scene_transform)

    # Create a turntable camera. For now, this is the only camera type we
    # support, but if we add more in the future, this is where to do it.
    # Orthographic perspective view as default
    self.view.camera = scene.cameras.TurntableCamera(parent=self.view.scene,
                                                     fov=0., distance=4.0)

    # We need to call render here otherwise we'll later encounter an OpenGL
    # program validation error.
    # self.canvas.render()

    self.viewer_state = viewer_state
    try:
        self.viewer_state.add_callback('*', self._update_from_state,
                                       as_kwargs=True)
    except TypeError:  # glue-core >= 0.11
        self.viewer_state.add_global_callback(self._update_from_state)
    self._update_from_state(force=True)
def __init__(self):
    scene.SceneCanvas.__init__(self, keys='interactive', size=(960, 960),
                               show=True, bgcolor='black', title='MRI',
                               vsync=False)
    self.unfreeze()

    self.view = self.central_widget.add_view()

    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_thumbnail.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_scoreMap_2.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_scoreMap_1.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_scoreMap_9.npz')['arr_0']
    # self.vol_data = self.vol_data / self.vol_data.max()
    # self.vol_data[self.vol_data < .5] = 0

    self.vol_data = bp.unpack_ndarray_file('/home/yuncong/CSHL_volumes/volume_MD589_annotation.bp')
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_annotationAllClasses.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_labelmap.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD594_predMap.npz')['arr_0']
    # self.vol_data = np.load('/home/yuncong/CSHL_volumes/volume_MD589_predMap.npz')['arr_0']

    self.vol_data = self.vol_data[::2, ::2, ::2].astype(float) / 9.
    # self.vol_data = np.flipud(np.rollaxis(self.vol_data, 1))

    # self.sectionTo = 150
    self.sectionTo = 50

    colors = np.loadtxt('/home/yuncong/Brain/visualization/100colors.txt')

    # self.volume = scene.visuals.Volume(self.vol_data[:, 0:self.sectionTo, :],
    #                                    parent=self.view.scene,
    #                                    cmap=get_colormap('coolwarm'))
    self.volume = scene.visuals.Volume(
        self.vol_data[:, 0:self.sectionTo, :], parent=self.view.scene,
        method='mip',
        cmap=Colormap([(0, 0, 0), (0, 1, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1),
                       (1, 1, 0), (0, 1, 1), (1, 1, 0), (1, 0.5, 0),
                       (0, 0.5, 0), (0, 0, 1)],
                      interpolation='linear'))
    self.volume.transform = scene.STTransform(translate=(0, 0, 0))
    CMAP = self.volume.cmap

    self.section2D = self.vol_data[:, self.sectionTo, :]
    self.plane = scene.visuals.Image(self.section2D, parent=self.view.scene,
                                     cmap=CMAP, relative_step_size=1.5)
    # self.plane.transform = scene.STTransform(translate=(0, self.sectionTo, 0))
    # self.plane.transform = scene.STTransform(translate=(0, 0, 0))
    self.plane.transform = MatrixTransform()
    self.plane.transform.rotate(90, (1, 0, 0))
    self.plane.transform.translate((0, self.sectionTo, 0))
    self.plane.attach(BlackToAlpha())

    self.view.camera = scene.cameras.ArcballCamera(parent=self.view.scene)
def set_values(self, vol_array):
    # Calculate center and range
    self.calc_bounds()

    # Flip axes on volume array
    vol_flipped = np.flipud(np.swapaxes(vol_array, 0, 2))

    # Draw volumetric plot
    self._volume = scene.visuals.Volume(vol_flipped)
    self._nx, self._ny, self._nz = vol_array.shape
    self._volume.transform = scene.STTransform(
        # translate=(-self._nx/2, -self._ny/2, -self._nz/2),
        translate=((1 - self._nx) / 2, (1 - self._nz) / 2, 0),
        scale=(self._xr / self._nx, self._yr / self._ny, self._zr / self._nz))
    self._volume.cmap = colormap.CubeHelixColormap()
    self._volume.method = 'mip'
def setData(self, data):
    if data is None:
        return

    data = np.nan_to_num(data)
    self.data = data

    # Update the volume visual
    if self.volume is not None:
        self.volume.set_data(data)
        self.volume._create_vertex_data()  # TODO: Try using this instead of slicing array?

    # Translate the volume into the center of the view
    # (axes are in a strange order for unknown reasons)
    scale = 3 * (2.0 / self.data.shape[1], )
    translate = list(map(lambda x: -scale[0] * x / 2, reversed(data.shape)))
    self.volume.transform = scene.STTransform(translate=translate, scale=scale)
    self.update()
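# A quick check of the centring arithmetic used in setData() above: with
# scale = 2/shape[1] on every axis and translate = -scale * extent / 2,
# the volume's midpoint maps to the scene origin (NumPy only; the shape
# is made up):
import numpy as np

shape = (32, 64, 128)                      # (z, y, x)
scale = 3 * (2.0 / shape[1],)              # (0.03125, 0.03125, 0.03125)
translate = [-scale[0] * s / 2 for s in reversed(shape)]
mid = np.array(list(reversed(shape))) / 2  # data midpoint in (x, y, z)
print(mid * scale[0] + translate)          # [0. 0. 0.]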
def __init__(self):
    scene.SceneCanvas.__init__(self, keys='interactive', size=(960, 960),
                               show=True, bgcolor='black', title='MRI',
                               vsync=False)
    self.unfreeze()

    self.view = self.central_widget.add_view()

    self.vol_data = np.load(
        '/home/yuncong/CSHL_volumes/volume_MD589_thumbnail.npz')['arr_0']
    # self.vol_data = np.flipud(np.rollaxis(self.vol_data, 1))

    self.sectionTo = 150

    self.volume = scene.visuals.Volume(self.vol_data[:, 0:self.sectionTo, :],
                                       parent=self.view.scene)
    self.volume.transform = scene.STTransform(translate=(0, 0, 0))
    CMAP = self.volume.cmap

    self.section2D = self.vol_data[:, self.sectionTo, :]
    self.plane = scene.visuals.Image(self.section2D, parent=self.view.scene,
                                     cmap=CMAP, relative_step_size=1.5)
    # self.plane.transform = scene.STTransform(translate=(0, self.sectionTo, 0))
    # self.plane.transform = scene.STTransform(translate=(0, 0, 0))
    self.plane.transform = MatrixTransform()
    self.plane.transform.rotate(90, (1, 0, 0))
    self.plane.transform.translate((0, self.sectionTo, 0))
    self.plane.attach(BlackToAlpha())

    self.view.camera = scene.cameras.ArcballCamera(parent=self.view.scene)
def __init__(self, keys='interactive'):
    super(DemoScene, self).__init__()
    # Layout and canvas creation
    box = QtGui.QVBoxLayout(self)
    self.resize(800, 600)
    self.setLayout(box)
    self.canvas = scene.SceneCanvas(keys=keys)
    box.addWidget(self.canvas.native)

    # Connect events
    self.canvas.events.mouse_press.connect(self.on_mouse_press)
    self.canvas.events.mouse_release.connect(self.on_mouse_release)
    self.canvas.events.mouse_move.connect(self.on_mouse_move)
    self.canvas.events.key_press.connect(self.on_key_press)

    # Setup some defaults
    self.mesh = None
    self.selected = []
    self.white = (1.0, 1.0, 1.0, 1.0)
    self.black = (0.0, 0.0, 0.0, 0.0)

    # Camera
    self.view = self.canvas.central_widget.add_view()
    self.view.camera = scene.cameras.TurntableCamera(elevation=90, azimuth=0,
                                                     fov=60, center=(0, 0, 0))

    # Data
    fitsdata = fits.open('l1448_13co.fits')
    self.vol_data = np.nan_to_num(fitsdata[0].data)
    """
    The transposes here and below resolve the coordinate mismatch between
    the volume visual's input data and its rendered result. What we see on
    the 2D screen is the display transform (self.tr here) applied to the
    *transposed* input data, so we transpose as well, letting the selection
    operate on what is shown on screen rather than on the raw volume data.
    """
    new_pos = np.transpose(self.vol_data)
    # TODO: replace the min&max threshold with real settings in Glue UI
    self.pos_data = np.indices(self.vol_data.shape).reshape(3, -1).transpose()

    grays = get_translucent_cmap(1, 1, 1)
    self.volume_pool = [(self.vol_data, (1, 6), grays)]
    self.volume = MultiVolume(self.volume_pool)
    self.trans = [-self.vol_data.shape[2] / 2., -self.vol_data.shape[1] / 2.,
                  -self.vol_data.shape[0] / 2.]
    self.volume.transform = scene.STTransform(translate=self.trans)
    self.view.add(self.volume)
    self.tr = self.volume.get_transform(map_from='visual', map_to='canvas')

    # create a volume for showing the selected part
    self.volume1 = scene.visuals.Volume(self.vol_data, clim=(4, 6),
                                        parent=self.view.scene)
    self.volume1.transform = scene.STTransform(translate=self.trans)
    self.volume1.visible = False

    # Add a text instruction
    self.text = scene.visuals.Text('', color='white',
                                   pos=(self.canvas.size[0] / 4.0, 20),
                                   parent=self.canvas.scene)

    # Add a 3D axis to keep us oriented
    axis = scene.visuals.XYZAxis(parent=self.view.scene)

    # Set up for lasso drawing
    self.line_pos = []
    self.line = scene.visuals.Line(color='yellow', method='gl',
                                   parent=self.canvas.scene)

    # Selection
    self.selection_flag = False
    self.selection_pool = {'1': 'lasso', '2': 'rectangle', '3': 'ellipse',
                           '4': 'pick', '5': 'floodfill'}
    self.selection_id = '1'  # default as 1
    self.selection_origin = (0, 0)
def preview_volume(vols, shifts=None):
    canvas = scene.SceneCanvas(keys="interactive")
    canvas.size = 1024, 1024
    canvas.show()

    # create view box
    view = canvas.central_widget.add_view()

    # generate colormap
    """
    n_colors = 256
    alphas = np.linspace(0.0, 1.0, n_colors)
    color = np.c_[alphas, alphas, alphas, alphas]
    cmap = Colormap(color)
    """
    from utoolbox.data.io.amira import AmiraColormap
    color = AmiraColormap("volrenGlow.am")
    color = np.array(color)
    color[0, :] = 0
    color[:, 3] /= 100
    cmap = Colormap(color)

    for i, vol in enumerate(vols):
        volume = scene.visuals.Volume(vol, cmap=cmap, parent=view.scene,
                                      emulate_texture=False)
        volume.method = "translucent"
        volume.transform = scene.STTransform(scale=(2, 2, 5.5))
        volume.set_gl_state("translucent", depth_test=False)
        if shifts:
            volume.transform = scene.STTransform(translate=shifts[i])

    # assign camera
    camera = scene.cameras.TurntableCamera(parent=view.scene, fov=60.0,
                                           name="Arcball", elevation=30.0)
    view.camera = camera
    view.camera.flip = (False, True, True)
    view.camera.reset()

    # axis
    axis = scene.visuals.XYZAxis(parent=view)
    s = STTransform(translate=(50, 50), scale=(50, 50, 50, 1))
    affine = s.as_matrix()
    axis.transform = affine

    # link with camera
    @canvas.events.mouse_move.connect
    def on_mouse_move(event):
        if event.button == 1 and event.is_dragging:
            axis.transform.reset()
            axis.transform.rotate(camera.roll, (0, 0, 1))
            axis.transform.rotate(camera.elevation, (1, 0, 0))
            axis.transform.rotate(camera.azimuth, (0, 1, 0))
            axis.transform.scale((50, 50, 0.001))
            axis.transform.translate((50.0, 50.0))
            axis.update()

    # render rotation movie
    """
    n_steps = 240
    axis = [0, 0, 0]

    logger.debug(".. rendering")
    step_angle = 360.0 / n_steps
    writer = imageio.get_writer("t1-head_split_rotate.mp4", fps=24)
    for i in range(n_steps):
        im = canvas.render()
        writer.append_data(im)
        view.camera.transform.rotate(step_angle, axis)
    writer.close()
    """

    app.run()
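# The axis gizmo above converts an STTransform to a MatrixTransform via
# as_matrix() so the gizmo can be reset and re-rotated whenever the camera
# moves. A minimal sketch of that conversion (vispy API, no canvas needed):
from vispy.visuals.transforms import STTransform

s = STTransform(translate=(50, 50), scale=(50, 50, 50, 1))
affine = s.as_matrix()        # a MatrixTransform with the same mapping
affine.rotate(30, (0, 0, 1))  # rotate 30 degrees about +z
affine.reset()                # back to the identity matrix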
trans = (-(xmax - xmin) / 2.0, -(ymax - ymin) / 2.0, -(zmax - zmin) / 2.0)

# Create a canvas with a 3D viewport
canvas = scene.SceneCanvas(keys='interactive', config={'depth_size': 24},
                           bgcolor='white')
view = canvas.central_widget.add_view()

surface = scene.visuals.Isosurface(density, level=density.max() / 2.,
                                   shading=None, color=(0.5, 0.6, 1, 0.5),
                                   parent=view.scene)
# surface.transform = scene.transforms.STTransform(translate=(-len(X)/2., -len(Y)/2., -len(Z)/2.))
surface.set_gl_state(depth_test=True, cull_face=True)
surface.transform = scene.STTransform(translate=trans)

surface2 = scene.visuals.Isosurface(density, level=density.max() / 4.,
                                    shading=None, color=(1, 0, 0, 0.1),
                                    parent=view.scene)
# surface2.transform = scene.transforms.STTransform(translate=(-s[0]/2., -s[1]/2., -s[2]/2.))
surface2.set_gl_state(depth_test=True, cull_face=True)
surface2.transform = scene.STTransform(translate=trans)

# Add a 3D scatter plot
scat_visual = scene.visuals.Markers()
scat_visual.set_data(P, symbol='disc', edge_color=None,
def on_key_press(event):
    if event.text == 's':
        global still_num
        # Stop preexisting animation
        fade_out.stop()
        usr_message.text = 'Saved still image'
        usr_message.color = (1, 1, 1, 0)
        # Write screen to .png
        still = canvas.render()
        still_name = str(seq) + "_" + str(still_num) + ".png"
        io.write_png(still_name, still)
        still_num = still_num + 1
        # Display and fade saved message
        fade_out.start()
    if event.text == 'e':
        global load_data
        # Stop preexisting animation
        fade_out.stop()
        usr_message.color = (1, 1, 1, 0)
        if load_data is None:
            global fractal_data
            # export data created in this program
            file_name = "3D_Fractal_" + seq + "_steps" + str(steps)
            np.save(file_name, fractal_data, allow_pickle=False)
            # set user message
            usr_message.text = 'Exported fractal'
        else:
            # set user message
            usr_message.text = 'Cannot export data loaded into program'
        # display user message
        fade_out.start()
    if event.text == 'l':
        global volume, loaded_data_later, load_data
        loaded_data_later = True
        # Stop preexisting animation
        fade_out.stop()
        usr_message.color = (1, 1, 1, 0)
        # open file dialog to select load data
        root = tk.Tk()
        root.withdraw()
        load_data = filedialog.askopenfilename()
        # make sure file extension is .npy
        file_ext = load_data[-3:]
        if file_ext != 'npy':
            usr_message.text = 'Can only load .npy files'
        else:
            usr_message.text = 'Fractal loaded'
            # load fractal data
            fractal_data = np.load(load_data)
            # normalize data and get color map
            fractal_3D, chaotic_boundary = normalize(fractal_data, 0.0)
            fractal_map = getfractalcolormap(chaotic_boundary)
            # erase old volume
            volume.parent = None
            # make new volume from normalized fractal data
            volume = scene.visuals.Volume(fractal_3D, clim=(0, 1),
                                          method='translucent',
                                          parent=view.scene, threshold=0.225,
                                          cmap=fractal_map,
                                          emulate_texture=False)
            volume.transform = scene.STTransform(
                translate=(-steps // 2, -steps // 2, -steps // 2))
        # display user message
        fade_out.start()
oranges = np.array([1, 0.5, 0, 0.5])

# Combine data
combined_data = np.zeros(data.shape + (4, ))
combined_data += data[:, :, :, np.newaxis] / 6. * grays
combined_data += subset1[:, :, :, np.newaxis] / 4. * reds
combined_data += subset2[:, :, :, np.newaxis] / 4. * greens
combined_data += subset3[:, :, :, np.newaxis] / 4. * blues
combined_data += subset4[:, :, :, np.newaxis] / 4. * oranges
combined_data /= 5.
combined_data = np.clip(combined_data, 0, 1)

volume1 = RGBAVolume(combined_data, parent=view.scene)
volume1.transform = scene.STTransform(translate=(64, 64, 0))

view.camera = scene.cameras.TurntableCamera(parent=view.scene, fov=60.,
                                            name='Turntable')

canvas.update()

# create colormaps that work well for translucent and additive volume rendering

# for testing performance
# @canvas.connect
# def on_draw(ev):
#     canvas.update()
# Create some visuals to show
# AK: Ideally, we could just create one visual that is present in all
# scenes, but that results in flicker for the PanZoomCamera, I suspect
# due to errors in transform caching.
im1 = io.load_crate().astype('float32') / 255
# image1 = scene.visuals.Image(im1, grid=(20, 20), parent=scenes)
for par in scenes:
    image = scene.visuals.Image(im1, grid=(20, 20), parent=par)

# vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']
# volume1 = scene.visuals.Volume(vol1, parent=scenes)
# volume1.transform = scene.STTransform(translate=(0, 0, 10))

# Assign cameras
vb1.camera = scene.BaseCamera()
vb2.camera = scene.PanZoomCamera()
vb3.camera = scene.TurntableCamera()
vb4.camera = scene.FlyCamera()


# If True, show a cuboid at each camera
if False:
    cube = scene.visuals.Cube((3, 3, 5))
    cube.transform = scene.STTransform(translate=(0, 0, 6))
    for vb in (vb1, vb2, vb3, vb4):
        vb.camera.parents = scenes
        cube.add_parent(vb.camera)


if __name__ == '__main__':
    if sys.flags.interactive != 1:
        app.run()
sur_scale = [(xmax - xmin) / 50., (ymax - ymin) / 50., (zmax - zmin) / 50.]

# Create a canvas with a 3D viewport
canvas = scene.SceneCanvas(keys='interactive', config={'depth_size': 24},
                           bgcolor='white')
view = canvas.central_widget.add_view()

surface = scene.visuals.Isosurface(density, level=density.max() / 2.,
                                   shading=None, color=(0.5, 0.6, 1, 0.5),
                                   parent=view.scene)
# surface.transform = scene.transforms.STTransform(translate=(-len(X)/2., -len(Y)/2., -len(Z)/2.))
surface.set_gl_state(depth_test=True, cull_face=True)
surface.transform = scene.STTransform(translate=trans, scale=sur_scale)

surface2 = scene.visuals.Isosurface(density, level=density.max() / 4.,
                                    shading=None, color=(1, 0, 0, 0.1),
                                    parent=view.scene)
# surface2.transform = scene.transforms.STTransform(translate=(-s[0]/2., -s[1]/2., -s[2]/2.))
surface2.set_gl_state(depth_test=True, cull_face=True)
surface2.transform = scene.STTransform(translate=trans, scale=sur_scale)

# Add a 3D scatter plot
scat_visual = scene.visuals.Markers()
scat_visual.set_data(P, symbol='disc', edge_color=None,
# Prepare canvas
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
canvas.measure_fps()

# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()

# Set whether we are emulating a 3D texture
emulate_texture = False

# Create the volume visuals, only one is visible
volume1 = scene.visuals.Volume(vol1, parent=view.scene, threshold=0.225,
                               emulate_texture=emulate_texture)
volume1.transform = scene.STTransform(translate=(64, 64, 0))

with open('kelp_pickle/PP_3d.pickle', 'rb') as inFile:
    myVol = pickle.load(inFile)
myVol = np.flipud(np.rollaxis(myVol, 2))
volume2 = scene.visuals.Volume(myVol, parent=view.scene, threshold=0.2,
                               emulate_texture=emulate_texture)
volume1.visible = False

nx, ny, nz = myVol.shape
volume2.transform = scene.STTransform(translate=(-nx / 4, -ny / 2, -nz / 2))

# Create three cameras (Fly, Turntable and Arcball)
fov = 60
cam1 = scene.cameras.FlyCamera(parent=view.scene, fov=fov, name='Fly')
cam2 = scene.cameras.TurntableCamera(parent=view.scene,
# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()
camera = scene.cameras.ArcballCamera(parent=view.scene, fov=60,
                                     scale_factor=steps * 3,
                                     center=(0, 0, 0))
view.camera = camera

# Create the volume
volume = scene.visuals.Volume(fractal_3D, clim=(0, 1), method='translucent',
                              parent=view.scene, threshold=0.225,
                              emulate_texture=False)
volume.transform = scene.STTransform(translate=(-steps // 2, -steps // 2,
                                                -steps // 2))

# Creating color map to display fractal
fractal_colors = [(1, 0, 1, .5), (0, 0, 1, .5), (.1, .8, .8, .3),
                  (.1, 1, .1, .3), (1, 1, 0, .2), (1, 0, 0, .1),
                  (0, 1, 1, (1 - chaotic_boundary) / 7),
                  (0, 1, .8, (1 - chaotic_boundary) / 8),
                  (0, 0, 0, 0), (0, 0, 0, 0)]
color_control_pts = [0, (0.6 * chaotic_boundary), (0.7 * chaotic_boundary),
                     (0.8 * chaotic_boundary), (0.9 * chaotic_boundary),
                     (0.95 * chaotic_boundary), (0.97 * chaotic_boundary),
                     (0.99 * chaotic_boundary), chaotic_boundary,
                     chaotic_boundary, 1.0]
fractal_map = Colormap(fractal_colors,
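# The Colormap call above is truncated, but given the lists it builds it is
# presumably passing the control points as well. For reference, vispy's
# Colormap pairs each colour with a control point in [0, 1]; a sketch with
# simplified, made-up values:
from vispy.color import Colormap

# with linear interpolation, controls must be monotonic, start at 0,
# end at 1, and match the number of colours
fractal_map = Colormap([(1, 0, 1, .5), (0, 0, 1, .5), (0, 0, 0, 0)],
                       controls=[0.0, 0.5, 1.0])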
xax.stretch = (1, 0.05)
grid.add_widget(xax, 1, 1)
xax.link_view(view)

N = 4900
M = 2000
cols = int(N**0.5)
view.camera.rect = (0, 0, cols, N / cols)

lines = scene.ScrollingLines(n_lines=N, line_size=M, columns=cols,
                             dx=0.8 / M, cell_size=(1, 8),
                             parent=view.scene)
lines.transform = scene.STTransform(scale=(1, 1 / 8.))


def update(ev):
    m = 50
    data = np.random.normal(size=(N, m), scale=0.3)
    data[data > 1] += 4
    lines.roll_data(data)
    canvas.context.flush()  # prevent memory leak when minimized


timer = app.Timer(connect=update, interval=0)
timer.start()

if __name__ == '__main__':
    import sys
# Prepare canvas
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)

# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()

# Create the visuals
vol = np.load(io.load_data_file('volume/stent.npz'))['arr_0']
volume = scene.visuals.Volume(vol, parent=view.scene, threshold=0.225)

np.random.seed(1)
points = np.random.rand(100, 3) * (128, 128, 128)
markers = scene.visuals.Markers(pos=points, parent=view.scene)
# add a transform to markers, to show clipping is in scene coordinates
markers.transform = scene.STTransform(translate=(0, 0, 128))

# Create the clipping planes filter for the markers (Volume has its own clipping logic)
clipper = PlanesClipper()
# and attach it to the markers
markers.attach(clipper)

# Create and set the camera
fov = 60.
cam = scene.cameras.TurntableCamera(parent=view.scene, fov=fov,
                                    name='Turntable')
view.camera = cam

# since volume data is in 'zyx' coordinates, we have to reverse the coordinates
# we use as a center
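# The PlanesClipper above only clips once planes are assigned. A hedged
# sketch of setting one plane (the (point, normal) array layout follows
# vispy's clipping_planes convention; the plane values here are arbitrary):
import numpy as np
from vispy.visuals.filters import PlanesClipper

clipper = PlanesClipper()
# shape (n_planes, 2, 3): a point on each plane and the plane normal;
# geometry on the normal side of every plane is kept
clipper.clipping_planes = np.array([[[0, 0, 64], [0, 0, 1]]],
                                   dtype=np.float32)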
def __init__(self, vol1_path, vol2_path):
    vol1_path = vol1_path
    vol2_path = vol2_path
    vol1 = load_numpy_array(vol1_path)
    if vol2_path is not None:
        vol2 = load_numpy_array(vol2_path)

    canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)

    # Set up a viewbox to display the image with interactive pan/zoom
    view = canvas.central_widget.add_view()

    # Set whether we are emulating a 3D texture
    emulate_texture = False

    # Create the volume visuals, only one is visible
    volume1 = scene.visuals.Volume(vol1, parent=view.scene, threshold=0.225,
                                   emulate_texture=emulate_texture)
    volume1.transform = scene.STTransform(translate=(64, 64, 0))
    if vol2_path is not None:
        volume2 = scene.visuals.Volume(vol2, parent=view.scene, threshold=0.2,
                                       emulate_texture=emulate_texture)
        volume2.visible = False

    # Create three cameras (Fly, Turntable and Arcball)
    fov = 60.
    cam1 = scene.cameras.FlyCamera(parent=view.scene, fov=fov, name='Fly')
    cam2 = scene.cameras.TurntableCamera(parent=view.scene, fov=fov,
                                         name='Turntable')
    cam3 = scene.cameras.ArcballCamera(parent=view.scene, fov=fov,
                                       name='Arcball')
    view.camera = cam2  # Select turntable at first

    # Create an XYZaxis visual
    axis = scene.visuals.XYZAxis(parent=view)
    s = STTransform(translate=(50, 50), scale=(50, 50, 50, 1))
    affine = s.as_matrix()
    axis.transform = affine

    # Implement axis connection with cam2
    @canvas.events.mouse_move.connect
    def on_mouse_move(event):
        if event.button == 1 and event.is_dragging:
            axis.transform.reset()
            axis.transform.rotate(cam2.roll, (0, 0, 1))
            axis.transform.rotate(cam2.elevation, (1, 0, 0))
            axis.transform.rotate(cam2.azimuth, (0, 1, 0))
            axis.transform.scale((50, 50, 0.001))
            axis.transform.translate((50., 50.))
            axis.update()

    # Implement key presses
    @canvas.events.key_press.connect
    def on_key_press(event):
        global opaque_cmap, translucent_cmap
        if event.text == '1':
            cam_toggle = {cam1: cam2, cam2: cam3, cam3: cam1}
            view.camera = cam_toggle.get(view.camera, cam2)
            print(view.camera.name + ' camera')
            if view.camera is cam2:
                axis.visible = True
            else:
                axis.visible = False
        elif event.text == '2':
            methods = ['mip', 'translucent', 'iso', 'additive']
            method = methods[(methods.index(volume1.method) + 1) % 4]
            print("Volume render method: %s" % method)
            cmap = opaque_cmap if method in ['mip', 'iso'] else \
                translucent_cmap
            volume1.method = method
            volume1.cmap = cmap
            if vol2_path is not None:
                volume2.method = method
                volume2.cmap = cmap
        elif event.text == '3':
            volume1.visible = not volume1.visible
            if vol2_path is not None:  # volume2 only exists with a second path
                volume2.visible = not volume1.visible
        elif event.text == '4':
            if volume1.method in ['mip', 'iso']:
                cmap = opaque_cmap = next(opaque_cmaps)
            else:
                cmap = translucent_cmap = next(translucent_cmaps)
            volume1.cmap = cmap
            if vol2_path is not None:
                volume2.cmap = cmap
        elif event.text == '0':
            cam1.set_range()
            cam3.set_range()
        elif event.text != '' and event.text in '[]':
            s = -0.025 if event.text == '[' else 0.025
            volume1.threshold += s
            if vol2_path is not None:
                volume2.threshold += s
            # report the threshold of whichever volume is shown
            th = volume1.threshold
            if vol2_path is not None and not volume1.visible:
                th = volume2.threshold
            print("Isosurface threshold: %0.3f" % th)
# Prepare canvas
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
# canvas.measure_fps()

# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()

# Set whether we are emulating a 3D texture
emulate_texture = False

# Create the volume visual
vol = scene.visuals.Volume(vol, parent=view.scene, threshold=0.225,
                           emulate_texture=emulate_texture)
vol.transform = scene.STTransform(scale=(1, 1, z_stretch))

# Create two cameras (1 for firstperson, 3 for 3d person)
fov = 60.
cam1 = scene.cameras.FlyCamera(parent=view.scene, fov=fov, name='Fly')
cam2 = scene.cameras.TurntableCamera(parent=view.scene, fov=fov,
                                     name='Turntable')
cam3 = scene.cameras.ArcballCamera(parent=view.scene, fov=fov, name='Arcball')
view.camera = cam2  # Select turntable at first


# create colormaps that work well for translucent and additive volume rendering
class TransFire(BaseColormap):
    glsl_map = """
    vec4 translucent_fire(float t) {
p1.transform.translate([-0.5, -0.5, 0])
view.add(p1)

# p1._update_data()  # cheating.
#
# cf = scene.filters.ZColormapFilter('fire', zrange=(z.max(), z.min()))
# p1.attach(cf)

xax = scene.Axis(pos=[[-0.5, -0.5], [0.5, -0.5]], tick_direction=(0, -1),
                 font_size=16, axis_color='k', tick_color='k', text_color='k',
                 parent=view.scene)
xax.transform = scene.STTransform(translate=(0, 0, -0.2))
yax = scene.Axis(pos=[[-0.5, -0.5], [-0.5, 0.5]], tick_direction=(-1, 0),
                 font_size=16, axis_color='k', tick_color='k', text_color='k',
                 parent=view.scene)
yax.transform = scene.STTransform(translate=(0, 0, -0.2))

# Add a 3D axis to keep us oriented
axis = scene.visuals.XYZAxis(parent=view.scene)

if __name__ == '__main__':
    canvas.show()
imdata = io.load_crate().astype('float32') / 255

views = []
images = []
for i in range(2):
    for j in range(2):
        v = grid.add_view(row=i, col=j, border_color='white')
        v.camera = 'turntable'
        v.camera.fov = 50
        v.camera.distance = 30
        # v.camera = 'panzoom'
        # v.camera.aspect = 1
        views.append(v)

        image = scene.visuals.Image(imdata, method='impostor', grid=(4, 4))
        image.transform = scene.STTransform(translate=(-12.8, -12.8),
                                            scale=(0.1, 0.1))
        v.add(image)
        images.append(image)


@canvas.connect
def on_key_press(ev):
    if ev.key.name == '1':
        print("Image method: impostor")
        for im in images:
            im.method = 'impostor'
    elif ev.key.name == '2':
        print("Image method: subdivide")
        for im in images:
            im.method = 'subdivide'
    elif ev.key.name == '3':