def __init__(self, viewer, parent=None, order=0):
    self._viewer = viewer

    self._data = np.array(
        [
            [0, 0, -1],
            [1, 0, -1],
            [0, -5, -1],
            [0, 5, -1],
            [1, -5, -1],
            [1, 5, -1],
        ]
    )
    self._default_color = np.array([1, 0, 1, 1])
    self._target_length = 150
    self._scale = 1
    self._quantity = None
    self._unit_reg = None

    self.node = Line(connect='segments', method='gl', parent=parent, width=3)
    self.node.order = order
    self.node.transform = STTransform()

    # In order for the text to always appear centered on the scale bar,
    # the text node should use the line node as the parent.
    self.text_node = Text(pos=[0.5, -1], parent=self.node)
    self.text_node.order = order
    self.text_node.transform = STTransform()
    self.text_node.font_size = 10
    self.text_node.anchors = ("center", "center")
    self.text_node.text = f"{1}px"

    # Note:
    # There are issues on MacOS + GitHub action about destroyed
    # C/C++ object during test if those don't get disconnected.
    def set_none():
        self.node._set_canvas(None)
        self.text_node._set_canvas(None)

    # the two canvases are not the same object, better be safe.
    self.node.canvas._backend.destroyed.connect(set_none)
    self.text_node.canvas._backend.destroyed.connect(set_none)
    assert self.node.canvas is self.text_node.canvas
    # End Note

    self._viewer.events.theme.connect(self._on_data_change)
    self._viewer.scale_bar.events.visible.connect(self._on_visible_change)
    self._viewer.scale_bar.events.colored.connect(self._on_data_change)
    self._viewer.scale_bar.events.ticks.connect(self._on_data_change)
    self._viewer.scale_bar.events.position.connect(self._on_position_change)
    self._viewer.camera.events.zoom.connect(self._on_zoom_change)
    self._viewer.scale_bar.events.font_size.connect(self._on_text_change)
    self._viewer.scale_bar.events.unit.connect(self._on_dimension_change)

    self._on_visible_change(None)
    self._on_data_change(None)
    self._on_dimension_change(None)
    self._on_position_change(None)
def __init__(self, viewer, parent=None, order=0):
    self._viewer = viewer

    # Load logo and make grayscale
    logopath = join(dirname(__file__), '..', 'resources', 'logo.png')
    logo = imread(logopath)
    self._logo_raw = logo
    self._logo_border = np.all(logo[..., :3] == [38, 40, 61], axis=2)
    self._logo = np.zeros(logo.shape)

    self.node = ImageNode(parent=parent)
    self.node.order = order
    self.node.cmap = 'grays'
    self.node.transform = STTransform()

    self.text_node = Text(pos=[0, 0], parent=parent, method='gpu', bold=False)
    self.text_node.order = order
    self.text_node.transform = STTransform()
    self.text_node.anchors = ('left', 'center')
    self.text_node.text = (
        'to add data:\n'
        ' - drag and drop file(s) here\n'
        ' - select File > Open from the menu\n'
        ' - call a viewer.add_* method'
    )
    self.text_node.color = np.divide(
        str_to_rgb(darken(self._viewer.palette['foreground'], 30)), 255
    )

    self._on_palette_change(None)
    self._on_visible_change(None)
    self._on_canvas_change(None)
def attach(self, viewer, view, canvas, parent=None, order=0):
    super().attach(viewer, view, canvas, parent, order)
    self.rect_node = Compound(
        [Line(connect='segments', method='gl', width=4)],
        parent=parent,
    )
    self.rect_node.transform = STTransform()
    self.rect_node.order = order

    self.handle_node = Compound(
        [Line(connect='segments', method='gl', width=2)],
        parent=parent,
    )
    self.handle_node.transform = STTransform()
    self.handle_node.order = order

    self._nodes = [self.rect_node, self.handle_node]

    canvas.connect(self.on_mouse_press)
    canvas.connect(self.on_mouse_move)
    canvas.connect(self.on_mouse_release)

    self._viewer.camera.events.zoom.connect(self._on_zoom_change)
    self._viewer.dims.events.ndisplay.connect(self._on_data_change)

    self._on_zoom_change(None)
    self._on_data_change(None)

    self._update_position()
    self._update_size()
def test_mesh_shading_filter(shading):
    size = (45, 40)
    with TestingCanvas(size=size, bgcolor="k") as c:
        v = c.central_widget.add_view(border_width=0)
        # Create visual
        mdata = create_sphere(20, 40, radius=20)
        mesh = scene.visuals.Mesh(meshdata=mdata,
                                  shading=shading,
                                  color=(0.1, 0.3, 0.7, 0.9))
        v.add(mesh)
        from vispy.visuals.transforms import STTransform
        mesh.transform = STTransform(translate=(20, 20))
        mesh.transforms.scene_transform = STTransform(scale=(1, 1, 0.01))

        rendered = c.render()[..., 0]  # R channel only
        if shading in ("flat", "smooth"):
            # there should be a gradient, not solid colors
            assert np.unique(rendered).size >= 28
            # sphere/circle starts "dark" on the left and gets brighter
            # then hits a bright spot and decreases after
            invest_row = rendered[23].astype(np.float64)

            # overall, we should be increasing brightness up to a "bright spot"
            assert (np.diff(invest_row[:29]) >= -1).all()
        else:
            assert np.unique(rendered).size == 2
def __init__(self, texture, pos):
    self.vshader = Function("""
        void line_of_sight() {
            vec4 polar_pos = $transform(vec4($pos, 1));
            if( polar_pos.x > 0.999 ) {
                polar_pos.x = 0.001;
            }
            vec4 c = texture2D($texture, vec2(polar_pos.x, 0.5));
            float depth = c.r;
            if( polar_pos.y > depth+0.5 ) {
                $mask = vec3(0.5, 0.5, 1);  // out-of-sight objects turn blue
            }
            else {
                $mask = vec3(1, 1, 1);
            }
        }
    """)

    self.fshader = Function("""
        void apply_texture_mask() {
            gl_FragColor *= vec4($mask,1);
        }
    """)

    self.center = STTransform()
    self.transform = (STTransform(scale=(0.5 / np.pi, 1, 0),
                                  translate=(0.5, 0, 0)) *
                      PolarTransform().inverse *
                      self.center)

    self.vshader['pos'] = pos
    self.vshader['transform'] = self.transform
    self.vshader['texture'] = texture
    self.vshader['mask'] = Varying('mask', dtype='vec3')
    self.fshader['mask'] = self.vshader['mask']
def __define_eyeballs(self):
    lsphere = visuals.Sphere(radius=0.024, method='ico', color='red')
    rsphere = visuals.Sphere(radius=0.024, method='ico', color='green')
    lsphere.transform = STTransform(translate=self.leyeball)
    rsphere.transform = STTransform(translate=self.reyeball)
    self.view.add(lsphere)
    self.view.add(rsphere)
def __init__(self):
    app.Canvas.__init__(self, keys='interactive', size=(800, 550))

    self.meshes = []
    self.rotation = MatrixTransform()

    # Generate some data to work with
    global mdata
    mdata = create_sphere(20, 40, 1.0)

    # Mesh with pre-indexed vertices, uniform color
    self.meshes.append(visuals.MeshVisual(meshdata=mdata, color='b'))

    # Mesh with pre-indexed vertices, per-face color
    # Because vertices are pre-indexed, we get a different color
    # every time a vertex is visited, resulting in sharp color
    # differences between edges.
    verts = mdata.get_vertices(indexed='faces')
    nf = verts.size // 9
    fcolor = np.ones((nf, 3, 4), dtype=np.float32)
    fcolor[..., 0] = np.linspace(1, 0, nf)[:, np.newaxis]
    fcolor[..., 1] = np.random.normal(size=nf)[:, np.newaxis]
    fcolor[..., 2] = np.linspace(0, 1, nf)[:, np.newaxis]
    mesh = visuals.MeshVisual(vertices=verts, face_colors=fcolor)
    self.meshes.append(mesh)

    # Mesh with unindexed vertices, per-vertex color
    # Because vertices are unindexed, we get the same color
    # every time a vertex is visited, resulting in no color differences
    # between edges.
    verts = mdata.get_vertices()
    faces = mdata.get_faces()
    nv = verts.size // 3
    vcolor = np.ones((nv, 4), dtype=np.float32)
    vcolor[:, 0] = np.linspace(1, 0, nv)
    vcolor[:, 1] = np.random.normal(size=nv)
    vcolor[:, 2] = np.linspace(0, 1, nv)
    self.meshes.append(visuals.MeshVisual(verts, faces, vcolor))
    self.meshes.append(
        visuals.MeshVisual(verts, faces, vcolor, shading='flat'))
    self.meshes.append(
        visuals.MeshVisual(verts, faces, vcolor, shading='smooth'))

    # Lay out meshes in a grid
    grid = (3, 3)
    s = 300. / max(grid)
    for i, mesh in enumerate(self.meshes):
        x = 800. * (i % grid[0]) / grid[0] + 400. / grid[0] - 2
        y = 800. * (i // grid[1]) / grid[1] + 400. / grid[1] + 2
        transform = ChainTransform([
            STTransform(translate=(x, y), scale=(s, s, s)),
            self.rotation
        ])
        mesh.transform = transform
        mesh.transforms.scene_transform = STTransform(scale=(1, 1, 0.01))

    self.show()

    self.timer = app.Timer(connect=self.rotate)
    self.timer.start(0.016)
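# A minimal sketch (not part of the example above) of why the per-face vs
# per-vertex coloring differs: MeshData.get_vertices(indexed='faces')
# duplicates each vertex per face, while get_vertices() returns the shared
# vertex array. Only create_sphere from vispy.geometry is assumed here.
from vispy.geometry import create_sphere

md = create_sphere(20, 40, 1.0)
shared = md.get_vertices()                   # shape (n_vertices, 3), shared
per_face = md.get_vertices(indexed='faces')  # shape (n_faces, 3, 3), per face
print(shared.shape, per_face.shape)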
def test_transforms():
    # test transform mapping between nodes
    root = Node()
    n1 = Node(parent=root)
    n2 = Node(parent=n1)
    n3 = Node(parent=root)
    n4 = Node(parent=n3)

    n1.transform = STTransform(scale=(0.1, 0.1), translate=(7, 6))
    n2.transform = STTransform(scale=(0.2, 0.3), translate=(5, 4))
    n3.transform = STTransform(scale=(0.4, 0.5), translate=(3, 2))
    n4.transform = STTransform(scale=(0.6, 0.7), translate=(1, 0))

    assert np.allclose(n1.transform.map((0, 0))[:2], (7, 6))
    assert np.allclose(n1.node_transform(root).map((0, 0))[:2], (7, 6))
    assert np.allclose(n2.transform.map((0, 0))[:2], (5, 4))
    assert np.allclose(n2.node_transform(root).map((0, 0))[:2],
                       (5*0.1+7, 4*0.1+6))
    assert np.allclose(root.node_transform(n1).map((0, 0))[:2],
                       (-7/0.1, -6/0.1))
    assert np.allclose(root.node_transform(n2).map((0, 0))[:2],
                       ((-7/0.1-5)/0.2, (-6/0.1-4)/0.3))

    # just check that we can assemble transforms correctly mapping across the
    # scenegraph
    assert n2.node_path(n4) == ([n2, n1, root], [n3, n4])
    assert n4.node_path(n2) == ([n4, n3, root], [n1, n2])
    assert n2.node_path(root) == ([n2, n1, root], [])
    assert root.node_path(n4) == ([root], [n3, n4])
    assert n2.node_path_transforms(n4) == [n4.transform.inverse,
                                           n3.transform.inverse,
                                           n1.transform, n2.transform]
    assert n4.node_path_transforms(n2) == [n2.transform.inverse,
                                           n1.transform.inverse,
                                           n3.transform, n4.transform]

    pts = np.array([[0, 0], [1, 1], [-56.3, 800.2]])
    assert np.all(n2.node_transform(n1).map(pts) == n2.transform.map(pts))
    assert np.all(n2.node_transform(root).map(pts) ==
                  n1.transform.map(n2.transform.map(pts)))
    assert np.all(n1.node_transform(n3).map(pts) ==
                  n3.transform.inverse.map(n1.transform.map(pts)))
    assert np.all(n2.node_transform(n3).map(pts) ==
                  n3.transform.inverse.map(
                      n1.transform.map(n2.transform.map(pts))))
    assert np.all(n2.node_transform(n4).map(pts) ==
                  n4.transform.inverse.map(n3.transform.inverse.map(
                      n1.transform.map(n2.transform.map(pts)))))

    # test transforms still work after reparenting
    n3.parent = n1
    assert np.all(n2.node_transform(n4).map(pts) ==
                  n4.transform.inverse.map(
                      n3.transform.inverse.map(n2.transform.map(pts))))

    # test transform simplification
    assert np.all(n2.node_transform(n4).map(pts) ==
                  n2.node_transform(n4).simplified.map(pts))
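# A minimal sketch (assuming only numpy and vispy's STTransform) of the
# composition rule the test above relies on: `a * b` maps through `b` first,
# then `a`, so chained node transforms match nested map() calls.
import numpy as np
from vispy.visuals.transforms import STTransform

a = STTransform(scale=(0.1, 0.1), translate=(7, 6))
b = STTransform(scale=(0.2, 0.3), translate=(5, 4))
pts = np.array([[0.0, 0.0], [1.0, 1.0]])
assert np.allclose((a * b).map(pts), a.map(b.map(pts)))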
def update(ev):
    global x1, y1, z1, x2, y2, z2

    sphere1.transform = STTransform(translate=[x1, y1, z1])
    sphere3.transform = STTransform(translate=[x2, y2, z2])

    y1 -= 0.4
    z2 -= 0.4

    view.camera.fov = view.camera.fov + 1
def __init__(self):
    vispy.app.Canvas.__init__(self, keys='interactive', size=(800, 800))

    # Create 4 copies of an image to be displayed with different transforms
    image = get_image()
    self.images = [visuals.ImageVisual(image, method='impostor')
                   for i in range(4)]

    # Transform all images to a standard size / location (because
    # get_image() might return unexpected sizes)
    s = 100. / max(self.images[0].size)
    tx = 0.5 * (100 - (self.images[0].size[0] * s))
    ty = 0.5 * (100 - (self.images[0].size[1] * s))
    base_tr = STTransform(scale=(s, s), translate=(tx, ty))

    self.images[0].transform = (STTransform(scale=(30, 30),
                                            translate=(600, 600)) *
                                SineTransform() *
                                STTransform(scale=(0.1, 0.1),
                                            translate=(-5, -5)) *
                                base_tr)

    tr = MatrixTransform()
    tr.rotate(40, (0, 0, 1))
    tr.rotate(30, (1, 0, 0))
    tr.translate((0, -20, -60))

    p = MatrixTransform()
    p.set_perspective(0.5, 1, 0.1, 1000)
    tr = p * tr

    tr1 = (STTransform(translate=(200, 600)) *
           tr *
           STTransform(translate=(-50, -50)) *
           base_tr)
    self.images[1].transform = tr1

    tr2 = (STTransform(scale=(3, -100), translate=(200, 50)) *
           LogTransform((0, 2, 0)) *
           STTransform(scale=(1, -0.01), translate=(-50, 1.1)) *
           base_tr)
    self.images[2].transform = tr2

    tr3 = (STTransform(scale=(400, 400), translate=(570, 400)) *
           PolarTransform() *
           STTransform(scale=(np.pi/150, -0.005),
                       translate=(-3.3*np.pi/4., 0.7)) *
           base_tr)
    self.images[3].transform = tr3

    text = visuals.TextVisual(
        text=['logarithmic', 'polar', 'perspective', 'custom (sine)'],
        pos=[(100, 20), (500, 20), (100, 410), (500, 410)],
        color='k', font_size=16)

    self.visuals = self.images + [text]
    self.show()
def __init__(self, texture, transform, scale):
    self.fshader = Function("""
        void apply_texture_mask() {
            vec4 tex_pos = $transform(gl_FragCoord);
            tex_pos /= tex_pos.w;
            vec4 mask = texture2D($texture, tex_pos.xy);
            mask.w = 1.0;
            gl_FragColor = gl_FragColor * mask;
        }
    """)

    self.fshader['texture'] = texture
    self.scale_tr = (STTransform(scale=scale) *
                     STTransform(translate=(0.5, 0.5)))
    self.fshader['transform'] = self.scale_tr * transform
def __init__(self):
    vispy.app.Canvas.__init__(self, keys='interactive', size=(800, 800))

    self.images = [visuals.ImageVisual(image, method='impostor')
                   for i in range(4)]
    self.images[0].transform = (STTransform(scale=(30, 30),
                                            translate=(600, 600)) *
                                SineTransform() *
                                STTransform(scale=(0.1, 0.1),
                                            translate=(-5, -5)))

    tr = AffineTransform()
    tr.rotate(30, (0, 0, 1))
    tr.scale((3, 3))
    self.images[1].transform = (STTransform(translate=(200, 600)) *
                                tr *
                                STTransform(translate=(-50, -50)))

    self.images[2].transform = (STTransform(scale=(3, -150),
                                            translate=(200, 100)) *
                                LogTransform((0, 2, 0)) *
                                STTransform(scale=(1, -0.01),
                                            translate=(-50, 1.3)))

    self.images[3].transform = (STTransform(scale=(400, 400),
                                            translate=(600, 300)) *
                                PolarTransform() *
                                STTransform(scale=(np.pi/200, 0.005),
                                            translate=(-3*np.pi/4., 0.1)))

    for img in self.images:
        img.tr_sys = TransformSystem(self)
        img.tr_sys.visual_to_document = img.transform

    self.show()
def test_canvas_render(blend_func):
    """Test rendering a canvas to an array.

    Different blending functions are used to test what various Visuals
    may produce without actually using different types of Visuals.
    """
    with Canvas(size=(125, 125), show=True, title='run') as c:
        im1 = np.zeros((100, 100, 4)).astype(np.float32)
        im1[:, :, 0] = 1
        im1[:, :, 3] = 1

        im2 = np.zeros((50, 50, 4)).astype(np.float32)
        im2[:, :, 1] = 1
        im2[:, :, 3] = 0.4

        # Create the image
        image1 = ImageVisual(im1)
        image1.transform = STTransform(translate=(20, 20, 0))
        image1.transforms.configure(canvas=c, viewport=(0, 0, 125, 125))

        image2 = ImageVisual(im2)
        image2.transform = STTransform(translate=(0, 0, -1))
        image2.transforms.configure(canvas=c, viewport=(0, 0, 125, 125))

        if blend_func:
            image1.set_gl_state(preset='translucent', blend_func=blend_func)
            image2.set_gl_state(preset='translucent', blend_func=blend_func)

        @c.events.draw.connect
        def on_draw(ev):
            gloo.clear('black')
            gloo.set_viewport(0, 0, *c.physical_size)
            image1.draw()
            image2.draw()

        rgba_result = c.render()
        rgb_result = c.render(alpha=False)

        # the results should be the same except for alpha
        np.testing.assert_allclose(rgba_result[..., :3], rgb_result)
        # the image should have something drawn in it
        assert not np.allclose(rgba_result[..., :3], 0)
        # the alpha should not be completely transparent
        assert not np.allclose(rgba_result[..., 3], 0)
        if blend_func is None or 'one' in blend_func:
            # no transparency
            np.testing.assert_allclose(rgba_result[..., 3], 255)
        else:
            # the alpha should have some transparency
            assert (rgba_result[..., 3] != 255).any()
def __init__(self, scene, los_tex, size, supersample=4):
    vert = """
        #version 120

        attribute vec2 pos;
        varying vec2 v_pos;

        void main(void) {
            gl_Position = vec4(pos, 0, 1);
            v_pos = $transform(gl_Position).xy;
        }
    """
    frag = """
        #version 120

        varying vec2 v_pos;
        uniform sampler2D los_tex;

        void main(void) {
            vec2 polar_pos = $transform(vec4(v_pos, 0, 1)).xy;
            float los_depth = texture2D(los_tex, vec2(polar_pos.x, 0.5)).r;
            float diff = (los_depth+1 - polar_pos.y);
            gl_FragColor = vec4(diff, diff, diff, 1);
        }
    """
    self.scene = scene
    self.size = (size[0] * supersample, size[1] * supersample)
    self.vertices = np.array([[-1, -1], [1, -1], [-1, 1],
                              [-1, 1], [1, -1], [1, 1]], dtype='float32')

    self.program = ModularProgram(vert, frag)
    self.program['pos'] = self.vertices
    self.program['los_tex'] = los_tex
    self.program.vert['transform'] = (
        STTransform(scale=(size[1] / 2., size[0] / 2.)) *
        STTransform(translate=(1, 1)))

    self.center = STTransform()
    self.program.frag['transform'] = (
        STTransform(scale=(0.5 / np.pi, 1, 1), translate=(0.5, 0, 0)) *
        PolarTransform().inverse *
        self.center)

    self.tex = vispy.gloo.Texture2D(shape=self.size + (4,), format='rgba',
                                    interpolation='linear')
    self.fbo = vispy.gloo.FrameBuffer(
        color=self.tex,
        depth=vispy.gloo.RenderBuffer(self.size))
def create_cities(self):
    # initialize city markers
    self.markers = Markers(parent=self.view.scene)
    # move z-direction a bit negative (means nearer to the viewer)
    self.markers.transform = STTransform(translate=(0, 0, -10))

    cities = utils.get_cities_coords()
    cnameList = []
    ccoordList = []
    for k, v in cities.items():
        cnameList.append(k)
        ccoordList.append(v)

    ccoord = np.vstack(ccoordList)
    ccoord = utils.wgs84_to_radolan(ccoord)
    pos_scene = np.zeros((ccoord.shape[0], 2), dtype=np.float32)
    pos_scene[:] = ccoord - self.r0

    # initialize Markers
    self.markers.set_data(pos=pos_scene,
                          symbol="disc",
                          edge_color="blue",
                          face_color='red',
                          size=10)

    # initialize Markertext
    self.text = Text(text=cnameList,
                     pos=pos_scene,
                     font_size=15,
                     anchor_x='right',
                     anchor_y='top',
                     parent=self.view.scene)
def create_marker(self, id, pos, name):
    marker = Markers(parent=self.view.scene)
    marker.transform = STTransform(translate=(0, 0, -10))
    marker.interactive = True

    # add id
    marker.unfreeze()
    marker.id = id
    marker.freeze()

    marker.set_data(pos=pos[np.newaxis],
                    symbol="disc",
                    edge_color="blue",
                    face_color='red',
                    size=10)

    # initialize Markertext
    text = Text(text=name,
                pos=pos,
                font_size=15,
                anchor_x='right',
                anchor_y='top',
                parent=self.view.scene)
    return marker, text
def load_cue(self, cue_file, cue_name=None):
    _cue = Cue(cue_file)
    _cue.name = cue_name
    self.cues[cue_name] = _cue
    self.cues[cue_name].center = self.maze.coord[cue_name]
    self.cues[cue_name].origin = self.origin
    self.cues[cue_name].transform = STTransform()
    self.cues[cue_name].scale(100)
    self.cues[cue_name].pos = [0, 0, self.cues[cue_name].center[-1]]
    self.view.add(self.cues[cue_name])
    # jovian will use this parameter
    self.cues_height[cue_name] = self.cues[cue_name].center[-1]

    @_cue.connect
    def on_move(target_item, target_pos):
        if hasattr(self, 'jov'):
            self.jov.teleport(prefix='model',
                              target_pos=target_pos,
                              target_item=target_item)
        else:
            _cue_default_offset = (self.cues[target_item]._xy_center *
                                   self.cues[target_item]._scale_factor)
            self.cues[target_item]._transform.translate = (
                self._to_jovian_coord(target_pos).astype(np.float32) -
                _cue_default_offset)
def _create_image_chunk(self, chunk: ChunkData):
    """Add a new chunk.

    Parameters
    ----------
    chunk : ChunkData
        The data used to create the new chunk.
    """
    image_chunk = ImageChunk()
    data = self._outline_chunk(chunk.data)

    # Parent VispyImageLayer will process the data then set it.
    self._set_node_data(image_chunk.node, data)

    # Make the new ImageChunk a child positioned with us.
    image_chunk.node.parent = self.node

    pos = [chunk.pos[0] * 1024, chunk.pos[1] * 1024]
    size = chunk.size * 16
    # pos = [512, 0]
    # size = 7
    # print(pos, size)
    image_chunk.node.transform = STTransform(translate=pos,
                                             scale=[size, size])

    return image_chunk
def __init__(self, source=None, **kwargs):
    super(PolarImage, self).__init__(**kwargs)

    self.unfreeze()

    # source should be an object, which contains information about
    # a specific radar source
    self.source = source

    # source should contain the radar coordinates in some usable format
    # here I assume offset from lower left (0,0)
    if source is not None:
        xoff = source['X']
        yoff = source['Y']
    else:
        xoff = 0
        yoff = 0

    # this takes the image sizes and uses them for the transformation
    self.theta = self._data.shape[0]
    self.range = self._data.shape[1]

    # PTransform takes care of making PPI from data array
    # rot rotates the ppi 180 deg (image origin is upper left)
    # the translation moves the image to center the ppi
    rot = MatrixTransform()
    rot.rotate(180, (0, 0, 1))
    self.transform = (
        STTransform(translate=(self.range + xoff, self.range + yoff, 0)) *
        rot *
        PTransform())

    self.freeze()
def _niimg_norm(sh, diag, translate):
    """Normalize the volume between (0., 1.)."""
    # Compute normalization ratio
    ratio = np.abs(diag) * sh
    sgn = np.sign(diag)
    # Get scale and translate
    sc = 1. / ratio
    tr = -(translate + np.array([0., 0, 0])) / ratio
    # Define transformations of each slice
    sg_norm = STTransform(scale=(sc[1], sc[2], 1.),
                          translate=(tr[1], tr[2], 1.))
    cr_norm = STTransform(scale=(sc[0], sc[2], 1.),
                          translate=(sgn[0] * tr[0], tr[2], 1.))
    ax_norm = STTransform(scale=(sc[1], sc[0], 1.),
                          translate=(tr[1], sgn[0] * tr[0], 1.))
    return sg_norm, cr_norm, ax_norm
def __init__(self, **kwargs):
    super(RadolanCanvas, self).__init__(keys='interactive', **kwargs)

    # set size of Canvas
    self.size = 450, 450

    # unfreeze needed to add more elements
    self.unfreeze()

    # add grid central widget
    self.grid = self.central_widget.add_grid()

    # add view to grid
    self.view = self.grid.add_view(row=0, col=0)
    self.view.border_color = (0.5, 0.5, 0.5, 1)

    # add signal emitters
    self.mouse_moved = EventEmitter(source=self, type="mouse_moved")

    # block double clicks
    self.events.mouse_double_click.block()

    # initialize empty RADOLAN image
    img_data = np.zeros((900, 900))

    # initialize colormap, we take cubehelix for now
    # this is the nicest colormap for radar in vispy
    cmap = 'cubehelix'

    # initialize Image Visual with img_data
    # add to view
    self.image = Image(img_data,
                       method='subdivide',
                       # interpolation='bicubic',
                       cmap=cmap,
                       parent=self.view.scene)

    # add transform to Image
    # (mostly positioning within canvas)
    self.image.transform = STTransform(translate=(0, 0, 0))

    # get radolan ll point coordinate into self.r0
    self.r0 = utils.get_radolan_origin()

    # create cities (Markers and Text Visuals)
    self.create_cities()

    # create PanZoomCamera
    self.cam = PanZoomCamera(name="PanZoom",
                             rect=Rect(0, 0, 900, 900),
                             aspect=1,
                             parent=self.view.scene)
    self.view.camera = self.cam

    self._mouse_position = None

    self.freeze()

    # print FPS to console, vispy SceneCanvas internal function
    self.measure_fps()
def smooth_3d(vol, smooth_factor=3, correct=True):
    """Smooth a 3-D volume.

    Parameters
    ----------
    vol : array_like
        The volume of shape (N, M, P)
    smooth_factor : int | 3
        The smoothing factor.
    correct : bool | True
        If True, also build an STTransform that compensates for the
        convolution padding.

    Returns
    -------
    vol_smooth : array_like
        The smooth volume with the same shape as vol.
    tf : vispy transform
        The transform associated with the smoothed volume.
    """
    tf = NullTransform()
    # No smoothing :
    if (not isinstance(smooth_factor, int)) or (smooth_factor < 3):
        return vol, tf
    # Smoothing array :
    sz = np.full((3,), smooth_factor, dtype=int)
    smooth = np.ones([smooth_factor] * 3) / np.prod(sz)
    # Apply smoothing :
    sm = fftconvolve(vol, smooth, mode='same')
    if correct:
        # Get the shape of the vol and the one with 'full' convolution :
        vx, vy, vz = vol.shape
        vcx, vcy, vcz = np.array([vx, vy, vz]) + smooth_factor - 1
        # Define transform :
        sc = [vx / vcx, vy / vcy, vz / vcz]
        tr = .5 * np.array([smooth_factor] * 3)
        tf = STTransform(scale=sc, translate=tr)
    return sm, tf
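# Hypothetical usage sketch for smooth_3d() as defined above (the random
# volume and the factor are made up for illustration): the returned transform
# is intended to be assigned to the visual displaying the smoothed volume.
import numpy as np

vol = np.random.rand(32, 32, 32).astype(np.float32)
vol_smooth, tf = smooth_3d(vol, smooth_factor=5)
print(vol_smooth.shape, tf)  # same shape as vol; tf is an STTransform here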
def __init__(self):
    app.Canvas.__init__(self, title='Arrows example',
                        keys='interactive', size=(1050, 650))

    line1 = curves.curve4_bezier((10.0, 0.0), (50, -190),
                                 (350, 190), (390, 0.0))
    arrows1 = np.array([line1[-2], line1[-1]]).reshape((1, 4))

    line2 = curves.curve4_bezier((10.0, 0.0), (190, -190),
                                 (210, 190), (390, 0.0))
    arrows2 = np.array([line2[1], line2[0],
                        line2[-2], line2[-1]]).reshape((2, 4))

    line3 = curves.curve3_bezier((10.0, 0.0), (50, 190), (390, 0.0))
    arrows3 = np.array([line3[-2], line3[-1]]).reshape((1, 4))

    arrow_types = ["curved", "stealth", "inhibitor_round", "angle_60"]

    self.lines = []
    for i, arrow_type in enumerate(arrow_types):
        arrows = [
            visuals.ArrowVisual(line1, color='w', width=6, method='agg',
                                arrows=arrows1, arrow_type=arrow_type,
                                arrow_size=30.0),
            visuals.ArrowVisual(line2, color='w', width=2, method='agg',
                                arrows=arrows2, arrow_type=arrow_type,
                                arrow_size=5.0),
            visuals.ArrowVisual(line3, color='w', width=4, method='agg',
                                arrows=arrows3, arrow_type=arrow_type,
                                arrow_size=10.0)
        ]

        # Translate each line visual downwards
        for j, visual in enumerate(arrows):
            x = 50 + (i * 250)
            y = 100 + (200 * j)
            visual.transform = STTransform(translate=[x, y],
                                           scale=(0.5, 1.0))
            visual.events.update.connect(lambda event: self.update())

        self.lines.extend(arrows)

    self.show()
class Canvas(app.Canvas):
    def __init__(self):
        app.Canvas.__init__(self, keys='interactive')
        ps = self.pixel_scale
        n = 10000
        pos = 0.25 * np.random.randn(n, 2).astype(np.float32)
        color = np.random.uniform(0, 1, (n, 3)).astype(np.float32)
        size = np.random.uniform(2 * ps, 12 * ps, (n, 1)).astype(np.float32)
        self.points = MarkerVisual(pos=pos, color=color, size=size)

        self.panzoom = STTransform(scale=(1, 0.2), translate=(0, 500))
        w2 = (self.size[0] / 2, self.size[1] / 2)
        self.transform = ChainTransform([
            self.panzoom,
            STTransform(scale=w2, translate=w2),
            LogTransform(base=(0, 2, 0))
        ])
        self.tr_sys = TransformSystem(self)
        self.tr_sys.visual_to_document = self.transform

        gloo.set_state(blend=True,
                       blend_func=('src_alpha', 'one_minus_src_alpha'))

    def on_mouse_move(self, event):
        if event.is_dragging:
            dxy = event.pos - event.last_event.pos
            button = event.press_event.button
            if button == 1:
                self.panzoom.move(dxy)
            elif button == 2:
                center = event.press_event.pos
                self.panzoom.zoom(np.exp(dxy * (0.01, -0.01)), center)
            self.update()

    def on_resize(self, event):
        self.width, self.height = event.size
        gloo.set_viewport(0, 0, self.width, self.height)

    def on_draw(self, event):
        gloo.clear()
        self.points.draw(self.tr_sys)
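# A minimal sketch (independent of the canvas above) of the two STTransform
# helpers used there for panning and zooming: move() offsets the translation
# in place and zoom() rescales around a fixed point.
from vispy.visuals.transforms import STTransform

pz = STTransform(scale=(1, 0.2), translate=(0, 500))
pz.move((10, -5))                   # pan by a screen-space offset
pz.zoom((2, 2), center=(400, 300))  # zoom x2 around a fixed screen point
print(pz.scale, pz.translate)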
def test_mesh_wireframe_filter():
    size = (45, 40)
    with TestingCanvas(size=size, bgcolor="k") as c:
        v = c.central_widget.add_view(border_width=0)
        # Create visual
        mdata = create_sphere(20, 40, radius=20)
        mesh = scene.visuals.Mesh(meshdata=mdata,
                                  shading=None,
                                  color=(0.1, 0.3, 0.7, 0.9))
        wireframe_filter = WireframeFilter(color='red')
        mesh.attach(wireframe_filter)
        v.add(mesh)
        from vispy.visuals.transforms import STTransform
        mesh.transform = STTransform(translate=(20, 20))
        mesh.transforms.scene_transform = STTransform(scale=(1, 1, 0.01))

        rendered_with_wf = c.render()
        assert np.unique(rendered_with_wf[..., 0]).size >= 50

        wireframe_filter.enabled = False
        rendered_wo_wf = c.render()
        # the result should be completely different
        # assert not allclose
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_wf, rendered_wo_wf)

        wireframe_filter.enabled = True
        wireframe_filter.wireframe_only = True
        rendered_with_wf_only = c.render()
        # the result should be different from the two cases above
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_wf_only, rendered_with_wf)
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_wf_only, rendered_wo_wf)

        wireframe_filter.enabled = True
        wireframe_filter.wireframe_only = False
        wireframe_filter.faces_only = True
        rendered_with_faces_only = c.render()
        # the result should be different from the cases above
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_faces_only, rendered_with_wf)
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_faces_only, rendered_wo_wf)
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_faces_only, rendered_with_wf_only)
def draw_scene(self):
    self.fixed_frame = scene.node.Node(self.view.scene)
    self.fixed_frame.transform = STTransform(
        translate=(-self.moving_volume.shape[2] // 2,
                   -self.moving_volume.shape[1] // 2,
                   -500))
    fixed_volume = scene.visuals.Volume(
        self.fixed_volume,
        parent=self.fixed_frame,
        threshold=self.model.fixed_display_threshold.get(),
        emulate_texture=False)
    fixed_volume.cmap = TranslucentFixedColormap()
    fixed_volume.method = VOLUME_RENDERING_METHOD
    #
    # The transformation is done as follows:
    #
    # The translation frame handles the offset
    #
    # The centering frame picks the center of the moving frame
    #
    # The rotation frame rotates about the center
    #
    # The uncentering frame readjusts the coordinates so that 0, 0 is
    # placed away from the center.
    #
    self.translation_frame = scene.node.Node(self.fixed_frame)
    moving_volume = scene.visuals.Volume(
        self.moving_volume,
        parent=self.translation_frame,
        threshold=self.model.moving_display_threshold.get(),
        emulate_texture=False)
    moving_volume.cmap = TranslucentMovingColormap()
    moving_volume.method = VOLUME_RENDERING_METHOD

    self.camera = scene.cameras.TurntableCamera(
        parent=self.view.scene, fov=60.,
        elevation=self.fixed_volume.shape[2] // 2,
        name="Turntable")
    self.view.camera = self.camera

    self.center_frame = scene.node.Node(parent=self.view)
    self.axis = scene.visuals.XYZAxis(parent=self.center_frame)
    axis_t = STTransform(scale=(50, 50, 50, 1))
    self.axis.transform = axis_t.as_matrix()

    self.apply_translation()
    self.scene.events.mouse_move.connect(self.on_mouse_move)
class Canvas(app.Canvas):
    def __init__(self):
        app.Canvas.__init__(self, keys='interactive')
        n = 10000
        pos = 0.25 * np.random.randn(n, 2).astype(np.float32)
        color = np.random.uniform(0, 1, (n, 3)).astype(np.float32)
        size = np.random.uniform(2, 12, (n, 1)).astype(np.float32)
        self.points = MarkerVisual(pos=pos, color=color, size=size)

        self.panzoom = STTransform(scale=(1, 0.2), translate=(0, 500))
        w2 = (self.size[0]/2, self.size[1]/2)
        self.transform = ChainTransform([self.panzoom,
                                         STTransform(scale=w2, translate=w2),
                                         LogTransform(base=(0, 2, 0))])
        self.tr_sys = TransformSystem(self)
        self.tr_sys.visual_to_document = self.transform

    def on_initialize(self, event):
        gloo.set_state(blend=True,
                       blend_func=('src_alpha', 'one_minus_src_alpha'))

    def on_mouse_move(self, event):
        if event.is_dragging:
            dxy = event.pos - event.last_event.pos
            button = event.press_event.button
            if button == 1:
                self.panzoom.move(dxy)
            elif button == 2:
                center = event.press_event.pos
                self.panzoom.zoom(np.exp(dxy * (0.01, -0.01)), center)
            self.update()

    def on_resize(self, event):
        self.width, self.height = event.size
        gloo.set_viewport(0, 0, self.width, self.height)

    def on_draw(self, event):
        gloo.clear()
        self.points.draw(self.tr_sys)
def __init__(self):
    app.Canvas.__init__(self, keys='interactive')
    n = 10000
    pos = 0.25 * np.random.randn(n, 2).astype(np.float32)
    color = np.random.uniform(0, 1, (n, 3)).astype(np.float32)
    size = np.random.uniform(2, 12, (n, 1)).astype(np.float32)
    self.points = MarkerVisual(pos=pos, color=color, size=size)

    self.panzoom = STTransform(scale=(1, 0.2), translate=(0, 500))
    w2 = (self.size[0]/2, self.size[1]/2)
    self.transform = ChainTransform([self.panzoom,
                                     STTransform(scale=w2, translate=w2),
                                     LogTransform(base=(0, 2, 0))])
    self.tr_sys = TransformSystem(self)
    self.tr_sys.visual_to_document = self.transform
def __init__(self):
    app.Canvas.__init__(self, keys='interactive', size=(800, 800))

    # Create several visuals demonstrating different features of Line
    self.lines = [
        # agg-method lines:
        # per-vertex color
        visuals.LineVisual(pos=pos, color=color, method='agg'),
        # solid
        visuals.LineVisual(pos=pos, color=(0, 0.5, 0.3, 1), method='agg'),
        # wide
        visuals.LineVisual(pos=pos, color=color, width=5, method='agg'),

        # GL-method lines:
        visuals.LineVisual(pos=pos, color=color, method='gl'),
        visuals.LineVisual(pos=pos, color=(0, 0.5, 0.3, 1), method='gl'),
        visuals.LineVisual(pos=pos, color=color, width=5, method='gl'),

        # GL-method: "connect" not available in AGG method yet
        # only connect alternate vert pairs
        visuals.LineVisual(pos=pos, color=(0, 0.5, 0.3, 1),
                           connect='segments', method='gl'),
        # connect specific pairs
        visuals.LineVisual(pos=pos, color=(0, 0.5, 0.3, 1),
                           connect=connect, method='gl'),
    ]
    counts = [0, 0]
    for i, line in enumerate(self.lines):
        # arrange lines in a grid
        tidx = (line.method == 'agg')
        x = 400 * tidx
        y = 140 * (counts[tidx] + 1)
        counts[tidx] += 1
        line.transform = STTransform(translate=[x, y])
        # redraw the canvas if any visuals request an update
        line.events.update.connect(lambda evt: self.update())

    self.texts = [visuals.TextVisual('GL', bold=True, font_size=24,
                                     color='w', pos=(200, 40)),
                  visuals.TextVisual('Agg', bold=True, font_size=24,
                                     color='w', pos=(600, 40))]
    for text in self.texts:
        text.transform = NullTransform()

    self.visuals = self.lines + self.texts
    self.show()
def _master_transform(self):
    """vispy.visuals.transforms.STTransform:
    Central node's firstmost transform.
    """
    # whenever a new parent is set, the transform is reset
    # to a NullTransform so we reset it here
    if not isinstance(self.node.transform, STTransform):
        self.node.transform = STTransform()

    return self.node.transform
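# A small illustrative sketch (the names below are not from the code above)
# of the reset behaviour that property guards against: a bare scenegraph Node
# carries a NullTransform, so scale/translate only become available once an
# STTransform has been assigned.
from vispy.scene import Node
from vispy.visuals.transforms import NullTransform, STTransform

node = Node()
print(type(node.transform))             # NullTransform by default
if not isinstance(node.transform, STTransform):
    node.transform = STTransform()
node.transform.translate = (10, 20, 0)  # now safe to position the node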
def test_histogram():
    """Test histogram visual"""
    size = (200, 100)
    with TestingCanvas(size=size, bgcolor='w') as c:
        np.random.seed(2397)
        data = np.random.normal(size=100)
        hist = Histogram(data, bins=20, color='k', parent=c.scene)
        hist.transform = STTransform((size[0] // 10, -size[1] // 20, 1),
                                     (100, size[1]))
        assert_image_approved(c.render(), "visuals/histogram.png")
def __init__(self): app.Canvas.__init__(self, keys="interactive") ps = self.pixel_scale n = 10000 pos = 0.25 * np.random.randn(n, 2).astype(np.float32) color = np.random.uniform(0, 1, (n, 3)).astype(np.float32) size = np.random.uniform(2 * ps, 12 * ps, (n, 1)).astype(np.float32) self.points = MarkerVisual(pos=pos, color=color, size=size) self.panzoom = STTransform(scale=(1, 0.2), translate=(0, 500)) w2 = (self.size[0] / 2, self.size[1] / 2) self.transform = ChainTransform( [self.panzoom, STTransform(scale=w2, translate=w2), LogTransform(base=(0, 2, 0))] ) self.tr_sys = TransformSystem(self) self.tr_sys.visual_to_document = self.transform gloo.set_state(blend=True, blend_func=("src_alpha", "one_minus_src_alpha"))
def __init__(self):
    QtCore.QObject.__init__(self)
    app.Canvas.__init__(self, keys='interactive', resizable=False)
    ps = self.pixel_scale
    self.roi_visuals = []
    self.current_roi = None
    self.finished = False
    self.drawing_roi = False
    self.markers = []

    self.panzoom = STTransform(scale=(1, 1), translate=(0, 0))
    self.transform = ChainTransform([self.panzoom,
                                     STTransform(scale=[1, 1],
                                                 translate=[1, 1]),
                                     LogTransform(base=(0, 0, 0))])
    self.tr_sys = TransformSystem(self)
    self.tr_sys.visual_to_document = self.transform

    gloo.set_state(blend=True,
                   blend_func=('src_alpha', 'one_minus_src_alpha'))
    self.timer = app.Timer('auto', connect=self.on_timer, start=True)
class Canvas(QtCore.QObject, app.Canvas):
    roiCreated = Signal(object)
    roiDeleted = Signal(object)

    def __init__(self):
        QtCore.QObject.__init__(self)
        app.Canvas.__init__(self, keys='interactive', resizable=False)
        ps = self.pixel_scale
        self.roi_visuals = []
        self.current_roi = None
        self.finished = False
        self.drawing_roi = False
        self.markers = []

        self.panzoom = STTransform(scale=(1, 1), translate=(0, 0))
        self.transform = ChainTransform([self.panzoom,
                                         STTransform(scale=[1, 1],
                                                     translate=[1, 1]),
                                         LogTransform(base=(0, 0, 0))])
        self.tr_sys = TransformSystem(self)
        self.tr_sys.visual_to_document = self.transform

        gloo.set_state(blend=True,
                       blend_func=('src_alpha', 'one_minus_src_alpha'))
        self.timer = app.Timer('auto', connect=self.on_timer, start=True)
        # self.native.setFixedSize(800, 600)

    def on_timer(self, event):
        self.update()

    def on_mouse_release(self, event):
        if event.button == 2 and self.drawing_roi:
            self.drawing_roi = False
            self.current_roi.draw_finished()
            self.roi_visuals.append(self.current_roi)
            self.current_roi.menu.addAction(QtGui.QAction(
                'Export ROIs', self.current_roi.menu,
                triggered=lambda: save_file_gui(
                    self.export_rois,
                    prompt='Export ROIs to text file',
                    filetypes='Text Files (*.txt)')))
            self.current_roi.menu.addAction(QtGui.QAction(
                'Import ROIs', self.current_roi.menu,
                triggered=lambda: open_file_gui(
                    self.import_rois,
                    prompt='Import ROIs from text file',
                    filetypes='Text Files (*.txt)')))
            self.current_roi.select()
            self.roiCreated.emit(self.current_roi)
        elif (any([roi.hover for roi in self.roi_visuals]) and
                event.button == 2 and not self.drawing_roi and
                event.last_event.type == 'mouse_press'):
            self.current_roi.contextMenuEvent(
                self.native.mapToGlobal(QtCore.QPoint(*event.pos)))
        for roi in self.roi_visuals:
            if roi.selected:
                roi.finish_translate()

    def export_rois(self, fname):
        roi_strs = [repr(roi) for roi in self.roi_visuals]
        with open(fname, 'w') as outf:
            outf.write('\n'.join(roi_strs))

    def import_rois(self, fname):
        rois = ROIVisual.importROIs(fname)
        for roi in rois:
            while roi.id in [r.id for r in self.roi_visuals]:
                roi.setId(roi.id + 1)
            self.roi_visuals.append(roi)

    def translatedPoint(self, pos):
        return np.array(
            [(pos[0] - self.panzoom.translate[0]) / self.panzoom.scale[0],
             (pos[1] - self.panzoom.translate[1]) / self.panzoom.scale[1]])

    def on_mouse_press(self, event):
        if self.drawing_roi:
            return
        if 'Control' in event.modifiers:
            for roi in self.roi_visuals:
                if roi.contains(self.translatedPoint(event.pos)):
                    if roi.selected:
                        roi.deselect()
                    else:
                        roi.select()
        else:
            self.current_roi = None
            for roi in self.roi_visuals:
                if roi.mouseIsOver(self.translatedPoint(event.pos)):
                    self.current_roi = roi
                    roi.select()
                else:
                    roi.deselect()

    def on_key_press(self, event):
        if event.key == 'a' and 'Control' in event.modifiers:
            for roi in self.roi_visuals:
                roi.select()
        elif event.key == 'Delete':
            for roi in self.roi_visuals[:]:
                if roi.selected:
                    self.delete_roi(roi)

    def remove_roi(self, roi):
        if self.current_roi == roi:
            self.current_roi = None
        self.roi_visuals.remove(roi)
        self.roiDeleted.emit(roi)

    def on_mouse_move(self, event):
        pos = self.translatedPoint(event.pos)
        for roi in self.roi_visuals:
            if not self.drawing_roi:
                if roi.mouseIsOver(pos):
                    self.current_roi = roi
        if event.is_dragging:
            dxy = (self.translatedPoint(event.pos) -
                   self.translatedPoint(event.last_event.pos))
            button = event.press_event.button
            if button == 1:
                self.panzoom.move(event.pos - event.last_event.pos)
            elif button == 2:
                if (not self.drawing_roi and
                        any([roi.mouseIsOver(pos)
                             for roi in self.roi_visuals])):
                    for roi in self.roi_visuals:
                        if roi.selected:
                            roi.translate(dxy)
                elif self.drawing_roi == True:
                    self.current_roi.extend(pos)
                else:
                    for roi in self.roi_visuals:
                        roi.deselect()
                    new_id = 1
                    while new_id in [roi.id for roi in self.roi_visuals]:
                        new_id += 1
                    self.current_roi = ROIVisual(new_id, pos)
                    self.drawing_roi = True
            self.update()

    def on_mouse_wheel(self, event):
        center = event.pos
        dz = event.delta[1]
        self.panzoom.zoom(np.exp(np.array([.1, .1]) * dz), center)

    def on_resize(self, event):
        self.width, self.height = event.size
        gloo.set_viewport(0, 0, self.width, self.height)

    def on_draw(self, event):
        gloo.clear()
        for ch in self.markers:
            ch.draw(self.tr_sys)
        for roi in self.roi_visuals:
            roi.draw(self.tr_sys)
        if self.current_roi != None:
            self.current_roi.draw(self.tr_sys)
volume1.transform = scene.STTransform(translate=(64, 64, 0))
volume2 = scene.visuals.Volume(vol2, parent=view.scene, threshold=0.2,
                               emulate_texture=emulate_texture)
volume2.visible = False

# Create three cameras (Fly, Turntable and Arcball)
fov = 60.
cam1 = scene.cameras.FlyCamera(parent=view.scene, fov=fov, name='Fly')
cam2 = scene.cameras.TurntableCamera(parent=view.scene, fov=fov,
                                     name='Turntable')
cam3 = scene.cameras.ArcballCamera(parent=view.scene, fov=fov, name='Arcball')
view.camera = cam2  # Select turntable at first

# Create an XYZAxis visual
axis = scene.visuals.XYZAxis(parent=view)
s = STTransform(translate=(50, 50), scale=(50, 50, 50, 1))
affine = s.as_matrix()
axis.transform = affine


# create colormaps that work well for translucent and additive volume
# rendering
class TransFire(BaseColormap):
    glsl_map = """
    vec4 translucent_fire(float t) {
        return vec4(pow(t, 0.5), t, t*t, max(0, t*1.05 - 0.05));
    }
    """


class TransGrays(BaseColormap):
    glsl_map = """
def __init__(self, canvas=None, aspect=None, **kwargs):
    self._aspect = aspect
    self.attach(canvas)
    STTransform.__init__(self, **kwargs)
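# A minimal sketch (the subclass name and label are hypothetical) of the
# pattern used above: extend STTransform with extra state while delegating
# the scale/translate handling to STTransform.__init__.
from vispy.visuals.transforms import STTransform


class LabeledSTTransform(STTransform):
    def __init__(self, label='', **kwargs):
        self._label = label
        STTransform.__init__(self, **kwargs)


t = LabeledSTTransform('overlay', scale=(2, 2), translate=(5, 5))
print(t._label, t.scale, t.translate)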