def __init__(self, **kwargs):
    super(RadolanCanvas, self).__init__(keys='interactive', **kwargs)
    # set size of Canvas
    self.size = 450, 450
    # unfreeze needed to add more elements
    self.unfreeze()
    # add grid central widget
    self.grid = self.central_widget.add_grid()
    # add view to grid
    self.view = self.grid.add_view(row=0, col=0)
    self.view.border_color = (0.5, 0.5, 0.5, 1)
    # add signal emitters
    self.mouse_moved = EventEmitter(source=self, type="mouse_moved")
    # block double clicks
    self.events.mouse_double_click.block()
    # initialize empty RADOLAN image
    img_data = np.zeros((900, 900))
    # initialize colormap; use cubehelix for now,
    # the nicest colormap for radar data in vispy
    cmap = 'cubehelix'
    # initialize Image visual with img_data and add it to the view
    self.image = Image(
        img_data,
        method='subdivide',
        # interpolation='bicubic',
        cmap=cmap,
        parent=self.view.scene)
    # add transform to Image (mostly positioning within canvas)
    self.image.transform = STTransform(translate=(0, 0, 0))
    # get RADOLAN lower-left point coordinate into self.r0
    self.r0 = utils.get_radolan_origin()
    # create cities (Markers and Text visuals)
    self.create_cities()
    # create PanZoomCamera
    self.cam = PanZoomCamera(name="PanZoom",
                             rect=Rect(0, 0, 900, 900),
                             aspect=1,
                             parent=self.view.scene)
    self.view.camera = self.cam
    self._mouse_position = None
    self.freeze()
    # print FPS to console, vispy SceneCanvas internal function
    self.measure_fps()

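# Hedged consumption sketch for the mouse_moved EventEmitter created in
# RadolanCanvas.__init__ above; the handler name and the bare instantiation
# are assumptions for illustration, not part of the original class.
def on_mouse_moved(event):
    # event.source is the emitting RadolanCanvas instance; any payload the
    # canvas attaches when it emits the event arrives on the same Event object
    print("mouse moved on", event.source)

canvas = RadolanCanvas()
canvas.mouse_moved.connect(on_mouse_moved)
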
def test_image_vertex_updates():
    """Test image visual coordinates are only built when needed."""
    size = (40, 40)
    with TestingCanvas(size=size, bgcolor="w") as c:
        shape = size + (3,)
        np.random.seed(0)
        image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
        with mock.patch.object(
                image, '_build_vertex_data',
                wraps=image._build_vertex_data) as build_vertex_mock:
            data = np.random.rand(*shape)
            image.set_data(data)
            c.render()
            build_vertex_mock.assert_called_once()
            build_vertex_mock.reset_mock()  # reset the count to 0

            # rendering again shouldn't cause vertex coordinates to be built
            c.render()
            build_vertex_mock.assert_not_called()

            # changing to data of the same shape shouldn't cause it
            data = np.zeros_like(data)
            image.set_data(data)
            c.render()
            build_vertex_mock.assert_not_called()

            # changing to another shape should
            data = data[:-5, :-5]
            image.set_data(data)
            c.render()
            build_vertex_mock.assert_called_once()

def test_image_nan_rgb(texture_format, num_bands):
    size = (40, 40)
    data = np.ones((40, 40, num_bands))
    data[:5, :5, :3] = np.nan  # upper left - RGB all NaN
    data[:5, 20:25, 0] = np.nan  # upper middle - R NaN
    data[:5, -5:, :3] = 0  # upper right - opaque RGB black square
    data[-5:, -5:, :] = np.nan  # lower right - RGBA all NaN
    if num_bands == 4:
        data[-5:, :5, 3] = np.nan  # lower left - Alpha NaN

    expected = (np.ones((40, 40, 4)) * 255).astype(np.uint8)
    # upper left - NaN goes to opaque black
    expected[:5, :5, :3] = 0
    # upper middle - NaN R goes to 0
    expected[:5, 20:25, 0] = 0
    # upper right - opaque RGB black square
    expected[:5, -5:, :3] = 0
    # lower right - NaN RGB/A goes to 0
    # RGBA case - we see the green background behind the image
    expected[-5:, -5:, 0] = 0
    expected[-5:, -5:, 2] = 0
    if num_bands == 3:
        # RGB case - opaque black because Alpha defaults to 1
        expected[-5:, -5:, 1] = 0
    # lower left - NaN Alpha goes to 0
    if num_bands == 4:
        # see the green background behind the image
        expected[-5:, :5, 0] = 0
        expected[-5:, :5, 2] = 0

    with TestingCanvas(size=size[::-1], bgcolor=(0, 1, 0)) as c:
        Image(data, cmap='grays', texture_format=texture_format,
              parent=c.scene)
        rendered = c.render()
        np.testing.assert_allclose(rendered, expected)

def __init__(self, N, width=600, height=600, decay_rate=1.0,
             hormone_secretion=None):
    from PIL.Image import open as open_image
    # setup simulation
    self._N = N
    self._INITIAL_FIELD = np.array(
        open_image(path.join(ENV_MAP_PATH, 'envmap01.png'))
    ).astype(np.float32) / 255.
    # self._INITIAL_FIELD = np.zeros(self._INITIAL_FIELD.shape)
    self._FIELD_WIDTH = self._INITIAL_FIELD.shape[1]
    self._FIELD_HEIGHT = self._INITIAL_FIELD.shape[0]
    self._FIELD_DECAY_RATE = decay_rate
    self._SECRATION = hormone_secretion
    sensor_th = np.linspace(0, 2 * np.pi, self.SENSOR_NUM, endpoint=False)
    self._SENSOR_POSITION = self.AGENT_RADIUS * np.array(
        [np.cos(sensor_th), np.sin(sensor_th)]).T
    self.reset()  # initialize all variables: position, velocity and field status
    # setup display
    self._canvas = SceneCanvas(size=(width, height), position=(0, 0),
                               keys='interactive',
                               title="ALife book " + self.__class__.__name__)
    self._canvas.events.mouse_double_click.connect(self._on_mouse_double_click)
    self._view = self._canvas.central_widget.add_view()
    self._view.camera = PanZoomCamera(
        (0, 0, self._FIELD_WIDTH, self._FIELD_HEIGHT), aspect=1)
    self._field_image = Image(self._field, interpolation='nearest',
                              parent=self._view.scene, method='subdivide',
                              clim=(0, 1))
    self._agent_polygon = []
    for i in range(self._N):
        p = AntSimulator._generate_agent_visual_polygon(self.AGENT_RADIUS)
        p.parent = self._field_image
        self._agent_polygon.append(p)
    self._canvas.show()

def test_image_equal_clims(texture_format, num_channels):
    """Test image visual with equal clims."""
    size = (40, 40)
    input_dtype = np.uint8
    shape = size + (num_channels,) if num_channels > 0 else size
    np.random.seed(0)
    data = _make_test_data(shape, input_dtype)
    with TestingCanvas(size=size[::-1], bgcolor="w") as c:
        Image(data, cmap='viridis',
              texture_format=texture_format,
              clim=(128.0, 128.0),
              parent=c.scene)
        rendered = c.render()[..., :3]

        if num_channels >= 3:
            # RGBs don't have colormaps
            assert rendered.sum() == 0
            return

        # not all black
        assert rendered.sum() != 0
        # not all white
        assert rendered.sum() != 255 * rendered.size
        # should be all the same value
        r_unique = np.unique(rendered[..., 0])
        g_unique = np.unique(rendered[..., 1])
        b_unique = np.unique(rendered[..., 2])
        assert r_unique.size == 1
        assert g_unique.size == 1
        assert b_unique.size == 1

def _add_graphics_elements(self): """ Create all the graphics objects (VISPY objects), put them on the canvas. """ # Image self._image_object = Image(None, parent=self._viewbox.scene) self._image_object.set_gl_state('translucent', depth_test=False) self._image_object.order = 1 self._image_object.visible = True self._text_box_width = 150 self._text_box_height = 60 self._text_box_offset = 10 # Text background box in upper-left corner self._text_bkg_rect = vispy.scene.visuals.Rectangle( [ self._text_box_width / 2 + self._text_box_offset, self._text_box_height / 2 + self._text_box_offset ], color=[0.1, 0.0, 0.0, .8], border_color=[0.1, 0.0, 0.0], border_width=2, height=self._text_box_height, width=self._text_box_width, radius=10.0, parent=self._canvas.scene) self._text_bkg_rect.set_gl_state('translucent', depth_test=False) self._text_bkg_rect.visible = True self._text_bkg_rect.order = 2 self._resize_text_bkg_box() # Text self._font1_size = 10 self._font2_size = 18 self._vspace = self._font1_size * 2.2 self._text_pos = [ self._text_box_offset + 10, self._text_box_offset + 10 ] self._text_obj = vispy.scene.visuals.Text("", parent=self._canvas.scene, color=[0.9, 0.8, 0.8], anchor_x='left', anchor_y='top') self._text_obj.pos = self._text_pos self._text_obj.font_size = self._font1_size self._text_obj.visible = True self._text_obj.order = 3 self._text2_pos = [self._text_pos[0], self._vspace + self._text_pos[1]] self._text2_obj = vispy.scene.visuals.Text("", parent=self._canvas.scene, color=[0.9, 0.8, 0.8], anchor_x='left', anchor_y='top') self._text2_obj.pos = self._text2_pos self._text2_obj.font_size = self._font2_size self._text2_obj.visible = True self._text2_obj.order = 3
def test_colormap_coolwarm():
    """Test colormap support using coolwarm preset colormap"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0] * size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap='coolwarm', clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_coolwarm.png")

def test_colormap_CubeHelix():
    """Test colormap support using cubehelix colormap in only blues"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0] * size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap=get_colormap('cubehelix', rot=0, start=0),
                      clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_cubehelix.png")

def test_colormap_single_hue():
    """Test colormap support using a single hue"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0] * size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap=get_colormap('single_hue', 255),
                      clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_hue.png")

def test_image_clims_and_gamma(input_dtype, texture_format, num_channels,
                               clim_on_init, data_on_init):
    """Test image visual with clims and gamma on shader."""
    size = (40, 40)
    if texture_format == '__dtype__':
        texture_format = input_dtype
    shape = size + (num_channels,) if num_channels > 0 else size
    np.random.seed(0)
    data = _make_test_data(shape, input_dtype)
    orig_clim, new_clim = _get_orig_and_new_clims(input_dtype)
    # 16-bit integers and above seem to have precision loss when scaled on the CPU
    is_16int_cpu_scaled = (np.dtype(input_dtype).itemsize >= 2
                           and np.issubdtype(input_dtype, np.integer)
                           and texture_format is None)
    clim_atol = 2 if is_16int_cpu_scaled else 1
    gamma_atol = 3 if is_16int_cpu_scaled else 2

    kwargs = {}
    if clim_on_init:
        kwargs['clim'] = orig_clim
    if data_on_init:
        kwargs['data'] = data
    # default is RGBA, anything except auto requires reformat
    set_data_fails = (num_channels != 4
                      and texture_format is not None
                      and texture_format != 'auto')

    with TestingCanvas(size=size[::-1], bgcolor="w") as c:
        image = Image(cmap='grays', texture_format=texture_format,
                      parent=c.scene, **kwargs)
        if not data_on_init:
            _set_image_data(image, data, set_data_fails)
            if set_data_fails:
                return
        rendered = c.render()
        _dtype = rendered.dtype
        shape_ratio = rendered.shape[0] // data.shape[0]
        rendered1 = downsample(rendered, shape_ratio, axis=(0, 1)).astype(_dtype)
        _compare_render(data, rendered1)

        # adjust color limits
        image.clim = new_clim
        rendered2 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
        scaled_data = (np.clip(data, new_clim[0], new_clim[1]) - new_clim[0]) \
            / (new_clim[1] - new_clim[0])
        _compare_render(scaled_data, rendered2, rendered1, atol=clim_atol)

        # adjust gamma
        image.gamma = 2
        rendered3 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
        _compare_render(scaled_data ** 2, rendered3, rendered2, atol=gamma_atol)

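# _make_test_data is assumed to live in the same test module; a hypothetical
# minimal version consistent with how it is used above (random samples scaled
# to the dtype's range for integer dtypes):
def _make_test_data(shape, input_dtype):
    data = np.random.random_sample(shape)
    if np.issubdtype(input_dtype, np.integer):
        data *= np.iinfo(input_dtype).max
    return data.astype(input_dtype)
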
def test_colormap_discrete():
    """Test discrete RGB colormap"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0] * size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap=Colormap(colors=['r', 'g', 'b'],
                                    interpolation='zero'),
                      clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_rgb.png")

def test_colormap():
    """Test colormap support for non-uniformly distributed control-points"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0] * size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap=Colormap(colors=['k', 'w', 'r'],
                                    controls=[0.0, 0.1, 1.0]),
                      clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_kwr.png")

def test_image(is_3d):
    """Test image visual"""
    size = (100, 50)
    with TestingCanvas(size=size, bgcolor='w') as c:
        image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
        shape = (size[1] - 10, size[0] - 10) + ((3,) if is_3d else ())
        np.random.seed(379823)
        data = np.random.rand(*shape)
        image.set_data(data)
        assert_image_approved(
            c.render(),
            "visuals/image%s.png" % ("_rgb" if is_3d else "_mono"))

def test_colormap_discrete_nu():
    """Test discrete colormap with non-uniformly distributed control-points"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0] * size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap=Colormap(np.array([[0, .75, 0], [.75, .25, .5]]),
                                    [0., .25, 1.], interpolation='zero'),
                      clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_nu.png")

def test_image():
    """Test image visual"""
    size = (100, 50)
    with TestingCanvas(size=size, bgcolor='w') as c:
        for three_d in (True, False):
            shape = (size[1] - 10, size[0] - 10) + ((3,) if three_d else ())
            np.random.seed(379823)
            data = np.random.rand(*shape)
            image = Image(data, cmap='grays', clim=[0, 1])
            c.draw_visual(image)
            assert_image_approved(
                "screenshot",
                "visuals/image%s.png" % ("_rgb" if three_d else "_mono"))

def test_image():
    """Test image visual"""
    size = (100, 50)
    with TestingCanvas(size=size, bgcolor='w') as c:
        for three_d in (True, False):
            shape = size[::-1] + ((3,) if three_d else ())
            data = np.random.rand(*shape)
            image = Image(data, cmap='grays', clim=[0, 1])
            c.draw_visual(image)
            if three_d:
                expected = data
            else:
                expected = np.tile(data[:, :, np.newaxis], (1, 1, 3))
            assert_image_equal("screenshot", expected)

def _add_graphics_elements(self): """ Create the VISPY graphics objects """ # Image self._image_object = Image(self._image, parent=self._viewbox.scene) self._image_object.set_gl_state('translucent', depth_test=False) self._image_object.order = 1 self._image_object.visible = True self._text_box_width = 150 self._text_box_height = 40 self._text_box_offset = 10 # Text background box in upper-left corner self._text_bkg_rect = vispy.scene.visuals.Rectangle( [ self._text_box_width / 2 + self._text_box_offset, self._text_box_height / 2 + self._text_box_offset ], color=[0.1, 0.0, 0.0, .8], border_color=[0.1, 0.0, 0.0], border_width=2, height=self._text_box_height, width=self._text_box_width, radius=10.0, parent=self._canvas.scene) self._text_bkg_rect.set_gl_state('translucent', depth_test=False) self._text_bkg_rect.visible = True self._text_bkg_rect.order = 2 # Text self._text = "?" self._text_pos = [ self._text_box_offset + 10, self._text_box_offset + 10 ] self._text_obj = vispy.scene.visuals.Text(self._text, parent=self._canvas.scene, color=[0.9, 0.8, 0.8], anchor_x='left', anchor_y='top') self._text_obj.pos = self._text_pos self._text_obj.font_size = 18 self._text_obj.visible = True self._text_obj.order = 3
def image_visual(image, parent=None):
    """
    Return a :class:`vispy.scene.visuals.Image` class instance using the
    given image.

    Parameters
    ----------
    image : array_like
        Image.
    parent : Node, optional
        Parent of the image visual in the `SceneGraph`.

    Returns
    -------
    Image
        Image visual.
    """
    image = np.clip(image, 0, 1)

    return Image(image, parent=parent)

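# Hedged usage sketch for image_visual(); the canvas/view setup below is an
# assumption for illustration and not taken from the original module.
if __name__ == '__main__':
    from vispy import app, scene
    canvas = scene.SceneCanvas(keys='interactive', show=True)
    view = canvas.central_widget.add_view()
    view.camera = scene.PanZoomCamera(aspect=1)
    # values outside [0, 1] are clipped by the helper before display
    image_visual(np.random.rand(256, 256, 3) * 1.5, parent=view.scene)
    view.camera.set_range()
    app.run()
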
def test_isocurve(ctx):
    ctx = VispyCtx(display_status=True)
    scale = 0.1
    cx = np.arange(-100, 100, scale)
    data = np.meshgrid(cx, cx)
    ff = f(data)
    levels = [0, 10]
    image = Image(ff, parent=ctx.view.scene)
    # move image behind curves
    image.transform = transforms.STTransform(scale=(scale, scale),
                                             translate=(0, 0, 0.5))
    color_lev = ['r', 'black']
    curve = Isocurve(ff, levels=levels, color_lev=color_lev,
                     parent=ctx.view.scene)
    curve.transform = transforms.STTransform(scale=(scale, scale))
    # Set 2D camera
    ctx.view.camera = PanZoomCamera(aspect=1)
    # the camera will scale to the contents in the scene
    ctx.view.camera.set_range()
    ctx.run(cam=False)
    return ctx

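# The scalar field `f` used by test_isocurve() is assumed to be defined
# elsewhere in the module; a hypothetical, compatible definition (any smooth
# 2D scalar field works for drawing isocurves):
def f(data):
    # data is the [X, Y] pair returned by np.meshgrid
    x, y = data
    return np.sin(x / 10.0) * np.cos(y / 10.0)
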
def test_image_clims_and_gamma():
    """Test image visual with clims and gamma on shader."""
    size = (40, 40)
    with TestingCanvas(size=size, bgcolor="w") as c:
        for three_d in (True,):
            shape = size + ((3,) if three_d else ())
            np.random.seed(0)
            image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
            data = np.random.rand(*shape)
            image.set_data(data)
            rendered = c.render()
            _dtype = rendered.dtype
            shape_ratio = rendered.shape[0] // data.shape[0]
            rendered1 = downsample(rendered, shape_ratio, axis=(0, 1)).astype(_dtype)
            predicted = _make_rgba(data)
            assert np.allclose(predicted, rendered1, atol=1)

            # adjust contrast limits
            new_clim = (0.3, 0.8)
            image.clim = new_clim
            rendered2 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
            scaled_data = np.clip((data - new_clim[0]) / np.diff(new_clim)[0], 0, 1)
            predicted = _make_rgba(scaled_data)
            assert np.allclose(predicted, rendered2, atol=1)
            assert not np.allclose(rendered1, rendered2, atol=10)

            # adjust gamma
            image.gamma = 2
            rendered3 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
            predicted = _make_rgba(scaled_data ** 2)
            # use the builtin float (np.float was removed from NumPy)
            assert np.allclose(
                predicted.astype(float), rendered3.astype(float), atol=2)
            assert not np.allclose(
                rendered2.astype(float), rendered3.astype(float), atol=10)

def test_image_nan_single_band(texture_format):
    size = (40, 40)
    data = np.ones((40, 40))
    data[:5, :5] = np.nan
    data[:5, -5:] = 0
    expected = (np.ones((40, 40, 4)) * 255).astype(np.uint8)
    # black square
    expected[:5, -5:, :3] = 0
    if texture_format is None:
        # CPU scaling's NaNs get converted to 0s
        expected[:5, :5, :3] = 0
    else:
        # GPU receives NaNs
        # nan - transparent square
        expected[:5, :5, 0] = 0
        expected[:5, :5, 1] = 255  # match the 'green' background
        expected[:5, :5, 2] = 0

    with TestingCanvas(size=size[::-1], bgcolor=(0, 1, 0)) as c:
        Image(data, cmap='grays', texture_format=texture_format,
              parent=c.scene)
        rendered = c.render()
        np.testing.assert_allclose(rendered, expected)

def add_image(self, image_data):
    image = Image(data=image_data, parent=self.view.scene)
    if self.view.camera:
        self.view.camera.set_range()
    return image
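
# Hedged usage sketch for add_image(): `viewer` stands for an instance of the
# owning class (anything exposing a vispy ViewBox as self.view); the variable
# name and the frame data are assumptions for illustration.
frame = np.random.rand(512, 512).astype(np.float32)
img = viewer.add_image(frame)
img.cmap = 'viridis'   # ImageVisual exposes cmap as a settable property
img.clim = (0.0, 1.0)  # and clim for contrast limits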