def test_container():
    """Verify Container add/clear semantics and shallow-copy independence."""
    container = actor.Container()
    axes = actor.axes()

    # A single child: the container mirrors its geometry summary.
    container.add(axes)
    npt.assert_equal(len(container), 1)
    npt.assert_equal(container.GetBounds(), axes.GetBounds())
    npt.assert_equal(container.GetCenter(), axes.GetCenter())
    npt.assert_equal(container.GetLength(), axes.GetLength())

    container.clear()
    npt.assert_equal(len(container), 0)

    container.add(axes)
    copied = shallow_copy(container)
    copied.add(actor.axes())

    # The copy gained an item but still shares position/visibility values.
    assert_greater(len(copied), len(container))
    npt.assert_equal(copied.GetPosition(), container.GetPosition())
    npt.assert_equal(copied.GetVisibility(), container.GetVisibility())

    # Check that mutating the shallow copy does not modify the original.
    copied.SetVisibility(False)
    npt.assert_equal(container.GetVisibility(), True)

    copied.SetPosition((1, 1, 1))
    npt.assert_equal(container.GetPosition(), (0, 0, 0))
def test_parallel_projection():
    """Parallel projection should render both axes at full size.

    With a perspective camera the farther axes actor shrinks; switching to
    parallel projection makes both the same size, so more pixels light up.
    """
    scene = window.Scene()
    near_axes = actor.axes()
    far_axes = actor.axes()
    far_axes.SetPosition((2, 0, 0))

    # Add both axes.
    scene.add(near_axes, far_axes)

    # Put the camera on a angle so that the
    # camera can show the difference between perspective
    # and parallel projection
    scene.set_camera((1.5, 1.5, 1.5))
    scene.GetActiveCamera().Zoom(2)

    # window.show(scene, reset_camera=True)
    scene.reset_camera()
    persp_arr = window.snapshot(scene)

    scene.projection('parallel')
    # window.show(scene, reset_camera=False)
    para_arr = window.snapshot(scene)

    # More non-background pixels under parallel projection.
    npt.assert_equal(np.sum(para_arr > 0) > np.sum(persp_arr > 0), True)

    # Switching back restores the original pixel count.
    scene.projection('perspective')
    para_arr = window.snapshot(scene)
    npt.assert_equal(np.sum(para_arr > 0), np.sum(persp_arr > 0))
def test_peak_slicer(interactive=False):
    """Exercise peak_slicer display/extent slicing and its error paths."""
    _peak_dirs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='f4')
    # peak_dirs.shape = (1, 1, 1) + peak_dirs.shape
    peak_dirs = np.zeros((11, 11, 11, 3, 3))
    peak_values = np.random.rand(11, 11, 11, 3)
    peak_dirs[:, :, :] = _peak_dirs

    scene = window.Scene()
    peak_actor = actor.peak_slicer(peak_dirs)
    scene.add(peak_actor)
    scene.add(actor.axes((11, 11, 11)))
    if interactive:
        window.show(scene)

    scene.clear()
    scene.add(peak_actor)
    scene.add(actor.axes((11, 11, 11)))

    # Sweep every slice along each axis to exercise display_extent/display.
    for k in range(11):
        peak_actor.display_extent(0, 10, 0, 10, k, k)
    for j in range(11):
        peak_actor.display_extent(0, 10, j, j, 0, 10)
    for i in range(11):
        peak_actor.display(i, None, None)

    scene.rm_all()

    # Full-option construction (values, affine, LOD).
    peak_actor = actor.peak_slicer(
        peak_dirs,
        peak_values,
        mask=None,
        affine=np.diag([3, 2, 1, 1]),
        colors=None,
        opacity=0.8,
        linewidth=3,
        lod=True,
        lod_points=10 ** 4,
        lod_points_size=3)
    scene.add(peak_actor)
    scene.add(actor.axes((11, 11, 11)))
    if interactive:
        window.show(scene)

    report = window.analyze_scene(scene)
    ex = ['vtkLODActor', 'vtkOpenGLActor']
    npt.assert_equal(report.actors_classnames, ex)

    # 6d data
    data_6d = (255 * np.random.rand(5, 5, 5, 5, 5, 5))
    npt.assert_raises(ValueError, actor.peak_slicer, data_6d, data_6d)
def test_peak_slicer(interactive=False):
    """Exercise peak_slicer display/extent slicing (Renderer-era variant).

    NOTE(review): this duplicates the Scene-based test_peak_slicer and uses
    the deprecated ``window.Renderer``/``analyze_renderer`` API — confirm
    which FURY version this chunk targets before modernizing.
    """
    _peak_dirs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='f4')
    # peak_dirs.shape = (1, 1, 1) + peak_dirs.shape
    peak_dirs = np.zeros((11, 11, 11, 3, 3))
    peak_values = np.random.rand(11, 11, 11, 3)
    peak_dirs[:, :, :] = _peak_dirs

    renderer = window.Renderer()
    peak_actor = actor.peak_slicer(peak_dirs)
    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))
    if interactive:
        window.show(renderer)

    renderer.clear()
    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))

    # Sweep every slice along each axis.
    for k in range(11):
        peak_actor.display_extent(0, 10, 0, 10, k, k)
    for j in range(11):
        peak_actor.display_extent(0, 10, j, j, 0, 10)
    for i in range(11):
        peak_actor.display(i, None, None)

    renderer.rm_all()

    # Full-option construction (values, affine, LOD).
    peak_actor = actor.peak_slicer(
        peak_dirs,
        peak_values,
        mask=None,
        affine=np.diag([3, 2, 1, 1]),
        colors=None,
        opacity=1,
        linewidth=3,
        lod=True,
        lod_points=10 ** 4,
        lod_points_size=3)
    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))
    if interactive:
        window.show(renderer)

    report = window.analyze_renderer(renderer)
    ex = ['vtkLODActor', 'vtkOpenGLActor', 'vtkOpenGLActor', 'vtkOpenGLActor']
    npt.assert_equal(report.actors_classnames, ex)
def test_shader_to_actor(interactive=False):
    """Render a shader-decorated cube and check the error paths."""
    cube = generate_cube_with_effect()

    scene = window.Scene()
    scene.add(cube)
    if interactive:
        scene.add(actor.axes())
        window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    # test errors
    npt.assert_raises(ValueError, shader_to_actor, cube, "error", vertex_impl)
    npt.assert_raises(ValueError, shader_to_actor, cube, "geometry",
                      vertex_impl)
    npt.assert_raises(ValueError, shader_to_actor, cube, "vertex",
                      vertex_impl, block="error")
    npt.assert_raises(ValueError, replace_shader_in_actor, cube, "error",
                      vertex_impl)
def test_billboard_actor(interactive=False):
    """Render three billboards shaded as fake spheres via a fragment impl."""
    scene = window.Scene()
    scene.background((1, 1, 1))

    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0]])
    colors = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])
    scale = [1, 2, 1]

    # GLSL fragment implementation: discard pixels outside the unit disc and
    # light the remainder as a sphere.
    fake_sphere = \
        """
        float len = length(point);
        float radius = 1.;
        if(len > radius)
            {discard;}
        vec3 normalizedPoint = normalize(vec3(point.xy, sqrt(1. - len)));
        vec3 direction = normalize(vec3(1., 1., 1.));
        float df = max(0, dot(direction, normalizedPoint));
        float sf = pow(df, 24);
        fragOutput0 = vec4(max(df * color, sf * vec3(1)), 1);
        """

    billboard_actor = actor.billboard(centers,
                                      colors=colors.astype(np.uint8),
                                      scale=scale,
                                      fs_impl=fake_sphere)
    scene.add(billboard_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)
def test_matplotlib_figure():
    """Convert a matplotlib figure to a FURY figure actor and check colors.

    Builds a three-panel categorical plot, rasterizes it with
    ``matplotlib_figure_to_numpy`` and renders two figure actors, then
    verifies the expected plot colors appear in the snapshot.
    """
    names = ['group_a', 'group_b', 'group_c']
    values = [1, 10, 100]

    fig = plt.figure(figsize=(9, 3))
    plt.subplot(131)
    plt.bar(names, values)
    plt.subplot(132)
    plt.scatter(names, values)
    plt.subplot(133)
    plt.plot(names, values)
    plt.suptitle('Categorical Plotting')

    arr = matplotlib_figure_to_numpy(fig, dpi=300, transparent=True)
    # FIX: release the matplotlib figure once rasterized — the original
    # version leaked it (the duplicate test elsewhere already does this).
    plt.close('all')

    fig_actor = actor.figure(arr, 'cubic')
    fig_actor2 = actor.figure(arr, 'cubic')

    scene = window.Scene()
    scene.background((1, 1, 1.))
    ax_actor = actor.axes(scale=(1000, 1000, 1000))
    scene.add(ax_actor)
    scene.add(fig_actor)
    scene.add(fig_actor2)
    ax_actor.SetPosition(0, 500, -800)
    fig_actor2.SetPosition(500, 800, -400)

    display = window.snapshot(scene, order_transparent=True)
    res = window.analyze_snapshot(display, bg_color=(255, 255, 255.),
                                  colors=[(31, 119, 180), (255, 0, 0)],
                                  find_objects=False)
    npt.assert_equal(res.colors_found, [True, True])
def test_vertices_from_actor(interactive=False):
    """Check vertices_from_actor returns the repeated-primitive vertices."""
    expected = np.array([[1.5, -0.5, 0.],
                         [1.5, 0.5, 0],
                         [2.5, 0.5, 0],
                         [2.5, -0.5, 0],
                         [-1, 1, 0],
                         [-1, 3, 0],
                         [1, 3, 0],
                         [1, 1, 0],
                         [-0.5, -0.5, 0],
                         [-0.5, 0.5, 0],
                         [0.5, 0.5, 0],
                         [0.5, -0.5, 0]])
    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0]])
    colors = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])
    scales = [1, 2, 1]

    verts, faces = fp.prim_square()
    big_verts, big_faces, big_colors = fp.repeat_primitive(
        verts, faces, centers=centers, colors=colors, scales=scales)[:3]

    actr = get_actor_from_primitive(big_verts, big_faces, big_colors)
    actr.GetProperty().BackfaceCullingOff()

    if interactive:
        scene = window.Scene()
        scene.add(actor.axes())
        scene.add(actr)
        window.show(scene)

    res_vertices = vertices_from_actor(actr)
    npt.assert_array_almost_equal(expected, res_vertices)
def test_billboard_actor(interactive=False):
    """Render eight fake-sphere billboards and count them in the snapshot."""
    scene = window.Scene()
    scene.background((1, 1, 1))

    centers = np.array([[0, 0, 0],
                        [5, -5, 5],
                        [-7, 7, -7],
                        [10, 10, 10],
                        [10.5, 11.5, 11.5],
                        [12, -12, -12],
                        [-17, 17, 17],
                        [-22, -22, 22]])
    colors = np.array([[1, 1, 0],
                       [0, 0, 0],
                       [1, 0, 1],
                       [0, 0, 1],
                       [1, 1, 1],
                       [1, 0, 0],
                       [0, 1, 0],
                       [0, 1, 1]])
    scales = [6, .4, 1.2, 1, .2, .7, 3, 2]

    # GLSL fragment implementation: discard outside the unit disc, shade the
    # rest as a lit sphere.
    fake_sphere = \
        """
        float len = length(point);
        float radius = 1.;
        if(len > radius)
            {discard;}
        vec3 normalizedPoint = normalize(vec3(point.xy, sqrt(1. - len)));
        vec3 direction = normalize(vec3(1., 1., 1.));
        float df_1 = max(0, dot(direction, normalizedPoint));
        float sf_1 = pow(df_1, 24);
        fragOutput0 = vec4(max(df_1 * color, sf_1 * vec3(1)), 1);
        """

    billboard_actor = actor.billboard(centers,
                                      colors=colors,
                                      scales=scales,
                                      fs_impl=fake_sphere)
    scene.add(billboard_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 8)
def test_text_widget():
    """Build a button and a text widget on a renderer and count actors."""
    interactive = False

    renderer = window.Renderer()
    axes = actor.axes()
    window.add(renderer, axes)
    renderer.ResetCamera()

    show_manager = window.ShowManager(renderer, size=(900, 900))
    if interactive:
        show_manager.initialize()
        show_manager.render()

    fetch_viz_icons()
    button_png = read_viz_icons(fname='home3.png')

    def button_callback(obj, event):
        print('Button Pressed')

    button = widget.button(show_manager.iren,
                           show_manager.ren,
                           button_callback,
                           button_png, (.8, 1.2), (100, 100))

    # Module-level toggle flipped by the text callback.
    global rulez
    rulez = True

    def text_callback(obj, event):
        global rulez
        print('Text selected')
        if rulez:
            obj.GetTextActor().SetInput("Diffusion Imaging Rulez!!")
            rulez = False
        else:
            obj.GetTextActor().SetInput("Diffusion Imaging in Python")
            rulez = True
        show_manager.render()

    text = widget.text(show_manager.iren,
                       show_manager.ren,
                       text_callback,
                       message="Diffusion Imaging in Python",
                       left_down_pos=(0., 0.),
                       right_top_pos=(0.4, 0.05),
                       opacity=1.,
                       border=False)

    if not interactive:
        button.Off()
        text.Off()

    if interactive:
        show_manager.render()
        show_manager.start()

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 3)
def test_vertices_from_actor(interactive=False):
    """Check vertices/colors/array accessors, both as numpy and vtk arrays."""
    expected = np.array([[1.5, -0.5, 0.],
                         [1.5, 0.5, 0],
                         [2.5, 0.5, 0],
                         [2.5, -0.5, 0],
                         [-1, 1, 0],
                         [-1, 3, 0],
                         [1, 3, 0],
                         [1, 1, 0],
                         [-0.5, -0.5, 0],
                         [-0.5, 0.5, 0],
                         [0.5, 0.5, 0],
                         [0.5, -0.5, 0]])
    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0]])
    colors = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])
    scales = [1, 2, 1]

    verts, faces = fp.prim_square()
    big_verts, big_faces, big_colors = fp.repeat_primitive(
        verts, faces, centers=centers, colors=colors, scales=scales)[:3]

    actr = get_actor_from_primitive(big_verts, big_faces, big_colors)
    actr.GetProperty().BackfaceCullingOff()

    if interactive:
        scene = window.Scene()
        scene.add(actor.axes())
        scene.add(actr)
        window.show(scene)

    # Vertices, as numpy and as raw vtk arrays.
    res_vertices = vertices_from_actor(actr)
    res_vertices_vtk = vertices_from_actor(actr, as_vtk=True)
    npt.assert_array_almost_equal(expected, res_vertices)
    npt.assert_equal(isinstance(res_vertices_vtk, vtk.vtkDoubleArray), True)

    # test colors_from_actor:
    l_colors = utils.colors_from_actor(actr)
    l_colors_vtk = utils.colors_from_actor(actr, as_vtk=True)
    l_colors_none = utils.colors_from_actor(actr, array_name='col')
    npt.assert_equal(l_colors_none, None)
    npt.assert_equal(isinstance(l_colors_vtk, vtk.vtkUnsignedCharArray), True)
    npt.assert_equal(np.unique(l_colors, axis=0).shape, colors.shape)

    # Generic array accessor should agree with the color accessor.
    l_array = utils.array_from_actor(actr, 'colors')
    l_array_vtk = utils.array_from_actor(actr, 'colors', as_vtk=True)
    l_array_none = utils.array_from_actor(actr, 'col')
    npt.assert_array_equal(l_array, l_colors)
    npt.assert_equal(l_array_none, None)
    npt.assert_equal(isinstance(l_array_vtk, vtk.vtkUnsignedCharArray), True)
def initialize(self):
    """Register vtk-web protocols and build the SDF scene on first call."""
    # Bring used components
    self.registerVtkWebProtocol(vtk_protocols.vtkWebMouseHandler())
    self.registerVtkWebProtocol(vtk_protocols.vtkWebViewPort())
    self.registerVtkWebProtocol(vtkWebPublishImageDelivery(decode=False))
    # Custom API
    self.registerVtkWebProtocol(MouseWheel())

    # tell the C++ web app to use no encoding.
    # ParaViewWebPublishImageDelivery must be set to decode=False to match.
    self.getApplication().SetImageEncoding(0)

    # Update authentication key to use
    self.updateSecret(_Server.authKey)

    if not _Server.view:
        scene = window.Scene()
        scene.background((1, 1, 1))

        if os.path.isfile(_Server.centersToLoad):
            # Load centers/colors from the JSON file if present.
            with open(_Server.centersToLoad) as f:
                f_dict = json.loads(f.read())
            centers = np.array(f_dict["centers"])
            n_points = len(centers)
            colors = np.array(f_dict["colors"])
            directions = np.random.rand(n_points, 3)
        else:
            # Otherwise generate a random point cloud.
            n_points = 10000
            translate = 100
            centers = translate * np.random.rand(n_points, 3) - translate / 2
            colors = 255 * np.random.rand(n_points, 3)
            directions = np.random.rand(n_points, 3)

        # scales = np.random.rand(n_points, 3)
        prim_type = ['sphere', 'ellipsoid', 'torus']
        primitive = [random.choice(prim_type) for _ in range(n_points)]

        sdf_actor = actor.sdf(centers, directions, colors, primitive)
        scene.add(sdf_actor)
        scene.add(actor.axes())

        showm = window.ShowManager(scene)
        renderWindow = showm.window
        showm.iren.EnableRenderOff()
        self.getApplication().GetObjectIdMap().SetActiveObject(
            "VIEW", renderWindow)
def test_deprecated():
    """Deprecated window API entry points must emit DeprecationWarning."""
    # Renderer class alias.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DeprecationWarning)
        scene = window.Renderer()
        npt.assert_equal(scene.size(), (0, 0))
        npt.assert_equal(len(w), 1)
        npt.assert_(issubclass(w[-1].category, DeprecationWarning))

    # renderer() factory alias.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DeprecationWarning)
        scene = window.renderer(background=(0.0, 1.0, 0.0))
        npt.assert_equal(scene.size(), (0, 0))
        npt.assert_equal(len(w), 1)
        npt.assert_(issubclass(w[-1].category, DeprecationWarning))

    # ren() alias warns twice (alias of an alias).
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DeprecationWarning)
        scene = window.ren()
        npt.assert_equal(scene.size(), (0, 0))
        npt.assert_equal(len(w), 2)
        npt.assert_(issubclass(w[-1].category, DeprecationWarning))

    # Deprecated module-level helpers operating on a Scene.
    scene = window.Scene()
    with warnings.catch_warnings(record=True) as l_warn:
        warnings.simplefilter("always", DeprecationWarning)
        obj = actor.axes(scale=(1, 1, 1))
        window.add(scene, obj)
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 3)
        window.rm(scene, obj)
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 0)
        window.add(scene, obj)
        window.rm_all(scene)
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 0)
        window.add(scene, obj)
        window.clear(scene)
        report = window.analyze_renderer(scene)
        npt.assert_equal(report.actors, 0)
        deprecated_warns = [
            w for w in l_warn
            if issubclass(w.category, DeprecationWarning)
        ]
        npt.assert_equal(len(deprecated_warns), 7)
        npt.assert_(issubclass(l_warn[-1].category, DeprecationWarning))
def test_sdf_actor(interactive=False):
    """Render three SDF primitives and count them in the snapshot."""
    scene = window.Scene()
    scene.background((1, 1, 1))

    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0]]) * 11
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    scales = [1, 2, 3]
    primitive = ['sphere', 'ellipsoid', 'torus']

    sdf_actor = actor.sdf(centers, directions, colors, primitive, scales)
    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 3)
def test_rotate(interactive=False):
    """Rotating shallow copies of a ROI actor must increase red coverage.

    Renders a red ROI contour, then adds two rotated shallow copies and
    checks that the red channel sum of the snapshot grows.
    """
    A = np.zeros((50, 50, 50))
    A[20:30, 20:30, 10:40] = 100
    act = actor.contour_from_roi(A)

    scene = window.Scene()
    scene.add(act)

    if interactive:
        window.show(scene)
    else:
        arr = window.snapshot(scene, offscreen=True)
        # FIX: the original computed `np.sum(arr[..., 0].sum())`, a redundant
        # reduction of an already-scalar value; one sum is enough.
        red_sum = arr[..., 0].sum()

    act2 = utils.shallow_copy(act)
    rot = (90, 1, 0, 0)
    rotate(act2, rot)

    act3 = utils.shallow_copy(act)
    scene.add(act2)

    rot = (90, 0, 1, 0)
    rotate(act3, rot)
    scene.add(act3)
    scene.add(actor.axes())

    if interactive:
        window.show(scene)
    else:
        # Assert only in the non-interactive path: `red_sum` is undefined when
        # interactive=True (the baseline snapshot is skipped).
        arr = window.snapshot(scene, offscreen=True)
        red_sum_new = arr[..., 0].sum()
        npt.assert_equal(red_sum_new > red_sum, True)
def test_square_actor(interactive=False):
    """Build an actor from three repeated square primitives and render it."""
    scene = window.Scene()
    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0]])
    colors = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])
    scale = [1, 2, 3]

    verts, faces = prim_square()
    # NOTE(review): this call uses the `scale=` keyword while other tests in
    # the file use `scales=` — presumably an older repeat_primitive
    # signature; confirm before unifying.
    res = repeat_primitive(verts, faces, centers=centers,
                           colors=colors, scale=scale)
    big_verts, big_faces, big_colors, _ = res

    sq_actor = get_actor_from_primitive(big_verts, big_faces, big_colors)
    sq_actor.GetProperty().BackfaceCullingOff()

    scene.add(sq_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)
def test_matplotlib_figure():
    """Convert a matplotlib figure into FURY figure actors and snapshot it."""
    names = ['group_a', 'group_b', 'group_c']
    values = [1, 10, 100]

    fig = plt.figure(figsize=(9, 3))
    plt.subplot(131)
    plt.bar(names, values)
    plt.subplot(132)
    plt.scatter(names, values)
    plt.subplot(133)
    plt.plot(names, values)
    plt.suptitle('Categorical Plotting')

    arr = matplotlib_figure_to_numpy(fig, dpi=500, transparent=True)
    plt.close('all')

    fig_actor = actor.figure(arr, 'cubic')
    fig_actor2 = actor.figure(arr, 'cubic')

    scene = window.Scene()
    scene.background((1, 1, 1.))
    ax_actor = actor.axes(scale=(1000, 1000, 1000))
    scene.add(ax_actor)
    scene.add(fig_actor)
    scene.add(fig_actor2)
    ax_actor.SetPosition(-50, 500, -800)
    fig_actor2.SetPosition(500, 800, -400)

    display = window.snapshot(scene, 'test_mpl.png',
                              order_transparent=False,
                              offscreen=True)
    res = window.analyze_snapshot(display, bg_color=(255, 255, 255.),
                                  colors=[(31, 119, 180)],
                                  find_objects=False)
    # omit assert from now until we know why snapshot creates
    # different colors in Github Actions but not on our computers
    # npt.assert_equal(res.colors_found, [True, True])
    # TODO: investigate further this issue with snapshot in Actions
    pass
def test_active_camera():
    """Exercise the Scene camera API: zoom, rotations, dolly, accessors."""
    scene = window.Scene()
    scene.add(actor.axes(scale=(1, 1, 1)))

    scene.reset_camera()
    scene.reset_clipping_range()

    direction = scene.camera_direction()
    position, focal_point, view_up = scene.get_camera()

    scene.set_camera((0., 0., 1.), (0., 0., 0), view_up)
    position, focal_point, view_up = scene.get_camera()
    npt.assert_almost_equal(np.dot(direction, position), -1)

    # Zoom does not move the camera position.
    scene.zoom(1.5)
    new_position, _, _ = scene.get_camera()
    npt.assert_array_almost_equal(position, new_position)
    scene.zoom(1)

    # rotate around focal point
    scene.azimuth(90)
    position, _, _ = scene.get_camera()
    npt.assert_almost_equal(position, (1.0, 0.0, 0))

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    # rotate around camera's center
    scene.yaw(90)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=[(0, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    scene.yaw(-90)
    scene.elevation(90)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=(0, 255, 0))
    npt.assert_equal(report.colors_found, [True])

    scene.set_camera((0., 0., 1.), (0., 0., 0), view_up)

    # vertical rotation of the camera around the focal point
    scene.pitch(10)
    scene.pitch(-10)

    # rotate around the direction of projection
    scene.roll(90)

    # inverted normalized distance from focal point along the direction
    # of the camera
    position, _, _ = scene.get_camera()
    scene.dolly(0.5)
    new_position, focal_point, view_up = scene.get_camera()
    npt.assert_almost_equal(position[2], 0.5 * new_position[2])

    # The raw vtk camera must agree with the Scene accessors.
    cam = scene.camera()
    npt.assert_equal(new_position, cam.GetPosition())
    npt.assert_equal(focal_point, cam.GetFocalPoint())
    npt.assert_equal(view_up, cam.GetViewUp())
blue_ball = p.createMultiBody(baseMass=0.5,
                              baseCollisionShapeIndex=blue_ball_coll,
                              basePosition=[-10, 0, 0],
                              baseOrientation=[0, 0, 0, 1])

###############################################################################
# We set the coefficient of restitution of both the balls to `0.6`.

p.changeDynamics(red_ball, -1, restitution=0.6)
p.changeDynamics(blue_ball, -1, restitution=0.6)

###############################################################################
# We add all the actors to the scene.

scene = window.Scene()
scene.add(actor.axes())
scene.add(red_ball_actor)
scene.add(blue_ball_actor)

showm = window.ShowManager(scene,
                           size=(900, 700),
                           reset_camera=False,
                           order_transparent=True)
showm.initialize()

counter = itertools.count()

###############################################################################
# Method to sync objects.


def sync_actor(actor, multibody):
    # NOTE(review): only the first statement of this helper is visible in
    # this chunk — confirm the full body against the original tutorial.
    pos, orn = p.getBasePositionAndOrientation(multibody)
###############################################################################
# Access the memory of the vertices of all the cubes

vertices = utils.vertices_from_actor(fury_actor)
num_vertices = vertices.shape[0]
num_objects = centers.shape[0]

###############################################################################
# Access the memory of the colors of all the cubes

vcolors = utils.colors_from_actor(fury_actor, 'colors')

###############################################################################
# Adding an actor showing the axes of the world coordinates

ax = actor.axes(scale=(10, 10, 10))

scene.add(fury_actor)
scene.add(label_actor)
scene.add(ax)
scene.reset_camera()

###############################################################################
# Create the Picking manager

pickm = pick.PickingManager()

###############################################################################
# Time to make the callback which will be called when we pick an object
box_actor = actor.box(centers=np.array([[0, 0, 0]]),
                      directions=np.array([[0, 0, 0]]),
                      scales=(0.02, 0.02, 0.02),
                      colors=np.array([[1, 0, 0]]))

ball_actor = actor.sphere(centers=np.array([[0, 0, 0]]),
                          radii=ball_radius,
                          colors=np.array([1, 0, 1]))

###############################################################################
# Now we add the necessary actors to the scene and set the camera for better
# visualization.

scene = window.Scene()
scene.set_camera((10.28, -7.10, 6.39), (0.0, 0.0, 0.4), (-0.35, 0.26, 1.0))
scene.add(actor.axes(scale=(0.5, 0.5, 0.5)), base_actor, brick_actor)
scene.add(rope_actor, box_actor, ball_actor)

showm = window.ShowManager(scene,
                           size=(900, 768),
                           reset_camera=False,
                           order_transparent=True)
showm.initialize()

###############################################################################
# Position the base correctly.

base_pos, base_orn = p.getBasePositionAndOrientation(base)
base_actor.SetPosition(*base_pos)
def test_odf_slicer(interactive=False):
    """Exercise odf_slicer: affine/mask, colormaps, display, error paths."""
    # TODO: we should change the odf_slicer to work directly
    # vertices and faces of a sphere rather that needing
    # a specific type of sphere. We can use prim_sphere
    # as an alternative to get_sphere.
    vertices, faces = prim_sphere('repulsion100', True)
    sphere = Sphere()
    sphere.vertices = vertices
    sphere.faces = faces

    shape = (11, 11, 11, 100)
    odfs = np.ones(shape)

    affine = np.array([[2.0, 0.0, 0.0, 3.0],
                       [0.0, 2.0, 0.0, 3.0],
                       [0.0, 0.0, 2.0, 1.0],
                       [0.0, 0.0, 0.0, 1.0]])
    mask = np.ones(odfs.shape[:3], bool)
    mask[:4, :4, :4] = False

    # Test that affine and mask work
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, affine=affine,
                                 mask=mask, scale=.25, colormap='blues')

    k = 2
    I, J, _ = odfs.shape[:3]
    odf_actor.display_extent(0, I - 1, 0, J - 1, k, k)

    scene = window.Scene()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene, reset_camera=False)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, find_objects=True)
    # 16 voxels are masked out of the 11x11 slice.
    npt.assert_equal(report.objects, 11 * 11 - 16)

    # Test that global colormap works
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask,
                                 scale=.25, colormap='blues',
                                 norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that the most basic odf_slicer instanciation works
    odf_actor = actor.odf_slicer(odfs)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that odf_slicer.display works properly
    scene.clear()
    scene.add(odf_actor)
    scene.add(actor.axes((11, 11, 11)))
    for i in range(11):
        odf_actor.display(i, None, None)
        if interactive:
            window.show(scene)
    for j in range(11):
        odf_actor.display(None, j, None)
        if interactive:
            window.show(scene)

    # With mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask,
                                 scale=.25, colormap='blues',
                                 norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # global_cm=True with colormap=None should raise an error
    npt.assert_raises(IOError, actor.odf_slicer, odfs, sphere=None,
                      mask=None, scale=.25, colormap=None,
                      norm=False, global_cm=True)

    vertices2, faces2 = prim_sphere('repulsion200', True)
    sphere2 = Sphere()
    sphere2.vertices = vertices2
    sphere2.faces = faces2

    # Dimension mismatch between sphere vertices and number
    # of SF coefficients will raise an error.
    npt.assert_raises(ValueError, actor.odf_slicer, odfs,
                      mask=None, sphere=sphere2, scale=.25)

    # colormap=None and global_cm=False results in directionally encoded colors
    odf_actor = actor.odf_slicer(odfs, sphere=None, mask=None,
                                 scale=.25, colormap=None,
                                 norm=False, global_cm=False)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    del odf_actor
    del odfs
def test_sdf_actor(interactive=False):
    """Render SDF primitives with list/str specs and check warning paths."""
    scene = window.Scene()
    scene.background((1, 1, 1))

    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0], [2, 2, 0]]) * 11
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 1, 0]])
    scales = [1, 2, 3, 4]
    primitive = ['sphere', 'ellipsoid', 'torus', 'capsule']

    sdf_actor = actor.sdf(centers, directions, colors, primitive, scales)
    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)

    # Draw 3 spheres as the primitive type is str
    scene.clear()
    primitive = 'sphere'
    sdf_actor = actor.sdf(centers, directions, colors, primitive, scales)
    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)

    # A sphere and default back to two torus
    # as the primitive type is list
    scene.clear()
    primitive = ['sphere']
    with npt.assert_warns(UserWarning):
        sdf_actor = actor.sdf(centers, directions, colors, primitive, scales)
    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)

    # One sphere and ellipsoid each
    # Default to torus
    scene.clear()
    primitive = ['sphere', 'ellipsoid']
    with npt.assert_warns(UserWarning):
        sdf_actor = actor.sdf(centers, directions, colors, primitive, scales)
    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)
def initialize(self):
    """Register vtk-web protocols and build the billboard-spheres scene."""
    # Bring used components
    self.registerVtkWebProtocol(protocols.vtkWebMouseHandler())
    self.registerVtkWebProtocol(protocols.vtkWebViewPort())
    # Image delivery
    # 1. Original method where the client ask for each image individually
    #self.registerVtkWebProtocol(protocols.vtkWebViewPortImageDelivery())
    # 2. Improvement on the initial protocol to allow images to be pushed
    # from the server without any client request (i.e.: animation, LOD, …)
    self.registerVtkWebProtocol(
        protocols.vtkWebPublishImageDelivery(decode=False))
    # Protocol for sending geometry for the vtk.js synchronized render
    # window
    # For local rendering using vtk.js
    #self.registerVtkWebProtocol(protocols.vtkWebViewPortGeometryDelivery())
    #self.registerVtkWebProtocol(protocols.vtkWebLocalRendering())
    # Custom API
    self.registerVtkWebProtocol(FuryProtocol())

    # Tell the C++ web app to use no encoding.
    # ParaViewWebPublishImageDelivery must be set to decode=False to match.
    # RAW instead of base64
    self.getApplication().SetImageEncoding(0)

    # Update authentication key to use
    self.updateSecret(_WebSpheres.authKey)

    # Create default pipeline (Only once for all the session)
    if not _WebSpheres.view:
        # FURY specific code
        scene = window.Scene()
        scene.background((1, 1, 1))

        n_points = 10000
        translate = 100
        centers = translate * np.random.rand(n_points, 3) - translate / 2
        colors = 255 * np.random.rand(n_points, 3)
        radius = np.random.rand(n_points)

        # GLSL fragment implementation shading each billboard as a sphere.
        fake_sphere = \
            """
            float len = length(point);
            float radius = 1.;
            if(len > radius)
                {discard;}
            vec3 normalizedPoint = normalize(vec3(point.xy, sqrt(1. - len)));
            vec3 direction = normalize(vec3(1., 1., 1.));
            float df_1 = max(0, dot(direction, normalizedPoint));
            float sf_1 = pow(df_1, 24);
            fragOutput0 = vec4(max(df_1 * color, sf_1 * vec3(1)), 1);
            """

        spheres_actor = actor.billboard(centers,
                                        colors=colors,
                                        scales=radius,
                                        fs_impl=fake_sphere)
        scene.add(spheres_actor)
        scene.add(actor.axes())

        showm = window.ShowManager(scene)

        # For debugging purposes
        #showm.render()

        ren_win = showm.window
        ren_win_interactor = vtk.vtkRenderWindowInteractor()
        ren_win_interactor.SetRenderWindow(ren_win)
        ren_win_interactor.GetInteractorStyle().\
            SetCurrentStyleToTrackballCamera()
        ren_win_interactor.EnableRenderOff()

        # VTK Web application specific
        _WebSpheres.view = ren_win
        self.getApplication().GetObjectIdMap().SetActiveObject(
            'VIEW', ren_win)
def test_active_camera():
    """Exercise camera zoom/rotations/dolly (Renderer-era variant).

    NOTE(review): duplicates the Scene-based test_active_camera using the
    deprecated ``window.Renderer`` API — confirm target FURY version before
    modernizing.
    """
    renderer = window.Renderer()
    renderer.add(actor.axes(scale=(1, 1, 1)))

    renderer.reset_camera()
    renderer.reset_clipping_range()

    direction = renderer.camera_direction()
    position, focal_point, view_up = renderer.get_camera()

    renderer.set_camera((0., 0., 1.), (0., 0., 0), view_up)
    position, focal_point, view_up = renderer.get_camera()
    npt.assert_almost_equal(np.dot(direction, position), -1)

    # Zoom does not move the camera position.
    renderer.zoom(1.5)
    new_position, _, _ = renderer.get_camera()
    npt.assert_array_almost_equal(position, new_position)
    renderer.zoom(1)

    # rotate around focal point
    renderer.azimuth(90)
    position, _, _ = renderer.get_camera()
    npt.assert_almost_equal(position, (1.0, 0.0, 0))

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    # rotate around camera's center
    renderer.yaw(90)
    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=[(0, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    renderer.yaw(-90)
    renderer.elevation(90)
    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=(0, 255, 0))
    npt.assert_equal(report.colors_found, [True])

    renderer.set_camera((0., 0., 1.), (0., 0., 0), view_up)

    # vertical rotation of the camera around the focal point
    renderer.pitch(10)
    renderer.pitch(-10)

    # rotate around the direction of projection
    renderer.roll(90)

    # inverted normalized distance from focal point along the direction
    # of the camera
    position, _, _ = renderer.get_camera()
    renderer.dolly(0.5)
    new_position, _, _ = renderer.get_camera()
    npt.assert_almost_equal(position[2], 0.5 * new_position[2])
def test_scene():
    """Exercise Scene add/rm/rm_all, background handling and camera_info."""
    scene = window.Scene()
    npt.assert_equal(scene.size(), (0, 0))

    # background color for scene (1, 0.5, 0)
    # 0.001 added here to remove numerical errors when moving from float
    # to int values
    bg_float = (1, 0.501, 0)

    # that will come in the image in the 0-255 uint scale
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))

    scene.background(bg_float)
    # window.show(scene)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr,
                                     bg_color=bg_color,
                                     colors=[bg_color, (0, 127, 0)])
    npt.assert_equal(report.objects, 0)
    npt.assert_equal(report.colors_found, [True, False])

    axes = actor.axes()
    scene.add(axes)
    # window.show(scene)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    scene.rm(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    scene.add(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    scene.rm_all()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    # A second scene with its own background and actor bookkeeping.
    ren2 = window.Scene(bg_float)
    ren2.background((0, 0, 0.))
    report = window.analyze_scene(ren2)
    npt.assert_equal(report.bg_color, (0, 0, 0))

    ren2.add(axes)
    report = window.analyze_scene(ren2)
    npt.assert_equal(report.actors, 3)

    ren2.rm(axes)
    report = window.analyze_scene(ren2)
    npt.assert_equal(report.actors, 0)

    with captured_output() as (out, err):
        scene.camera_info()
    npt.assert_equal(
        out.getvalue().strip(),
        '# Active Camera\n '
        'Position (0.00, 0.00, 1.00)\n '
        'Focal Point (0.00, 0.00, 0.00)\n '
        'View Up (0.00, 1.00, 0.00)')
    npt.assert_equal(err.getvalue().strip(), '')
def test_manifest_pbr(interactive=False):
    """Apply ``material.manifest_pbr`` to many actor types and verify that
    each one still renders with the expected number of visible objects.

    The triple-quoted blocks further down are deliberately disabled test
    sections (actors with partial or no PBR support); they are kept as
    documentation of what remains to be covered.
    """
    scene = window.Scene()  # Setup scene

    # Setup surface
    surface_actor = _generate_surface()
    material.manifest_pbr(surface_actor)
    scene.add(surface_actor)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)
    scene.clear()  # Reset scene

    # Contour from roi setup
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data, affine, color=np.array([1, 0, 1]))
    material.manifest_pbr(surface)
    scene.add(surface)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)
    scene.clear()  # Reset scene

    # Streamtube setup
    data1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    data2 = data1 + np.array([0.5, 0., 0.])
    data = [data1, data2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    tubes = actor.streamtube(data, colors, linewidth=.1)
    material.manifest_pbr(tubes)
    scene.add(tubes)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 2)
    scene.clear()  # Reset scene

    # Axes setup
    axes = actor.axes()
    material.manifest_pbr(axes)
    scene.add(axes)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)
    scene.clear()  # Reset scene

    # ODF slicer setup (only when dipy is available)
    if have_dipy:
        from dipy.data import get_sphere
        from tempfile import mkstemp
        sphere = get_sphere('symmetric362')
        shape = (11, 11, 11, sphere.vertices.shape[0])
        # ODF volume backed by a memory-mapped temporary file.
        fid, fname = mkstemp(suffix='_odf_slicer.mmap')
        odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)
        odfs[:] = 1
        affine = np.eye(4)
        mask = np.ones(odfs.shape[:3])
        mask[:4, :4, :4] = 0
        odfs[..., 0] = 1
        odf_actor = actor.odf_slicer(odfs, affine, mask=mask, sphere=sphere,
                                     scale=.25, colormap='blues')
        material.manifest_pbr(odf_actor)
        k = 5
        I, J, _ = odfs.shape[:3]
        odf_actor.display_extent(0, I, 0, J, k, k)
        odf_actor.GetProperty().SetOpacity(1.0)
        scene.add(odf_actor)
        scene.reset_camera()
        scene.reset_clipping_range()
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        # One glyph per voxel in the displayed 11x11 slice.
        npt.assert_equal(report.objects, 11 * 11)
        scene.clear()  # Reset scene

    # Tensor slicer setup (only when dipy is available)
    if have_dipy:
        from dipy.data import get_sphere
        sphere = get_sphere('symmetric724')
        evals = np.array([1.4, .35, .35]) * 10 ** (-3)
        evecs = np.eye(3)
        mevals = np.zeros((3, 2, 4, 3))
        mevecs = np.zeros((3, 2, 4, 3, 3))
        mevals[..., :] = evals
        mevecs[..., :, :] = evecs
        affine = np.eye(4)
        scene = window.Scene()
        tensor_actor = actor.tensor_slicer(mevals, mevecs, affine=affine,
                                           sphere=sphere, scale=.3)
        material.manifest_pbr(tensor_actor)
        _, J, K = mevals.shape[:3]
        tensor_actor.display_extent(0, 1, 0, J, 0, K)
        scene.add(tensor_actor)
        scene.reset_camera()
        scene.reset_clipping_range()
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 4)
        # TODO: Rotate to test
        # npt.assert_equal(report.objects, 4 * 2 * 2)
        scene.clear()  # Reset scene

    # Point setup
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    opacity = 0.5
    points_actor = actor.point(points, colors, opacity=opacity)
    material.manifest_pbr(points_actor)
    scene.add(points_actor)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)
    scene.clear()  # Reset scene

    # Sphere setup
    xyzr = np.array([[0, 0, 0, 10], [100, 0, 0, 25], [200, 0, 0, 50]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.99]])
    opacity = 0.5
    sphere_actor = actor.sphere(centers=xyzr[:, :3], colors=colors[:],
                                radii=xyzr[:, 3], opacity=opacity)
    material.manifest_pbr(sphere_actor)
    scene.add(sphere_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)
    scene.clear()  # Reset scene

    # Advanced geometry actors setup (Arrow, cone, cylinder)
    xyz = np.array([[0, 0, 0], [50, 0, 0], [100, 0, 0]])
    dirs = np.array([[0, 1, 0], [1, 0, 0], [0, 0.5, 0.5]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [1, 1, 0, 1]])
    heights = np.array([5, 7, 10])
    actor_list = [[actor.cone, {'directions': dirs, 'resolution': 8}],
                  [actor.arrow, {'directions': dirs, 'resolution': 9}],
                  [actor.cylinder, {'directions': dirs}]]
    for act_func, extra_args in actor_list:
        aga_actor = act_func(centers=xyz, colors=colors[:], heights=heights,
                             **extra_args)
        material.manifest_pbr(aga_actor)
        scene.add(aga_actor)
        scene.reset_camera()
        scene.reset_clipping_range()
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 3)
        scene.clear()

    # Basic geometry actors (Box, cube, frustum, octagonalprism, rectangle,
    # square); each is tried against every scale representation.
    centers = np.array([[4, 0, 0], [0, 4, 0], [0, 0, 0]])
    colors = np.array([[1, 0, 0, 0.4], [0, 1, 0, 0.8], [0, 0, 1, 0.5]])
    directions = np.array([[1, 1, 0]])
    scale_list = [1, 2, (1, 1, 1), [3, 2, 1], np.array([1, 2, 3]),
                  np.array([[1, 2, 3], [1, 3, 2], [3, 1, 2]])]
    actor_list = [[actor.box, {}], [actor.cube, {}], [actor.frustum, {}],
                  [actor.octagonalprism, {}], [actor.rectangle, {}],
                  [actor.square, {}]]
    for act_func, extra_args in actor_list:
        for scale in scale_list:
            scene = window.Scene()
            bga_actor = act_func(centers=centers, directions=directions,
                                 colors=colors, scales=scale, **extra_args)
            material.manifest_pbr(bga_actor)
            scene.add(bga_actor)
            arr = window.snapshot(scene)
            report = window.analyze_snapshot(arr)
            msg = 'Failed with {}, scale={}'.format(act_func.__name__, scale)
            npt.assert_equal(report.objects, 3, err_msg=msg)
            scene.clear()

    # Cone setup using vertices
    centers = np.array([[0, 0, 0], [20, 0, 0], [40, 0, 0]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.99]])
    vertices = np.array([[0.0, 0.0, 0.0], [0.0, 10.0, 0.0],
                         [10.0, 0.0, 0.0], [0.0, 0.0, 10.0]])
    faces = np.array([[0, 1, 3], [0, 1, 2]])
    cone_actor = actor.cone(centers=centers, directions=directions,
                            colors=colors[:], vertices=vertices, faces=faces)
    material.manifest_pbr(cone_actor)
    scene.add(cone_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)
    scene.clear()  # Reset scene

    # Superquadric setup
    centers = np.array([[8, 0, 0], [0, 8, 0], [0, 0, 0]])
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    directions = np.random.rand(3, 3)
    scales = [1, 2, 3]
    roundness = np.array([[1, 1], [1, 2], [2, 1]])
    sq_actor = actor.superquadric(centers, roundness=roundness,
                                  directions=directions,
                                  colors=colors.astype(np.uint8),
                                  scales=scales)
    material.manifest_pbr(sq_actor)
    scene.add(sq_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)
    scene.clear()  # Reset scene

    # Label setup
    text_actor = actor.label("Hello")
    material.manifest_pbr(text_actor)
    scene.add(text_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 5)

    # NOTE: From this point on, these actors don't have full support for PBR
    # interpolation. This is, the test passes but there is no evidence of the
    # desired effect.
    """
    # Line setup
    data1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    data2 = data1 + np.array([0.5, 0., 0.])
    data = [data1, data2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    lines = actor.line(data, colors, linewidth=5)
    material.manifest_pbr(lines)
    scene.add(lines)
    """
    """
    # Peak slicer setup
    _peak_dirs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='f4')
    # peak_dirs.shape = (1, 1, 1) + peak_dirs.shape
    peak_dirs = np.zeros((11, 11, 11, 3, 3))
    peak_dirs[:, :, :] = _peak_dirs
    peak_actor = actor.peak_slicer(peak_dirs)
    material.manifest_pbr(peak_actor)
    scene.add(peak_actor)
    """
    """
    # Dots setup
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    dots_actor = actor.dots(points, color=(0, 255, 0))
    material.manifest_pbr(dots_actor)
    scene.add(dots_actor)
    """
    """
    # Texture setup
    arr = (255 * np.ones((512, 212, 4))).astype('uint8')
    arr[20:40, 20:40, :] = np.array([255, 0, 0, 255], dtype='uint8')
    tp2 = actor.texture(arr)
    material.manifest_pbr(tp2)
    scene.add(tp2)
    """
    """
    # Texture on sphere setup
    arr = 255 * np.ones((810, 1620, 3), dtype='uint8')
    rows, cols, _ = arr.shape
    rs = rows // 2
    cs = cols // 2
    w = 150 // 2
    arr[rs - w: rs + w, cs - 10 * w: cs + 10 * w] = np.array([255, 127, 0])
    tsa = actor.texture_on_sphere(arr)
    material.manifest_pbr(tsa)
    scene.add(tsa)
    """
    """
    # SDF setup
    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0]]) * 11
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    scales = [1, 2, 3]
    primitive = ['sphere', 'ellipsoid', 'torus']
    sdf_actor = actor.sdf(centers, directions=directions, colors=colors,
                          primitives=primitive, scales=scales)
    material.manifest_pbr(sdf_actor)
    scene.add(sdf_actor)
    """

    # NOTE: For these last set of actors, there is not support for PBR
    # interpolation at all.
    """
    # Setup slicer
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine, value_range=[data.min(), data.max()])
    slicer.display(None, None, 25)
    material.manifest_pbr(slicer)
    scene.add(slicer)
    """
    """
    # Contour from label setup
    data = np.zeros((50, 50, 50))
    data[5:15, 1:10, 25] = 1.
    data[25:35, 1:10, 25] = 2.
    data[40:49, 1:10, 25] = 3.
    color = np.array([[255, 0, 0, 0.6], [0, 255, 0, 0.5], [0, 0, 255, 1.0]])
    surface = actor.contour_from_label(data, color=color)
    material.manifest_pbr(surface)
    scene.add(surface)
    """
    """
    # Scalar bar setup
    lut = actor.colormap_lookup_table(
        scale_range=(0., 100.), hue_range=(0., 0.1), saturation_range=(1, 1),
        value_range=(1., 1))
    sb_actor = actor.scalar_bar(lut, ' ')
    material.manifest_pbr(sb_actor)
    scene.add(sb_actor)
    """
    """
    # Billboard setup
    centers = np.array([[0, 0, 0], [5, -5, 5], [-7, 7, -7], [10, 10, 10],
                        [10.5, 11.5, 11.5], [12, -12, -12], [-17, 17, 17],
                        [-22, -22, 22]])
    colors = np.array([[1, 1, 0], [0, 0, 0], [1, 0, 1], [0, 0, 1], [1, 1, 1],
                       [1, 0, 0], [0, 1, 0], [0, 1, 1]])
    scales = [6, .4, 1.2, 1, .2, .7, 3, 2]
    """
    # GLSL fragment-shader snippet (runtime string for the disabled billboard
    # section below) — presumably renders an impostor sphere; kept verbatim.
    fake_sphere = \
        """
        float len = length(point);
        float radius = 1.;
        if(len > radius)
            discard;
        vec3 normalizedPoint = normalize(vec3(point.xy, sqrt(1. - len)));
        vec3 direction = normalize(vec3(1., 1., 1.));
        float df_1 = max(0, dot(direction, normalizedPoint));
        float sf_1 = pow(df_1, 24);
        fragOutput0 = vec4(max(df_1 * color, sf_1 * vec3(1)), 1);
        """
    """
    billboard_actor = actor.billboard(centers, colors=colors, scales=scales,
                                      fs_impl=fake_sphere)
    material.manifest_pbr(billboard_actor)
    scene.add(billboard_actor)
    """
    """
    # Text3D setup
    msg = 'I \nlove\n FURY'
    txt_actor = actor.text_3d(msg)
    material.manifest_pbr(txt_actor)
    scene.add(txt_actor)
    """
    """
    # Figure setup
    arr = (255 * np.ones((512, 212, 4))).astype('uint8')
    arr[20:40, 20:40, 3] = 0
    tp = actor.figure(arr)
    material.manifest_pbr(tp)
    scene.add(tp)
    """
    if interactive:
        window.show(scene)
# Oscillation parameters — presumably drive the periodic motion applied in the
# animation callback further down the file (not visible here); verify usage.
amplitude_x = 0.3
amplitude_y = 0.0
freq = 0.6

# Small red box marking the anchor/base position of the simulated object.
base_actor = actor.box(centers=np.array([[0, 0, 0]]),
                       directions=np.array([[0, 0, 0]]),
                       scales=(0.02, 0.02, 0.02),
                       colors=np.array([[1, 0, 0]]))

###############################################################################
# We add the necessary actors to the scene.

scene = window.Scene()
scene.background((1, 1, 1))
scene.set_camera((2.2, -3.0, 3.0), (-0.3, 0.6, 0.7), (-0.2, 0.2, 1.0))
scene.add(actor.axes(scale=(0.1, 0.1, 0.1)))
# NOTE(review): rope_actor is defined earlier in the file (not in this view).
scene.add(rope_actor)
scene.add(base_actor)

# Create show manager.
showm = window.ShowManager(scene, size=(900, 768), reset_camera=False,
                           order_transparent=True)
showm.initialize()

# Counter iterator for tracking simulation steps.
counter = itertools.count()

###############################################################################
def test_odf_slicer(interactive=False):
    """Exercise ``actor.odf_slicer``: slicing, masking and colormap options.

    Improvements over the previous version: removed leftover debug
    ``print(fid)``/``print(fname)`` calls, simplified the redundant
    ``0. * np.zeros(...)`` to ``np.zeros(...)`` (identical result), and
    discarded the unused third unpacked shape value.
    """
    sphere = get_sphere('symmetric362')
    shape = (11, 11, 11, sphere.vertices.shape[0])

    # ODF volume backed by a memory-mapped temporary file; cleaned up at end.
    fid, fname = mkstemp(suffix='_odf_slicer.mmap')
    odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)
    odfs[:] = 1

    affine = np.eye(4)
    renderer = window.Renderer()

    mask = np.ones(odfs.shape[:3])
    mask[:4, :4, :4] = 0
    odfs[..., 0] = 1

    odf_actor = actor.odf_slicer(odfs, affine, mask=mask, sphere=sphere,
                                 scale=.25, colormap='plasma')

    # FA-like scalar volume: bright faces plus one bright interior voxel.
    fa = np.zeros(odfs.shape[:3])
    fa[:, 0, :] = 1.
    fa[:, -1, :] = 1.
    fa[0, :, :] = 1.
    fa[-1, :, :] = 1.
    fa[5, 5, 5] = 1

    k = 5
    I, J, _ = odfs.shape[:3]

    fa_actor = actor.slicer(fa, affine)
    fa_actor.display_extent(0, I, 0, J, k, k)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    odf_actor.display_extent(0, I, 0, J, k, k)
    odf_actor.GetProperty().SetOpacity(1.0)
    if interactive:
        window.show(renderer, reset_camera=False)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, find_objects=True)
    # One glyph per voxel of the displayed 11x11 slice.
    npt.assert_equal(report.objects, 11 * 11)

    renderer.clear()
    renderer.add(fa_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    # Single-voxel mask with no affine.
    mask[:] = 0
    mask[5, 5, 5] = 1
    fa[5, 5, 5] = 0
    fa_actor = actor.slicer(fa, None)
    fa_actor.display(None, None, 5)
    odf_actor = actor.odf_slicer(odfs, None, mask=mask, sphere=sphere,
                                 scale=.25, colormap='plasma', norm=False,
                                 global_cm=True)
    renderer.clear()
    renderer.add(fa_actor)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)

    # Full mask, global colormap; sweep slices along both the x and y axes.
    odfs[:, :, :] = 1
    mask = np.ones(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask, sphere=sphere,
                                 scale=.25, colormap='plasma', norm=False,
                                 global_cm=True)
    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    renderer.add(actor.axes((11, 11, 11)))
    for i in range(11):
        odf_actor.display(i, None, None)
        fa_actor.display(i, None, None)
        if interactive:
            window.show(renderer)
    for j in range(11):
        odf_actor.display(None, j, None)
        fa_actor.display(None, j, None)
        if interactive:
            window.show(renderer)

    # with mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask, sphere=sphere,
                                 scale=.25, colormap='plasma', norm=False,
                                 global_cm=True)
    renderer.clear()
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 1)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    # Release the memmap before deleting its backing file.
    del odf_actor
    odfs._mmap.close()
    del odfs
    os.close(fid)
    os.remove(fname)
def test_renderer():
    """Smoke-test the ``window.Renderer`` API (legacy counterpart of Scene)."""
    ren = window.Renderer()
    npt.assert_equal(ren.size(), (0, 0))

    # Background color for the renderer is (1, 0.5, 0); the 0.001 offset
    # removes numerical error when mapping the float color onto 0-255 uints.
    bg_float = (1, 0.501, 0)
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))
    ren.background(bg_float)

    # Empty renderer: only the background should be present.
    snap = window.snapshot(ren)
    found = window.analyze_snapshot(snap, bg_color=bg_color,
                                    colors=[bg_color, (0, 127, 0)])
    npt.assert_equal(found.objects, 0)
    npt.assert_equal(found.colors_found, [True, False])

    axes = actor.axes()

    # add/rm via methods toggles the detected object count.
    ren.add(axes)
    snap = window.snapshot(ren)
    npt.assert_equal(window.analyze_snapshot(snap, bg_color).objects, 1)

    ren.rm(axes)
    snap = window.snapshot(ren)
    npt.assert_equal(window.analyze_snapshot(snap, bg_color).objects, 0)

    # Module-level helper should behave like the method.
    window.add(ren, axes)
    snap = window.snapshot(ren)
    npt.assert_equal(window.analyze_snapshot(snap, bg_color).objects, 1)

    ren.rm_all()
    snap = window.snapshot(ren)
    npt.assert_equal(window.analyze_snapshot(snap, bg_color).objects, 0)

    # A second renderer: background from constructor, then overridden.
    other = window.Renderer(bg_float)
    other.background((0, 0, 0.))
    npt.assert_equal(window.analyze_renderer(other).bg_color, (0, 0, 0))

    other.add(axes)
    npt.assert_equal(window.analyze_renderer(other).actors, 3)

    window.rm(other, axes)
    npt.assert_equal(window.analyze_renderer(other).actors, 0)