def test_bundle_maps():
    """Render a fornix bundle colored by a volume metric, per-point values
    and per-point RGB colors, checking the produced actors via scene/snapshot
    analysis."""
    scene = window.Scene()
    bundle = fornix_streamlines()
    bundle, shift = center_streamlines(bundle)

    # Translate the centered bundle by (100, 100, 100) so it sits inside the
    # 200^3 metric volume created below.
    mat = np.array([[1, 0, 0, 100],
                    [0, 1, 0, 100],
                    [0, 0, 1, 100],
                    [0, 0, 0, 1.]])

    bundle = transform_streamlines(bundle, mat)

    # metric = np.random.rand(*(200, 200, 200))
    metric = 100 * np.ones((200, 200, 200))

    # add lower values
    metric[100, :, :] = 100 * 0.5

    # create a nice orange-red colormap
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    scene.add(line)
    scene.add(actor.scalar_bar(lut, ' '))

    report = window.analyze_scene(scene)
    npt.assert_almost_equal(report.actors, 1)
    # window.show(scene)

    scene.clear()

    # One random scalar value per streamline point.
    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0

    line = actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut)
    scene.add(line)
    # window.show(scene)

    report = window.analyze_scene(scene)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    scene.clear()

    # One RGB triplet per streamline point.
    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0

    line = actor.line(bundle, colors, linewidth=2)
    scene.add(line)
    # window.show(scene)

    report = window.analyze_scene(scene)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(scene)

    arr = window.snapshot(scene)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # try other input options for colors
    scene.clear()
    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
def test_scene():
    """Exercise window.Scene basics: size, background color round-trip,
    add/remove actors (via snapshot and scene analysis) and camera_info
    output capture.

    NOTE(review): this SOURCE contains a second, longer ``test_scene`` later
    on — if both live in the same module, the later definition shadows this
    one; verify they came from different files.
    """
    scene = window.Scene()

    npt.assert_equal(scene.size(), (0, 0))

    # background color for scene (1, 0.5, 0)
    # 0.001 added here to remove numerical errors when moving from float
    # to int values
    bg_float = (1, 0.501, 0)

    # that will come in the image in the 0-255 uint scale
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))

    scene.background(bg_float)
    # window.show(scene)
    arr = window.snapshot(scene)

    report = window.analyze_snapshot(arr,
                                     bg_color=bg_color,
                                     colors=[bg_color, (0, 127, 0)])
    npt.assert_equal(report.objects, 0)
    npt.assert_equal(report.colors_found, [True, False])

    axes = actor.axes()
    scene.add(axes)
    # window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    # Removing the axes leaves an empty snapshot again.
    scene.rm(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    scene.add(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    # rm_all() clears every actor at once.
    scene.rm_all()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    ren2 = window.Scene(bg_float)
    ren2.background((0, 0, 0.))

    report = window.analyze_scene(ren2)
    npt.assert_equal(report.bg_color, (0, 0, 0))

    ren2.add(axes)
    report = window.analyze_scene(ren2)
    # The axes are counted as 3 actors here (vs. 1 object in snapshots) —
    # presumably one actor per axis; TODO confirm against actor.axes().
    npt.assert_equal(report.actors, 3)

    ren2.rm(axes)
    report = window.analyze_scene(ren2)
    npt.assert_equal(report.actors, 0)

    # camera_info() prints to stdout; capture and compare verbatim.
    with captured_output() as (out, err):
        scene.camera_info()

    npt.assert_equal(out.getvalue().strip(),
                     '# Active Camera\n '
                     'Position (0.00, 0.00, 1.00)\n '
                     'Focal Point (0.00, 0.00, 0.00)\n '
                     'View Up (0.00, 1.00, 0.00)')
    npt.assert_equal(err.getvalue().strip(), '')
def test_slicer():
    """Exercise actor.slicer: plain display, display_extent, snapshot to file,
    invalid input, RGB volumes, lookup-colormap slicers (with copy/opacity/
    tolerance), non-isotropic affines and resliced data.

    Fix: removed leftover debugging code (an inline ``import scipy`` plus a
    series of ``print`` statements dumping scipy's version/path and snapshot
    statistics) that had been committed into the test body.
    """
    scene = window.Scene()
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine)
    slicer.display(None, None, 25)
    scene.add(slicer)

    scene.reset_camera()
    scene.reset_clipping_range()
    # window.show(scene)

    # copy pixels in numpy array directly
    arr = window.snapshot(scene, 'test_slicer.png', offscreen=True)

    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    # print(arr[..., 0])

    # The slicer can cut directly a smaller part of the image
    slicer.display_extent(10, 30, 10, 30, 35, 35)
    scene.ResetCamera()
    scene.add(slicer)

    # save pixels in png file not a numpy array
    with InTemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        # window.show(scene)
        window.snapshot(scene, fname, offscreen=True)
        report = window.analyze_snapshot(fname, find_objects=True)
        npt.assert_equal(report.objects, 1)

    # A 1-D array is not a valid volume.
    npt.assert_raises(ValueError, actor.slicer, np.ones(10))

    scene.clear()

    # Pure-red RGB volume should be detected as a red object.
    rgb = np.zeros((30, 30, 30, 3))
    rgb[..., 0] = 1.
    rgb_actor = actor.slicer(rgb)

    scene.add(rgb_actor)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene, offscreen=True)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report.colors_found, [True])

    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    scene.clear()
    slicer_lut = actor.slicer(data, lookup_colormap=lut)

    slicer_lut.display(10, None, None)
    slicer_lut.display(None, 10, None)
    slicer_lut.display(None, None, 10)

    slicer_lut.opacity(0.5)
    slicer_lut.tolerance(0.03)
    # copy() must carry over opacity and picker tolerance.
    slicer_lut2 = slicer_lut.copy()
    npt.assert_equal(slicer_lut2.GetOpacity(), 0.5)
    npt.assert_equal(slicer_lut2.picker.GetTolerance(), 0.03)
    slicer_lut2.opacity(1)
    slicer_lut2.tolerance(0.025)
    slicer_lut2.display(None, None, 10)
    scene.add(slicer_lut2)

    scene.reset_clipping_range()

    arr = window.snapshot(scene, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)

    scene.clear()

    # Non-isotropic affine with nearest-neighbour interpolation.
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])
    slicer = actor.slicer(data, affine, interpolation='nearest')
    slicer.display(None, None, 25)

    scene.add(slicer)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(data.shape, slicer.shape)

    scene.clear()

    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])

    from dipy.align.reslice import reslice

    # Reslicing to isotropic voxels scales the slicer shape by the zooms.
    data2, affine2 = reslice(data, affine, zooms=(1, 3, 2),
                             new_zooms=(1, 1, 1))

    slicer = actor.slicer(data2, affine2, interpolation='linear')
    slicer.display(None, None, 25)

    scene.add(slicer)
    scene.reset_camera()
    scene.reset_clipping_range()

    # window.show(scene, reset_camera=False)
    arr = window.snapshot(scene, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_array_equal([1, 3, 2] * np.array(data.shape),
                           np.array(slicer.shape))
def test_text_block_2d_justification():
    """Place nine TextBlock2D actors on a 3x3 grid, covering every
    combination of horizontal (left/center/right) and vertical
    (top/middle/bottom) justification, over red guide lines."""
    window_size = (700, 700)
    show_manager = window.ShowManager(size=window_size)

    # To help visualize the text positions.
    grid_size = (500, 500)
    bottom, middle, top = 50, 300, 550
    left, center, right = 50, 300, 550
    line_color = (1, 0, 0)

    # Each spec is ((center position), (width, height)) for a guide line.
    grid_top = (center, top), (grid_size[0], 1)
    grid_bottom = (center, bottom), (grid_size[0], 1)
    grid_left = (left, middle), (1, grid_size[1])
    grid_right = (right, middle), (1, grid_size[1])
    grid_middle = (center, middle), (grid_size[0], 1)
    grid_center = (center, middle), (1, grid_size[1])
    grid_specs = [
        grid_top, grid_bottom, grid_left, grid_right, grid_middle, grid_center
    ]
    for spec in grid_specs:
        line = ui.Rectangle2D(size=spec[1], color=line_color)
        line.center = spec[0]
        show_manager.ren.add(line)

    font_size = 60
    bg_color = (1, 1, 1)
    texts = []

    # Top row: left / center / right justification.
    texts += [
        ui.TextBlock2D("HH", position=(left, top), font_size=font_size,
                       color=(1, 0, 0), bg_color=bg_color,
                       justification="left",
                       vertical_justification="top")
    ]
    texts += [
        ui.TextBlock2D("HH", position=(center, top), font_size=font_size,
                       color=(0, 1, 0), bg_color=bg_color,
                       justification="center",
                       vertical_justification="top")
    ]
    texts += [
        ui.TextBlock2D("HH", position=(right, top), font_size=font_size,
                       color=(0, 0, 1), bg_color=bg_color,
                       justification="right",
                       vertical_justification="top")
    ]

    # Middle row.
    texts += [
        ui.TextBlock2D("HH", position=(left, middle), font_size=font_size,
                       color=(1, 1, 0), bg_color=bg_color,
                       justification="left",
                       vertical_justification="middle")
    ]
    texts += [
        ui.TextBlock2D("HH", position=(center, middle), font_size=font_size,
                       color=(0, 1, 1), bg_color=bg_color,
                       justification="center",
                       vertical_justification="middle")
    ]
    texts += [
        ui.TextBlock2D("HH", position=(right, middle), font_size=font_size,
                       color=(1, 0, 1), bg_color=bg_color,
                       justification="right",
                       vertical_justification="middle")
    ]

    # Bottom row.
    texts += [
        ui.TextBlock2D("HH", position=(left, bottom), font_size=font_size,
                       color=(0.5, 0, 1), bg_color=bg_color,
                       justification="left",
                       vertical_justification="bottom")
    ]
    texts += [
        ui.TextBlock2D("HH", position=(center, bottom), font_size=font_size,
                       color=(1, 0.5, 0), bg_color=bg_color,
                       justification="center",
                       vertical_justification="bottom")
    ]
    texts += [
        ui.TextBlock2D("HH", position=(right, bottom), font_size=font_size,
                       color=(0, 1, 0.5), bg_color=bg_color,
                       justification="right",
                       vertical_justification="bottom")
    ]

    show_manager.ren.add(*texts)

    # Uncomment this to start the visualisation
    # show_manager.start()

    # Offscreen render; the returned array is not asserted on in this view of
    # the test.
    arr = window.snapshot(show_manager.ren, size=window_size, offscreen=True)
def test_active_camera():
    """Exercise Scene camera controls: set/get, zoom, azimuth, yaw,
    elevation, pitch, roll and dolly, verifying via camera state and
    snapshot colors."""
    scene = window.Scene()
    scene.add(actor.axes(scale=(1, 1, 1)))

    scene.reset_camera()
    scene.reset_clipping_range()

    direction = scene.camera_direction()
    position, focal_point, view_up = scene.get_camera()

    scene.set_camera((0., 0., 1.), (0., 0., 0), view_up)
    position, focal_point, view_up = scene.get_camera()
    # Camera looks from +z toward the origin, so direction·position == -1.
    npt.assert_almost_equal(np.dot(direction, position), -1)

    # zoom() must not move the camera position.
    scene.zoom(1.5)
    new_position, _, _ = scene.get_camera()
    npt.assert_array_almost_equal(position, new_position)

    scene.zoom(1)

    # rotate around focal point
    scene.azimuth(90)
    position, _, _ = scene.get_camera()
    npt.assert_almost_equal(position, (1.0, 0.0, 0))

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    # rotate around camera's center
    scene.yaw(90)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=[(0, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    scene.yaw(-90)
    scene.elevation(90)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=(0, 255, 0))
    npt.assert_equal(report.colors_found, [True])

    scene.set_camera((0., 0., 1.), (0., 0., 0), view_up)

    # vertical rotation of the camera around the focal point
    scene.pitch(10)
    scene.pitch(-10)

    # rotate around the direction of projection
    scene.roll(90)

    # inverted normalized distance from focal point along the direction
    # of the camera
    position, _, _ = scene.get_camera()
    scene.dolly(0.5)
    new_position, focal_point, view_up = scene.get_camera()
    npt.assert_almost_equal(position[2], 0.5 * new_position[2])

    # The convenience accessor must agree with the underlying vtk camera.
    cam = scene.camera()
    npt.assert_equal(new_position, cam.GetPosition())
    npt.assert_equal(focal_point, cam.GetFocalPoint())
    npt.assert_equal(view_up, cam.GetViewUp())
def test_sdf_actor(interactive=False):
    """Check actor.sdf with primitives given as a full list, a single string,
    and under-length lists (which warn and fall back to a default primitive)."""
    scene = window.Scene()
    scene.background((1, 1, 1))
    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0], [2, 2, 0]]) * 11
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 1, 0]])
    scales = [1, 2, 3, 4]
    primitive = ['sphere', 'ellipsoid', 'torus', 'capsule']

    sdf_actor = actor.sdf(centers, directions, colors, primitive, scales)
    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)

    # Draw 3 spheres as the primitive type is str
    scene.clear()
    primitive = 'sphere'
    sdf_actor = actor.sdf(centers, directions, colors, primitive, scales)
    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)

    # A sphere and default back to two torus
    # as the primitive type is list
    scene.clear()
    primitive = ['sphere']
    # Short primitive list triggers a UserWarning.
    with npt.assert_warns(UserWarning):
        sdf_actor = actor.sdf(centers, directions, colors, primitive, scales)
    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)

    # One sphere and ellipsoid each
    # Default to torus
    scene.clear()
    primitive = ['sphere', 'ellipsoid']
    with npt.assert_warns(UserWarning):
        sdf_actor = actor.sdf(centers, directions, colors, primitive, scales)
    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)
def test_scene():
    """Scene basics (size, background, add/remove actors, camera_info) plus
    skybox behavior checked through pixel probes of snapshots.

    NOTE(review): an earlier, shorter ``test_scene`` appears above in this
    SOURCE — if both are in one module the later definition shadows the
    earlier; verify they belong to different files.
    """
    scene = window.Scene()

    # Scene size test
    npt.assert_equal(scene.size(), (0, 0))

    # Color background test
    # Background color for scene (1, 0.5, 0). 0.001 added here to remove
    # numerical errors when moving from float to int values
    bg_float = (1, 0.501, 0)

    # That will come in the image in the 0-255 uint scale
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))

    scene.background(bg_float)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr,
                                     bg_color=bg_color,
                                     colors=[bg_color, (0, 127, 0)])
    npt.assert_equal(report.objects, 0)
    npt.assert_equal(report.colors_found, [True, False])

    # Add actor to scene to test the remove actor function by analyzing a
    # snapshot
    axes = actor.axes()
    scene.add(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    # Test remove actor function by analyzing a snapshot
    scene.rm(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    # Add actor to scene to test the remove all actors function by analyzing a
    # snapshot
    scene.add(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    # Test remove all actors function by analyzing a snapshot
    scene.rm_all()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    # Test change background color from scene by analyzing the scene
    ren2 = window.Scene(bg_float)
    ren2.background((0, 0, 0.))
    report = window.analyze_scene(ren2)
    npt.assert_equal(report.bg_color, (0, 0, 0))

    # Add actor to scene to test the remove actor function by analyzing the
    # scene
    ren2.add(axes)
    report = window.analyze_scene(ren2)
    npt.assert_equal(report.actors, 1)

    # Test remove actor function by analyzing the scene
    ren2.rm(axes)
    report = window.analyze_scene(ren2)
    npt.assert_equal(report.actors, 0)

    # Test camera information retrieving
    with captured_output() as (out, err):
        scene.camera_info()
    npt.assert_equal(out.getvalue().strip(),
                     '# Active Camera\n '
                     'Position (0.00, 0.00, 1.00)\n '
                     'Focal Point (0.00, 0.00, 0.00)\n '
                     'View Up (0.00, 1.00, 0.00)')
    npt.assert_equal(err.getvalue().strip(), '')

    # Test skybox: a plain Scene has no image-based lighting.
    scene = window.Scene()
    npt.assert_equal(scene.GetUseImageBasedLighting(), False)
    npt.assert_equal(scene.GetAutomaticLightCreation(), 1)
    npt.assert_equal(scene.GetSphericalHarmonics(), None)
    npt.assert_equal(scene.GetEnvironmentTexture(), None)

    # Passing a skybox texture enables IBL and disables automatic lights.
    test_tex = Texture()
    scene = window.Scene(skybox=test_tex)
    npt.assert_equal(scene.GetUseImageBasedLighting(), True)
    npt.assert_equal(scene.GetAutomaticLightCreation(), 0)
    npt.assert_equal(scene.GetSphericalHarmonics(), None)
    npt.assert_equal(scene.GetEnvironmentTexture(), test_tex)

    # Test automatically shown skybox
    test_tex = Texture()
    test_tex.CubeMapOn()
    checker_arr = np.array([[1, 0], [0, 1]], dtype=np.uint8) * 255
    for i in range(6):
        vtk_img = ImageData()
        vtk_img.SetDimensions(2, 2, 1)
        img_arr = np.zeros((2, 2, 3), dtype=np.uint8)
        # Face pair i // 2 gets its checker in channel i // 2, so the six
        # cube faces are colored R/R, G/G, B/B.
        img_arr[:, :, i // 2] = checker_arr
        vtk_arr = numpy_support.numpy_to_vtk(img_arr.reshape((-1, 3),
                                                             order='F'))
        vtk_arr.SetName('Image')
        img_point_data = vtk_img.GetPointData()
        img_point_data.AddArray(vtk_arr)
        img_point_data.SetActiveScalars('Image')
        test_tex.SetInputDataObject(i, vtk_img)
    scene = window.Scene(skybox=test_tex)
    report = window.analyze_scene(scene)
    # The skybox itself is reported as one actor.
    npt.assert_equal(report.actors, 1)
    ss = window.snapshot(scene)
    npt.assert_array_equal(ss[75, 75, :], [0, 0, 255])
    npt.assert_array_equal(ss[75, 225, :], [0, 0, 0])
    # Rotating the camera shows a different face of the cube map.
    scene.yaw(90)
    ss = window.snapshot(scene)
    npt.assert_array_equal(ss[75, 75, :], [255, 0, 0])
    npt.assert_array_equal(ss[75, 225, :], [0, 0, 0])
    scene.pitch(90)
    ss = window.snapshot(scene)
    npt.assert_array_equal(ss[75, 75, :], [0, 0, 0])
    npt.assert_array_equal(ss[75, 225, :], [0, 255, 0])
def test_odf_slicer(interactive=False):
    """Exercise actor.odf_slicer: affine+mask, colormaps (global, constant,
    directional), display()/display_extent(), error conditions, SH-coefficient
    input and sphere updates on a live actor."""
    # Prepare our data
    sphere = get_sphere('repulsion100')
    shape = (11, 11, 11, sphere.vertices.shape[0])
    odfs = np.ones(shape)

    affine = np.array([[2.0, 0.0, 0.0, 3.0],
                       [0.0, 2.0, 0.0, 3.0],
                       [0.0, 0.0, 2.0, 1.0],
                       [0.0, 0.0, 0.0, 1.0]])
    mask = np.ones(odfs.shape[:3], bool)
    # Mask out a 4x4x4 corner so some glyphs are hidden.
    mask[:4, :4, :4] = False

    # Test that affine and mask work
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, affine=affine,
                                 mask=mask, scale=.25, colormap='blues')
    k = 2
    I, J, _ = odfs.shape[:3]
    odf_actor.display_extent(0, I - 1, 0, J - 1, k, k)

    scene = window.Scene()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene, reset_camera=False)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, find_objects=True)
    # 11x11 slice minus the 4x4 masked corner.
    npt.assert_equal(report.objects, 11 * 11 - 16)

    # Test that global colormap works
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask, scale=.25,
                                 colormap='blues', norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that the most basic odf_slicer instanciation works
    odf_actor = actor.odf_slicer(odfs)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that odf_slicer.display works properly
    scene.clear()
    scene.add(odf_actor)
    scene.add(actor.axes((11, 11, 11)))
    for i in range(11):
        odf_actor.display(i, None, None)
        if interactive:
            window.show(scene)
    for j in range(11):
        odf_actor.display(None, j, None)
        if interactive:
            window.show(scene)

    # With mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask, scale=.25,
                                 colormap='blues', norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # global_cm=True with colormap=None should raise an error
    npt.assert_raises(IOError, actor.odf_slicer, odfs, sphere=sphere,
                      mask=None, scale=.25, colormap=None, norm=False,
                      global_cm=True)

    # Dimension mismatch between sphere vertices and number
    # of SF coefficients will raise an error.
    npt.assert_raises(ValueError, actor.odf_slicer, odfs, mask=None,
                      sphere=get_sphere('repulsion200'), scale=.25)

    # colormap=None and global_cm=False results in directionally encoded colors
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=None, scale=.25,
                                 colormap=None, norm=False, global_cm=False)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that SH coefficients input works
    B = sh_to_sf_matrix(sphere, return_inv=False)
    odfs = np.zeros((11, 11, 11, B.shape[0]))
    odfs[..., 0] = 1.0
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B)

    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Dimension mismatch between sphere vertices and dimension of
    # B matrix will raise an error.
    npt.assert_raises(ValueError, actor.odf_slicer, odfs, mask=None,
                      sphere=get_sphere('repulsion200'))

    # Test that constant colormap color works. Also test that sphere
    # normals are oriented correctly. Will show purple spheres with
    # a white contour.
    odf_contour = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B,
                                   colormap=(255, 255, 255))
    odf_contour.GetProperty().SetAmbient(1.0)
    odf_contour.GetProperty().SetFrontfaceCulling(True)

    odf_actor = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B,
                                 colormap=(255, 0, 255), scale=0.4)
    scene.clear()
    scene.add(odf_contour)
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that we can change the sphere on an active actor
    new_sphere = get_sphere('symmetric362')
    new_B = sh_to_sf_matrix(new_sphere, return_inv=False)
    odf_actor.update_sphere(new_sphere.vertices, new_sphere.faces, new_B)
    if interactive:
        window.show(scene)

    del odf_actor
    del odfs
def plot_proj_shell(ms, use_sym=True, use_sphere=True, same_color=False,
                    rad=0.025, opacity=1.0, ofile=None, ores=(300, 300)):
    """ Plot each shell

    Parameters
    ----------
    ms: list of numpy.ndarray
        bvecs for each bvalue
    use_sym: boolean
        Plot symmetrical vectors
    use_sphere: boolean
        rendering of the sphere
    same_color: boolean
        use same color for all shell
    rad: float
        radius of each point
    opacity: float
        opacity for the shells
    ofile: str
        output filename
    ores: tuple
        resolution of the output png

    Returns
    -------
    None
    """
    import os

    global vtkcolors
    # The default palette covers 10 shells; generate a distinguishable
    # colormap when more are requested.
    if len(ms) > 10:
        vtkcolors = fury.colormap.distinguishable_colormap(nb_colors=len(ms))

    scene = window.Scene()
    scene.SetBackground(1, 1, 1)
    if use_sphere:
        sphere = get_sphere('symmetric724')
        shape = (1, 1, 1, sphere.vertices.shape[0])
        fid, fname = mkstemp(suffix='_odf_slicer.mmap')
        # Fix: mkstemp returns an open OS-level file descriptor that the
        # caller must close; np.memmap reopens the file by name, so the fd
        # was previously leaked on every call.
        os.close(fid)
        odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)
        odfs[:] = 1
        odfs[..., 0] = 1
        affine = np.eye(4)
        sphere_actor = actor.odf_slicer(odfs, affine, sphere=sphere,
                                        colormap='winter', scale=1.0,
                                        opacity=opacity)
        scene.add(sphere_actor)

    for i, shell in enumerate(ms):
        if same_color:
            i = 0
        pts_actor = actor.point(shell, vtkcolors[i], point_radius=rad)
        scene.add(pts_actor)
        if use_sym:
            # Also draw the antipodal points of the shell.
            pts_actor = actor.point(-shell, vtkcolors[i], point_radius=rad)
            scene.add(pts_actor)
    window.show(scene)
    if ofile:
        window.snapshot(scene, fname=ofile + '.png', size=ores)
def test_manifest_standard():
    """Exercise material.manifest_standard: unsupported actors/interpolation
    warn; ambient/diffuse/specular level, color and power change rendered
    pixel values; contour_from_roi is handled as a special case.

    Pixel probes index snapshots as ss[row, col, rgb]; expected values were
    presumably captured from a reference render — TODO confirm if they drift
    with VTK versions.
    """
    # Test non-supported property
    test_actor = actor.text_3d('Test')
    npt.assert_warns(UserWarning, material.manifest_standard, test_actor)

    center = np.array([[0, 0, 0]])

    # Test non-supported interpolation method
    test_actor = actor.square(center, directions=(1, 1, 1), colors=(0, 0, 1))
    npt.assert_warns(UserWarning, material.manifest_standard, test_actor,
                     interpolation='test')

    scene = window.Scene()  # Setup scene

    test_actor = actor.box(center, directions=(1, 1, 1), colors=(0, 0, 1),
                           scales=1)
    scene.add(test_actor)

    # scene.reset_camera()
    # window.show(scene)

    ss = window.snapshot(scene, size=(200, 200))
    actual = ss[75, 100, :] / 1000
    desired = np.array([0, 0, 201]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 75, :] / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 75, :] / 1000
    desired = np.array([0, 0, 85]) / 1000
    # TODO: check if camera affects this assert
    # npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Test ambient level
    material.manifest_standard(test_actor, ambient_level=1)
    ss = window.snapshot(scene, size=(200, 200))
    actual = ss[75, 100, :] / 1000
    desired = np.array([0, 0, 255]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 125, :] / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 75, :] / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Test ambient color
    material.manifest_standard(test_actor, ambient_level=.5,
                               ambient_color=(1, 0, 0))
    ss = window.snapshot(scene, size=(200, 200))
    actual = ss[75, 100, :] / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 125, :] / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 75, :] / 1000
    desired = np.array([0, 0, 212]) / 1000
    # TODO: check what affects this
    # npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Test diffuse level
    material.manifest_standard(test_actor, diffuse_level=.75)
    ss = window.snapshot(scene, size=(200, 200))
    actual = ss[75, 100, :] / 1000
    desired = np.array([0, 0, 151]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 125, :] / 1000
    desired = np.array([0, 0, 110]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 75, :] / 1000
    desired = np.array([0, 0, 151]) / 1000
    # TODO: check what affects this
    # npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Test diffuse color
    material.manifest_standard(test_actor, diffuse_level=.5,
                               diffuse_color=(1, 0, 0))
    ss = window.snapshot(scene, size=(200, 200))
    actual = ss[75, 100, :] / 1000
    desired = np.array([0, 0, 101]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 75, :] / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 125, :] / 1000
    desired = np.array([0, 0, 74]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Test specular level
    material.manifest_standard(test_actor, specular_level=1)
    ss = window.snapshot(scene, size=(200, 200))
    actual = ss[75, 100, :] / 1000
    desired = np.array([201, 201, 255]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 75, :] / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 125, :] / 1000
    desired = np.array([147, 147, 255]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Test specular power
    material.manifest_standard(test_actor, specular_level=1,
                               specular_power=5)
    ss = window.snapshot(scene, size=(200, 200))
    actual = ss[75, 100, :] / 1000
    desired = np.array([78, 78, 255]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 75, :] / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 125, :] / 1000
    desired = np.array([16, 16, 163]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Test specular color
    material.manifest_standard(test_actor, specular_level=1,
                               specular_color=(1, 0, 0), specular_power=5)
    ss = window.snapshot(scene, size=(200, 200))
    actual = ss[75, 100, :] / 1000
    desired = np.array([78, 0, 201]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 75, :] / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[125, 125, :] / 1000
    desired = np.array([16, 0, 147]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)

    scene = window.Scene()  # Special case: Contour from roi
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    test_actor = actor.contour_from_roi(data, color=np.array([1, 0, 1]))
    scene.add(test_actor)

    ss = window.snapshot(scene, size=(200, 200))
    actual = ss[100, 106, :] / 1000
    desired = np.array([253, 0, 253]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[100, 150, :] / 1000
    desired = np.array([180, 0, 180]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Applying the default standard material turns the magenta contour white.
    material.manifest_standard(test_actor)
    ss = window.snapshot(scene, size=(200, 200))
    actual = ss[100, 106, :] / 1000
    desired = np.array([253, 253, 253]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[100, 150, :] / 1000
    desired = np.array([180, 180, 180]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Restoring the diffuse color brings the magenta back.
    material.manifest_standard(test_actor, diffuse_color=(1, 0, 1))
    ss = window.snapshot(scene, size=(200, 200))
    actual = ss[100, 106, :] / 1000
    desired = np.array([253, 0, 253]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[100, 150, :] / 1000
    desired = np.array([180, 0, 180]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
def test_manifest_pbr_vtk():
    """Exercise material.manifest_pbr: unsupported actors warn; defaults,
    roughness and metallic parameters change rendered pixel values of a
    blue square probed at fixed snapshot coordinates."""
    # Test non-supported property
    test_actor = actor.text_3d('Test')
    npt.assert_warns(UserWarning, material.manifest_pbr, test_actor)

    # Test non-supported PBR interpolation
    test_actor = actor.scalar_bar()
    npt.assert_warns(UserWarning, material.manifest_pbr, test_actor)

    # Create tmp dir to save and query images
    # with TemporaryDirectory() as out_dir:
    #     tmp_fname = os.path.join(out_dir, 'tmp_img.png')  # Tmp image to test

    scene = window.Scene()  # Setup scene

    test_actor = actor.square(np.array([[0, 0, 0]]), directions=(0, 0, 0),
                              colors=(0, 0, 1))

    scene.add(test_actor)

    # Test basic actor
    # window.record(scene, out_path=tmp_fname, size=(200, 200),
    #               reset_camera=True)
    ss = window.snapshot(scene, size=(200, 200))
    # npt.assert_equal(os.path.exists(tmp_fname), True)
    # ss = load_image(tmp_fname)
    actual = ss[100, 100, :] / 1000
    desired = np.array([0, 0, 255]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[40, 40, :] / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Test default parameters
    material.manifest_pbr(test_actor)
    ss = window.snapshot(scene, size=(200, 200))
    # window.record(scene, out_path=tmp_fname, size=(200, 200),
    #               reset_camera=True)
    # npt.assert_equal(os.path.exists(tmp_fname), True)
    # ss = load_image(tmp_fname)
    actual = ss[100, 100, :] / 1000
    desired = np.array([66, 66, 165]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[40, 40, :] / 1000
    desired = np.array([40, 40, 157]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Test roughness
    material.manifest_pbr(test_actor, roughness=0)
    ss = window.snapshot(scene, size=(200, 200))
    # window.record(scene, out_path=tmp_fname, size=(200, 200),
    #               reset_camera=True)
    # npt.assert_equal(os.path.exists(tmp_fname), True)
    # ss = load_image(tmp_fname)
    actual = ss[100, 100, :] / 1000
    desired = np.array([0, 0, 155]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[40, 40, :] / 1000
    desired = np.array([0, 0, 153]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)

    # Test metallicity
    material.manifest_pbr(test_actor, metallic=1)
    ss = window.snapshot(scene, size=(200, 200))
    # window.record(scene, out_path=tmp_fname, size=(200, 200),
    #               reset_camera=True)
    # npt.assert_equal(os.path.exists(tmp_fname), True)
    # ss = load_image(tmp_fname)
    actual = ss[100, 100, :] / 1000
    desired = np.array([0, 0, 255]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
    actual = ss[40, 40, :] / 1000
    desired = np.array([0, 0, 175]) / 1000
    npt.assert_array_almost_equal(actual, desired, decimal=2)
def screenshot_fa_peaks(fa, peaks, directory='.'):
    """ Compute 3 view screenshot with peaks on FA.

    Parameters
    ----------
    fa : string
        FA filename.
    peaks : string
        Peak filename.
    directory : string
        Directory to save the mosaic.

    Returns
    -------
    name : string
        Path of the mosaic
    """
    # Fix: the original iterated `for j, slice_name in enumerate(slice_name)`,
    # shadowing the list it iterates with each string element — it worked only
    # because the iterator is created once. Renamed the list to slice_names.
    slice_names = ['sagittal', 'coronal', 'axial']

    # NOTE(review): get_data() is deprecated in recent nibabel in favor of
    # get_fdata(); kept as-is to avoid changing dtype/scaling behavior.
    data = nib.load(fa).get_data()
    evecs_data = nib.load(peaks).get_data()

    # Wrap the peak vectors as a single-peak (x, y, z, 1, 3) field.
    evecs = np.zeros(data.shape + (1, 3))
    evecs[:, :, :, 0, :] = evecs_data[...]

    middle = [data.shape[0] // 2 + 4, data.shape[1] // 2, data.shape[2] // 2]
    slice_display = [(middle[0], None, None),
                     (None, middle[1], None),
                     (None, None, middle[2])]

    concat = []
    for j, slice_name in enumerate(slice_names):
        # All three views are written to the same filename; the mosaic is
        # saved once after the loop.
        image_name = os.path.basename(str(peaks)).split(".")[0]
        name = os.path.join(directory, image_name + '.png')

        slice_actor = actor.slicer(data, interpolation='nearest', opacity=0.3)
        peak_actor = actor.peak_slicer(evecs, colors=None)
        peak_actor.GetProperty().SetLineWidth(2.5)

        slice_actor.display(slice_display[j][0], slice_display[j][1],
                            slice_display[j][2])
        peak_actor.display(slice_display[j][0], slice_display[j][1],
                           slice_display[j][2])

        renderer = window.ren()
        renderer.add(slice_actor)
        renderer.add(peak_actor)

        center = slice_actor.GetCenter()
        pos = None
        viewup = None
        if slice_name == "sagittal":
            pos = (center[0] - 350, center[1], center[2])
            viewup = (0, 0, -1)
        elif slice_name == "coronal":
            pos = (center[0], center[1] + 350, center[2])
            viewup = (0, 0, -1)
        elif slice_name == "axial":
            pos = (center[0], center[1], center[2] + 350)
            # NOTE(review): (0, -1, 1) is not a unit/axis-aligned view-up like
            # the other two views — confirm this is intentional.
            viewup = (0, -1, 1)

        camera = renderer.GetActiveCamera()
        camera.SetViewUp(viewup)
        camera.SetPosition(pos)
        camera.SetFocalPoint(center)

        img = window.snapshot(renderer, size=(1080, 1080), offscreen=True)
        if len(concat) == 0:
            concat = img
        else:
            concat = np.hstack((concat, img))

    imgs_comb = Image.fromarray(concat)
    imgs_comb.save(name)

    return name
def render(self, tractogram: Tractogram = None, filename: str = None):
    """Render the streamlines, either directly or through a file.

    Might render from "outside" the environment, like for comet.

    Parameters
    ----------
    tractogram: Tractogram, optional
        Object containing the streamlines and seeds.
    filename: str, optional
        If set, save the image at the specified location instead
        of displaying directly.
    """
    from fury import window, actor

    # Might be rendering from outside the environment: fall back to the
    # streamlines/seeds currently tracked on this instance.
    if tractogram is None:
        tractogram = Tractogram(
            streamlines=self.streamlines[:, :self.length],
            data_per_streamline={
                'seeds': self.starting_points
            })

    # Reshape peaks for displaying: split the flat coefficient axis M
    # into 5 peaks of M//5 components each.
    X, Y, Z, M = self.peaks.data.shape
    peaks = np.reshape(self.peaks.data, (X, Y, Z, 5, M//5))

    # Setup scene and actors
    scene = window.Scene()

    stream_actor = actor.streamtube(tractogram.streamlines)
    peak_actor = actor.peak_slicer(peaks,
                                   np.ones((X, Y, Z, M)),
                                   colors=(0.2, 0.2, 1.),
                                   opacity=0.5)
    dot_actor = actor.dots(tractogram.data_per_streamline['seeds'],
                           color=(1, 1, 1),
                           opacity=1,
                           dot_size=2.5)
    scene.add(stream_actor)
    scene.add(peak_actor)
    scene.add(dot_actor)
    scene.reset_camera_tight(0.95)

    # Save or display scene
    if filename is not None:
        # Offscreen snapshot into <experiment_path>/render/<filename>.
        directory = os.path.dirname(pjoin(self.experiment_path, 'render'))
        if not os.path.exists(directory):
            os.makedirs(directory)
        dest = pjoin(directory, filename)
        window.snapshot(
            scene,
            fname=dest,
            offscreen=True,
            size=(800, 800))
    else:
        # Interactive display.
        showm = window.ShowManager(scene, reset_camera=True)
        showm.initialize()
        showm.start()
def test_manifest_standard(interactive=False):
    """Apply ``material.manifest_standard`` to every supported actor type
    and check that rendering still produces the expected object counts.

    Bug fix: the original applied the material to ``surface_actor`` in
    every section (copy-paste), so the actor actually under test never
    received the standard material.  Each call now targets the actor
    created in its own section.
    """
    scene = window.Scene()  # Setup scene

    # Setup surface
    surface_actor = _generate_surface()
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)

    scene.add(surface_actor)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    scene.clear()  # Reset scene

    # Contour from roi setup
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data, affine,
                                     color=np.array([1, 0, 1]))
    material.manifest_standard(surface, ambient_level=.3,
                               diffuse_level=.25)

    scene.add(surface)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    scene.clear()  # Reset scene

    # Contour from label setup
    data = np.zeros((50, 50, 50))
    data[5:15, 1:10, 25] = 1.
    data[25:35, 1:10, 25] = 2.
    data[40:49, 1:10, 25] = 3.
    color = np.array([[255, 0, 0, 0.6],
                      [0, 255, 0, 0.5],
                      [0, 0, 255, 1.0]])
    surface = actor.contour_from_label(data, color=color)
    material.manifest_standard(surface, ambient_level=.3,
                               diffuse_level=.25)

    scene.add(surface)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Streamtube setup
    data1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    data2 = data1 + np.array([0.5, 0., 0.])
    data = [data1, data2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    tubes = actor.streamtube(data, colors, linewidth=.1)
    material.manifest_standard(tubes, ambient_level=.3,
                               diffuse_level=.25)

    scene.add(tubes)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 2)

    scene.clear()  # Reset scene

    # ODF slicer setup
    if have_dipy:
        from dipy.data import get_sphere
        from tempfile import mkstemp

        sphere = get_sphere('symmetric362')
        shape = (11, 11, 11, sphere.vertices.shape[0])
        fid, fname = mkstemp(suffix='_odf_slicer.mmap')
        odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)
        odfs[:] = 1
        affine = np.eye(4)
        mask = np.ones(odfs.shape[:3])
        mask[:4, :4, :4] = 0
        odfs[..., 0] = 1
        odf_actor = actor.odf_slicer(odfs, affine, mask=mask, sphere=sphere,
                                     scale=.25, colormap='blues')
        material.manifest_standard(odf_actor, ambient_level=.3,
                                   diffuse_level=.25)

        k = 5
        I, J, _ = odfs.shape[:3]
        odf_actor.display_extent(0, I, 0, J, k, k)
        odf_actor.GetProperty().SetOpacity(1.0)

        scene.add(odf_actor)
        scene.reset_camera()
        scene.reset_clipping_range()

        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 11 * 11)

        scene.clear()  # Reset scene

    # Tensor slicer setup
    if have_dipy:
        from dipy.data import get_sphere

        sphere = get_sphere('symmetric724')
        evals = np.array([1.4, .35, .35]) * 10 ** (-3)
        evecs = np.eye(3)
        mevals = np.zeros((3, 2, 4, 3))
        mevecs = np.zeros((3, 2, 4, 3, 3))
        mevals[..., :] = evals
        mevecs[..., :, :] = evecs
        affine = np.eye(4)
        scene = window.Scene()
        tensor_actor = actor.tensor_slicer(mevals, mevecs, affine=affine,
                                           sphere=sphere, scale=.3)
        material.manifest_standard(tensor_actor, ambient_level=.3,
                                   diffuse_level=.25)

        _, J, K = mevals.shape[:3]
        tensor_actor.display_extent(0, 1, 0, J, 0, K)

        scene.add(tensor_actor)
        scene.reset_camera()
        scene.reset_clipping_range()

        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 4)

        scene.clear()  # Reset scene

    # Point setup
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    opacity = 0.5
    points_actor = actor.point(points, colors, opacity=opacity)
    material.manifest_standard(points_actor, ambient_level=.3,
                               diffuse_level=.25)

    scene.add(points_actor)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Sphere setup
    xyzr = np.array([[0, 0, 0, 10], [100, 0, 0, 25], [200, 0, 0, 50]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.99]])
    opacity = 0.5

    sphere_actor = actor.sphere(centers=xyzr[:, :3], colors=colors[:],
                                radii=xyzr[:, 3], opacity=opacity)
    material.manifest_standard(sphere_actor, ambient_level=.3,
                               diffuse_level=.25)

    scene.add(sphere_actor)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Advanced geometry actors setup (Arrow, cone, cylinder)
    xyz = np.array([[0, 0, 0], [50, 0, 0], [100, 0, 0]])
    dirs = np.array([[0, 1, 0], [1, 0, 0], [0, 0.5, 0.5]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [1, 1, 0, 1]])
    heights = np.array([5, 7, 10])

    actor_list = [[actor.cone, {'directions': dirs, 'resolution': 8}],
                  [actor.arrow, {'directions': dirs, 'resolution': 9}],
                  [actor.cylinder, {'directions': dirs}]]

    for act_func, extra_args in actor_list:
        aga_actor = act_func(centers=xyz, colors=colors[:], heights=heights,
                             **extra_args)
        material.manifest_standard(aga_actor, ambient_level=.3,
                                   diffuse_level=.25)

        scene.add(aga_actor)
        scene.reset_camera()
        scene.reset_clipping_range()

        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 3)

        scene.clear()

    # Basic geometry actors (Box, cube, frustum, octagonalprism, rectangle,
    # square)
    centers = np.array([[4, 0, 0], [0, 4, 0], [0, 0, 0]])
    colors = np.array([[1, 0, 0, 0.4], [0, 1, 0, 0.8], [0, 0, 1, 0.5]])
    directions = np.array([[1, 1, 0]])
    scale_list = [1, 2, (1, 1, 1), [3, 2, 1], np.array([1, 2, 3]),
                  np.array([[1, 2, 3], [1, 3, 2], [3, 1, 2]])]

    actor_list = [[actor.box, {}], [actor.cube, {}], [actor.frustum, {}],
                  [actor.octagonalprism, {}], [actor.rectangle, {}],
                  [actor.square, {}]]

    for act_func, extra_args in actor_list:
        for scale in scale_list:
            scene = window.Scene()
            bga_actor = act_func(centers=centers, directions=directions,
                                 colors=colors, scales=scale, **extra_args)
            material.manifest_standard(bga_actor, ambient_level=.3,
                                       diffuse_level=.25)

            scene.add(bga_actor)

            arr = window.snapshot(scene)
            report = window.analyze_snapshot(arr)
            msg = 'Failed with {}, scale={}'.format(act_func.__name__, scale)
            npt.assert_equal(report.objects, 3, err_msg=msg)

            scene.clear()

    # Cone setup using vertices
    centers = np.array([[0, 0, 0], [20, 0, 0], [40, 0, 0]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.99]])
    vertices = np.array([[0.0, 0.0, 0.0], [0.0, 10.0, 0.0],
                         [10.0, 0.0, 0.0], [0.0, 0.0, 10.0]])
    faces = np.array([[0, 1, 3], [0, 1, 2]])

    cone_actor = actor.cone(centers=centers, directions=directions,
                            colors=colors[:], vertices=vertices, faces=faces)
    material.manifest_standard(cone_actor, ambient_level=.3,
                               diffuse_level=.25)

    scene.add(cone_actor)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Superquadric setup
    centers = np.array([[8, 0, 0], [0, 8, 0], [0, 0, 0]])
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    directions = np.random.rand(3, 3)
    scales = [1, 2, 3]
    roundness = np.array([[1, 1], [1, 2], [2, 1]])

    sq_actor = actor.superquadric(centers, roundness=roundness,
                                  directions=directions,
                                  colors=colors.astype(np.uint8),
                                  scales=scales)
    material.manifest_standard(sq_actor, ambient_level=.3,
                               diffuse_level=.25)

    scene.add(sq_actor)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Label setup
    text_actor = actor.label("Hello")
    material.manifest_standard(text_actor, ambient_level=.3,
                               diffuse_level=.25)

    scene.add(text_actor)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 5)

    scene.clear()  # Reset scene

    # Texture setup
    arr = (255 * np.ones((512, 212, 4))).astype('uint8')
    arr[20:40, 20:40, :] = np.array([255, 0, 0, 255], dtype='uint8')
    tp2 = actor.texture(arr)
    material.manifest_standard(tp2, ambient_level=.3,
                               diffuse_level=.25)

    scene.add(tp2)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    scene.clear()  # Reset scene

    # Texture on sphere setup
    arr = 255 * np.ones((810, 1620, 3), dtype='uint8')
    rows, cols, _ = arr.shape
    rs = rows // 2
    cs = cols // 2
    w = 150 // 2
    arr[rs - w: rs + w, cs - 10 * w: cs + 10 * w] = np.array([255, 127, 0])
    tsa = actor.texture_on_sphere(arr)
    material.manifest_standard(tsa, ambient_level=.3,
                               diffuse_level=.25)

    scene.add(tsa)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    # NOTE: From this point on, these actors don't have full support for PBR
    # interpolation. This is, the test passes but there is no evidence of the
    # desired effect.
    """
    # Setup slicer
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine, value_range=[data.min(), data.max()])
    slicer.display(None, None, 25)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(slicer)
    """
    """
    # Line setup
    data1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    data2 = data1 + np.array([0.5, 0., 0.])
    data = [data1, data2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    lines = actor.line(data, colors, linewidth=5)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(lines)
    """
    """
    # Scalar bar setup
    lut = actor.colormap_lookup_table(
        scale_range=(0., 100.), hue_range=(0., 0.1), saturation_range=(1, 1),
        value_range=(1., 1))
    sb_actor = actor.scalar_bar(lut, ' ')
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(sb_actor)
    """
    """
    # Axes setup
    axes = actor.axes()
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(axes)
    """
    """
    # Peak slicer setup
    _peak_dirs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='f4')
    # peak_dirs.shape = (1, 1, 1) + peak_dirs.shape
    peak_dirs = np.zeros((11, 11, 11, 3, 3))
    peak_dirs[:, :, :] = _peak_dirs
    peak_actor = actor.peak_slicer(peak_dirs)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(peak_actor)
    """
    """
    # Dots setup
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    dots_actor = actor.dots(points, color=(0, 255, 0))
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(dots_actor)
    """
    """
    # Text3D setup
    msg = 'I \nlove\n FURY'
    txt_actor = actor.text_3d(msg)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(txt_actor)
    """
    """
    # Figure setup
    arr = (255 * np.ones((512, 212, 4))).astype('uint8')
    arr[20:40, 20:40, 3] = 0
    tp = actor.figure(arr)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(tp)
    """
    """
    # SDF setup
    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0]]) * 11
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    scales = [1, 2, 3]
    primitive = ['sphere', 'ellipsoid', 'torus']

    sdf_actor = actor.sdf(centers, directions=directions, colors=colors,
                          primitives=primitive, scales=scales)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(sdf_actor)
    """

    # NOTE: For these last set of actors, there is not support for PBR
    # interpolation at all.
    """
    # Billboard setup
    centers = np.array([[0, 0, 0], [5, -5, 5], [-7, 7, -7], [10, 10, 10],
                        [10.5, 11.5, 11.5], [12, -12, -12], [-17, 17, 17],
                        [-22, -22, 22]])
    colors = np.array([[1, 1, 0], [0, 0, 0], [1, 0, 1], [0, 0, 1], [1, 1, 1],
                       [1, 0, 0], [0, 1, 0], [0, 1, 1]])
    scales = [6, .4, 1.2, 1, .2, .7, 3, 2]
    """
    fake_sphere = \
        """
        float len = length(point);
        float radius = 1.;
        if (len > radius)
            discard;
        vec3 normalizedPoint = normalize(vec3(point.xy, sqrt(1. - len)));
        vec3 direction = normalize(vec3(1., 1., 1.));
        float df_1 = max(0, dot(direction, normalizedPoint));
        float sf_1 = pow(df_1, 24);
        fragOutput0 = vec4(max(df_1 * color, sf_1 * vec3(1)), 1);
        """
    """
    billboard_actor = actor.billboard(centers, colors=colors, scales=scales,
                                      fs_impl=fake_sphere)
    material.manifest_pbr(billboard_actor)
    scene.add(billboard_actor)
    """
    if interactive:
        window.show(scene)
def test_odf_slicer(interactive=False):
    """Exercise ``actor.odf_slicer`` on a memory-mapped ODF volume:
    slicing along each axis, masking, and normalized vs. global colormap
    modes, cross-checked against an FA ``actor.slicer``.
    """
    # Uniform ODFs over an 11x11x11 grid, backed by a temporary memmap so
    # the slicer also gets exercised against a np.memmap input.
    sphere = get_sphere('symmetric362')
    shape = (11, 11, 11, sphere.vertices.shape[0])
    fid, fname = mkstemp(suffix='_odf_slicer.mmap')
    print(fid)
    print(fname)
    odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)
    odfs[:] = 1
    affine = np.eye(4)
    scene = window.Scene()
    # Mask out one corner of the grid.
    mask = np.ones(odfs.shape[:3])
    mask[:4, :4, :4] = 0
    odfs[..., 0] = 1
    odf_actor = actor.odf_slicer(odfs, affine,
                                 mask=mask, sphere=sphere, scale=.25,
                                 colormap='plasma')
    # FA volume with bright borders and one bright interior voxel, used
    # as a companion slicer actor.
    fa = 0. * np.zeros(odfs.shape[:3])
    fa[:, 0, :] = 1.
    fa[:, -1, :] = 1.
    fa[0, :, :] = 1.
    fa[-1, :, :] = 1.
    fa[5, 5, 5] = 1
    k = 5
    I, J, K = odfs.shape[:3]
    fa_actor = actor.slicer(fa, affine)
    fa_actor.display_extent(0, I, 0, J, k, k)
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    # Show the k-th axial slice of glyphs only.
    odf_actor.display_extent(0, I, 0, J, k, k)
    odf_actor.GetProperty().SetOpacity(1.0)
    if interactive:
        window.show(scene, reset_camera=False)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, find_objects=True)
    # One glyph per voxel in the 11x11 slice.
    npt.assert_equal(report.objects, 11 * 11)
    scene.clear()
    scene.add(fa_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)
    # Single-voxel mask with identity-free (None) affine and global
    # colormap, no per-glyph normalization.
    mask[:] = 0
    mask[5, 5, 5] = 1
    fa[5, 5, 5] = 0
    fa_actor = actor.slicer(fa, None)
    fa_actor.display(None, None, 5)
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)
    scene.clear()
    scene.add(fa_actor)
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)
    scene.clear()
    scene.add(odf_actor)
    scene.add(fa_actor)
    # Full mask again; sweep the display through every X then every Y
    # slice together with the FA slicer.
    odfs[:, :, :] = 1
    mask = np.ones(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.add(fa_actor)
    scene.add(actor.axes((11, 11, 11)))
    for i in range(11):
        odf_actor.display(i, None, None)
        fa_actor.display(i, None, None)
        if interactive:
            window.show(scene)
    for j in range(11):
        odf_actor.display(None, j, None)
        fa_actor.display(None, j, None)
        if interactive:
            window.show(scene)
    # with mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)
    report = window.analyze_scene(scene)
    npt.assert_equal(report.actors, 1)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # Release the memmap before deleting the backing file (required on
    # Windows, harmless elsewhere).
    del odf_actor
    odfs._mmap.close()
    del odfs
    os.close(fid)
    os.remove(fname)
def test_frame_rate_and_anti_aliasing():
    """Testing frame rate with/out anti-aliasing"""

    length_ = 200
    multi_samples = 32
    max_peels = 8

    # A sine-wave streamline, replicated 1001 times with increasing Y
    # offsets to build a reasonably heavy scene.
    st_x = np.arange(length_)
    st_y = np.sin(np.arange(length_))
    st_z = np.zeros(st_x.shape)
    st = np.zeros((length_, 3))
    st[:, 0] = st_x
    st[:, 1] = st_y
    st[:, 2] = st_z

    all_st = []
    all_st.append(st)
    for i in range(1000):
        all_st.append(st + i * np.array([0., .5, 0]))

    # st_actor = actor.line(all_st, linewidth=1)
    # TODO: textblock disappears when lod=True
    st_actor = actor.streamtube(all_st, linewidth=0.1, lod=False)

    scene = window.Scene()
    scene.background((1, 1., 1))

    # quick game style antialiasing
    scene.fxaa_on()
    scene.fxaa_off()

    # the good staff is later with multi-sampling

    tb = ui.TextBlock2D(font_size=40, color=(1, 0.5, 0))

    panel = ui.Panel2D(position=(400, 400), size=(400, 400))
    panel.add_element(tb, (0.2, 0.5))

    counter = itertools.count()
    showm = window.ShowManager(scene,
                               size=(1980, 1080), reset_camera=False,
                               order_transparent=True,
                               multi_samples=multi_samples,
                               max_peels=max_peels,
                               occlusion_ratio=0.0)

    showm.initialize()
    scene.add(panel)
    scene.add(st_actor)
    scene.reset_camera_tight()
    scene.zoom(5)

    # Accumulates the measured FPS values across both render loops; a
    # class attribute is used so the closure below can mutate it.
    class FrameRateHolder(object):
        fpss = []

    frh = FrameRateHolder()

    def timer_callback(_obj, _event):
        cnt = next(counter)
        if cnt % 1 == 0:
            # Sample the current frame rate and show it in the panel.
            fps = np.round(scene.frame_rate, 0)
            frh.fpss.append(fps)
            msg = "FPS " + str(fps) + ' ' + str(cnt)
            tb.message = msg
            showm.render()
        if cnt > 10:
            showm.exit()

    # Run every 200 milliseconds
    showm.add_timer_callback(True, 200, timer_callback)
    showm.start()

    arr = window.snapshot(scene, size=(1980, 1080),
                          offscreen=True,
                          order_transparent=True,
                          multi_samples=multi_samples,
                          max_peels=max_peels,
                          occlusion_ratio=0.0)
    assert_greater(np.sum(arr), 0)
    # TODO: check why in osx we have issues in Azure
    if not skip_osx:
        assert_greater(np.median(frh.fpss), 0)

    # Reset FPS samples (instance attribute shadows the class one) and
    # repeat the run with multi-sampling disabled.
    frh.fpss = []
    counter = itertools.count()
    multi_samples = 0
    showm = window.ShowManager(scene,
                               size=(1980, 1080), reset_camera=False,
                               order_transparent=True,
                               multi_samples=multi_samples,
                               max_peels=max_peels,
                               occlusion_ratio=0.0)

    showm.initialize()
    showm.add_timer_callback(True, 200, timer_callback)
    showm.start()

    arr2 = window.snapshot(scene, size=(1980, 1080),
                           offscreen=True,
                           order_transparent=True,
                           multi_samples=multi_samples,
                           max_peels=max_peels,
                           occlusion_ratio=0.0)
    assert_greater(np.sum(arr2), 0)
    if not skip_osx:
        assert_greater(np.median(frh.fpss), 0)
def test_grid(_interactive=False):
    """Check the ``grid`` container with and without captions: six cube
    actors laid out in a grid, rendered through a short timer loop.
    """
    vol1 = np.zeros((100, 100, 100))
    vol1[25:75, 25:75, 25:75] = 100
    contour_actor1 = actor.contour_from_roi(vol1, np.eye(4),
                                            (1., 0, 0), 1.)

    vol2 = np.zeros((100, 100, 100))
    vol2[25:75, 25:75, 25:75] = 100

    contour_actor2 = actor.contour_from_roi(vol2, np.eye(4),
                                            (1., 0.5, 0), 1.)
    vol3 = np.zeros((100, 100, 100))
    vol3[25:75, 25:75, 25:75] = 100

    contour_actor3 = actor.contour_from_roi(vol3, np.eye(4),
                                            (1., 0.5, 0.5), 1.)

    scene = window.Scene()
    actors = []
    texts = []

    actors.append(contour_actor1)
    text_actor1 = actor.text_3d('cube 1', justification='center')
    texts.append(text_actor1)

    actors.append(contour_actor2)
    text_actor2 = actor.text_3d('cube 2', justification='center')
    texts.append(text_actor2)

    actors.append(contour_actor3)
    text_actor3 = actor.text_3d('cube 3', justification='center')
    texts.append(text_actor3)

    # Captions 4-6 are plain strings on purpose: grid() must accept both
    # text actors and raw strings as captions.
    actors.append(shallow_copy(contour_actor1))
    text_actor1 = 'cube 4'
    texts.append(text_actor1)

    actors.append(shallow_copy(contour_actor2))
    text_actor2 = 'cube 5'
    texts.append(text_actor2)

    actors.append(shallow_copy(contour_actor3))
    text_actor3 = 'cube 6'
    texts.append(text_actor3)

    # show the grid without the captions
    container = grid(actors=actors, captions=None,
                     caption_offset=(0, -40, 0),
                     cell_padding=(10, 10), dim=(2, 3))

    scene.add(container)

    scene.projection('orthogonal')

    counter = itertools.count()

    show_m = window.ShowManager(scene)

    show_m.initialize()

    def timer_callback(_obj, _event):
        nonlocal counter
        cnt = next(counter)
        # show_m.scene.zoom(1)
        show_m.render()
        if cnt == 4:
            show_m.exit()
            show_m.destroy_timers()

    show_m.add_timer_callback(True, 200, timer_callback)
    show_m.start()

    arr = window.snapshot(scene)
    # Threshold away faint pixels so analyze_snapshot only counts the
    # six solid cubes.
    arr[arr < 20] = 0
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 6)

    scene.rm_all()

    counter = itertools.count()
    show_m = window.ShowManager(scene)
    show_m.initialize()

    # show the grid with the captions
    container = grid(actors=actors, captions=texts,
                     caption_offset=(0, -50, 0),
                     cell_padding=(10, 10),
                     dim=(3, 3))

    scene.add(container)

    show_m.add_timer_callback(True, 200, timer_callback)
    show_m.start()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    # Captions add extra detected objects on top of the six cubes.
    npt.assert_equal(report.objects > 6, True)
def test_grid_ui(interactive=False):
    """Check ``ui.GridUI`` with nine captioned cube actors: rendering via
    a timer loop, then interaction via recorded events.
    """
    vol1 = np.zeros((100, 100, 100))
    vol1[25:75, 25:75, 25:75] = 100

    colors = distinguishable_colormap(nb_colors=3)
    contour_actor1 = actor.contour_from_roi(vol1, np.eye(4),
                                            colors[0], 1.)

    vol2 = np.zeros((100, 100, 100))
    vol2[25:75, 25:75, 25:75] = 100

    contour_actor2 = actor.contour_from_roi(vol2, np.eye(4),
                                            colors[1], 1.)

    vol3 = np.zeros((100, 100, 100))
    vol3[25:75, 25:75, 25:75] = 100

    contour_actor3 = actor.contour_from_roi(vol3, np.eye(4),
                                            colors[2], 1.)

    scene = window.Scene()
    actors = []
    texts = []

    actors.append(contour_actor1)
    text_actor1 = actor.text_3d('cube 1', justification='center')
    texts.append(text_actor1)

    actors.append(contour_actor2)
    text_actor2 = actor.text_3d('cube 2', justification='center')
    texts.append(text_actor2)

    actors.append(contour_actor3)
    text_actor3 = actor.text_3d('cube 3', justification='center')
    texts.append(text_actor3)

    # Cubes 4-9 are shallow copies sharing the original geometry.
    actors.append(shallow_copy(contour_actor1))
    text_actor1 = actor.text_3d('cube 4', justification='center')
    texts.append(text_actor1)

    actors.append(shallow_copy(contour_actor2))
    text_actor2 = actor.text_3d('cube 5', justification='center')
    texts.append(text_actor2)

    actors.append(shallow_copy(contour_actor3))
    text_actor3 = actor.text_3d('cube 6', justification='center')
    texts.append(text_actor3)

    actors.append(shallow_copy(contour_actor1))
    text_actor1 = actor.text_3d('cube 7', justification='center')
    texts.append(text_actor1)

    actors.append(shallow_copy(contour_actor2))
    text_actor2 = actor.text_3d('cube 8', justification='center')
    texts.append(text_actor2)

    actors.append(shallow_copy(contour_actor3))
    text_actor3 = actor.text_3d('cube 9', justification='center')
    texts.append(text_actor3)

    counter = itertools.count()
    show_m = window.ShowManager(scene)
    show_m.initialize()

    def timer_callback(_obj, _event):
        cnt = next(counter)
        show_m.scene.zoom(1)
        show_m.render()
        if cnt == 10:
            show_m.exit()
            show_m.destroy_timers()

    # show the grid with the captions
    grid_ui = ui.GridUI(actors=actors, captions=texts,
                        caption_offset=(0, -50, 0),
                        cell_padding=(60, 60),
                        dim=(3, 3),
                        rotation_axis=(1, 0, 0))

    scene.add(grid_ui)

    show_m.add_timer_callback(True, 200, timer_callback)
    show_m.start()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    # Nine cubes plus captions -> more than nine detected objects.
    npt.assert_equal(report.objects > 9, True)

    # this needs to happen automatically when start() ends.
    for act in actors:
        act.RemoveAllObservers()

    filename = "test_grid_ui"
    recording_filename = pjoin(DATA_DIR, filename + ".log.gz")
    expected_events_counts_filename = pjoin(DATA_DIR, filename + ".json")

    current_size = (900, 600)
    scene = window.Scene()
    show_manager = window.ShowManager(scene,
                                      size=current_size,
                                      title="FURY GridUI")
    show_manager.initialize()

    grid_ui2 = ui.GridUI(actors=actors, captions=texts,
                         caption_offset=(0, -50, 0),
                         cell_padding=(60, 60),
                         dim=(3, 3),
                         rotation_axis=None)

    scene.add(grid_ui2)

    event_counter = EventCounter()
    event_counter.monitor(grid_ui2)

    if interactive:
        show_manager.start()

    # Replay mode by default; set recording=True to regenerate the
    # stored event log and expected counts.
    recording = False

    if recording:
        # Record the following events
        # 1. Left click on top left box (will rotate the box)
        show_manager.record_events_to_file(recording_filename)
        # print(list(event_counter.events_counts.items()))
        event_counter.save(expected_events_counts_filename)
    else:
        show_manager.play_events_from_file(recording_filename)
        expected = EventCounter.load(expected_events_counts_filename)
        event_counter.check_counts(expected)
def test_contour_from_roi(interactive=False):
    """Check ``actor.contour_from_roi``: single/double ROI object counts,
    input validation, and (with dipy) a real tracking-based seed ROI.
    """
    # Render volume
    scene = window.Scene()
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data, affine,
                                     color=np.array([1, 0, 1]),
                                     opacity=.5)
    scene.add(surface)

    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test Errors: 1D input must be rejected.
    npt.assert_raises(ValueError, actor.contour_from_roi, np.ones(50))

    # Test binarization: two disjoint regions -> two objects.
    scene2 = window.Scene()
    data2 = np.zeros((50, 50, 50))
    data2[20:30, 25, 25] = 1.
    data2[35:40, 25, 25] = 1.

    affine = np.eye(4)
    surface2 = actor.contour_from_roi(data2, affine,
                                      color=np.array([0, 1, 1]),
                                      opacity=.5)
    scene2.add(surface2)

    scene2.reset_camera()
    scene2.reset_clipping_range()
    if interactive:
        window.show(scene2)

    arr = window.snapshot(scene, 'test_surface.png', offscreen=True)
    arr2 = window.snapshot(scene2, 'test_surface2.png', offscreen=True)

    report = window.analyze_snapshot(arr, find_objects=True)
    report2 = window.analyze_snapshot(arr2, find_objects=True)

    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report2.objects, 2)

    # test on real streamlines using tracking example
    if have_dipy:
        from dipy.data import read_stanford_labels
        from dipy.reconst.shm import CsaOdfModel
        from dipy.data import default_sphere
        from dipy.direction import peaks_from_model
        from fury.colormap import line_colors
        from dipy.tracking import utils
        # Import names moved between dipy versions; support both.
        try:
            from dipy.tracking.local import ThresholdTissueClassifier \
                as ThresholdStoppingCriterion
            from dipy.tracking.local import LocalTracking
        except ImportError:
            from dipy.tracking.stopping_criterion import \
                ThresholdStoppingCriterion
            from dipy.tracking.local_tracking import LocalTracking

        hardi_img, gtab, labels_img = read_stanford_labels()
        data = np.asanyarray(hardi_img.dataobj)
        labels = np.asanyarray(labels_img.dataobj)
        affine = hardi_img.affine

        white_matter = (labels == 1) | (labels == 2)

        csa_model = CsaOdfModel(gtab, sh_order=6)
        csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                                     relative_peak_threshold=.8,
                                     min_separation_angle=45,
                                     mask=white_matter)

        classifier = ThresholdStoppingCriterion(csa_peaks.gfa, .25)

        seed_mask = labels == 2
        seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1],
                                      affine=affine)

        # Initialization of LocalTracking.
        # The computation happens in the next step.
        streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
                                    step_size=2)

        # Compute streamlines and store as a list.
        streamlines = list(streamlines)

        # Prepare the display objects.
        streamlines_actor = actor.line(streamlines, line_colors(streamlines))
        seedroi_actor = actor.contour_from_roi(seed_mask, affine,
                                               [0, 1, 1], 0.5)

        # Create the 3d display.
        r = window.Scene()
        r2 = window.Scene()
        r.add(streamlines_actor)
        arr3 = window.snapshot(r, 'test_surface3.png', offscreen=True)
        report3 = window.analyze_snapshot(arr3, find_objects=True)
        r2.add(streamlines_actor)
        r2.add(seedroi_actor)
        arr4 = window.snapshot(r2, 'test_surface4.png', offscreen=True)
        report4 = window.analyze_snapshot(arr4, find_objects=True)

        # assert that the seed ROI rendering is not far
        # away from the streamlines (affine error)
        npt.assert_equal(report3.objects, report4.objects)
def test_add_shader_callback():
    """Exercise ``add_shader_callback``: uniform updates during a
    timer-driven render loop, priority ordering of multiple callbacks,
    and removal of individual observers.
    """
    cube = generate_cube_with_effect()

    showm = window.ShowManager()
    showm.scene.add(cube)

    # Frame counter shared between the timer callback and the shader
    # callback via attribute mutation.
    class Timer(object):
        idx = 0.0

    timer = Timer()

    def timer_callback(obj, event):
        # nonlocal timer, showm
        timer.idx += 1.0
        showm.render()
        if timer.idx > 90:
            showm.exit()

    def my_cbk(_caller, _event, calldata=None):
        program = calldata
        if program is not None:
            try:
                # The "time" uniform may not be bound on every pass;
                # ignore those frames.
                program.SetUniformf("time", timer.idx)
            except ValueError:
                pass

    add_shader_callback(cube, my_cbk)

    showm.initialize()
    showm.add_timer_callback(True, 100, timer_callback)
    showm.start()

    arr = window.snapshot(showm.scene, offscreen=True)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    # Priority / removal checks on a simple cone actor.
    cone_actor = actor.cone(np.array([[0, 0, 0]]),
                            np.array([[0, 1, 0]]), (0, 0, 1))

    test_values = []

    def callbackLow(_caller, _event, calldata=None):
        program = calldata
        if program is not None:
            test_values.append(0)

    id_observer = add_shader_callback(cone_actor, callbackLow, 0)

    # A non-numeric priority must be rejected.
    with pytest.raises(Exception):
        add_shader_callback(cone_actor, callbackLow, priority='str')

    mapper = cone_actor.GetMapper()
    mapper.RemoveObserver(id_observer)

    scene = window.Scene()
    scene.add(cone_actor)

    # Render once (snapshot) purely for its side effect of invoking the
    # shader callbacks; with the observer removed nothing must fire.
    window.snapshot(scene, size=(200, 200))
    assert len(test_values) == 0

    test_values = []

    def callbackHigh(_caller, _event, calldata=None):
        program = calldata
        if program is not None:
            test_values.append(999)

    def callbackMean(_caller, _event, calldata=None):
        program = calldata
        if program is not None:
            test_values.append(500)

    add_shader_callback(cone_actor, callbackHigh, 999)
    add_shader_callback(cone_actor, callbackLow, 0)
    id_mean = add_shader_callback(cone_actor, callbackMean, 500)

    # check the priority of each call
    window.snapshot(scene, size=(200, 200))
    assert np.abs(
        [test_values[0] - 999,
         test_values[1] - 500,
         test_values[2] - 0]).sum() == 0

    # check if the correct observer was removed
    mapper.RemoveObserver(id_mean)
    test_values = []
    window.snapshot(scene, size=(200, 200))
    assert np.abs(
        [test_values[0] - 999,
         test_values[1] - 0]).sum() == 0