Example #1
def test_slicer():
    renderer = window.renderer()
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine)
    slicer.display(None, None, 25)
    renderer.add(slicer)

    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # copy the pixels into a numpy array directly
    arr = window.snapshot(renderer, 'test_slicer.png', offscreen=True)
    import scipy
    print(scipy.__version__)
    print(scipy.__file__)

    print(arr.sum())
    print(np.sum(arr == 0))
    print(np.sum(arr > 0))
    print(arr.shape)
    print(arr.dtype)

    report = window.analyze_snapshot(arr, find_objects=True)

    npt.assert_equal(report.objects, 1)
    # print(arr[..., 0])

    # The slicer can directly cut out a smaller part of the image
    slicer.display_extent(10, 30, 10, 30, 35, 35)
    renderer.ResetCamera()

    renderer.add(slicer)

    # save the pixels to a png file instead of a numpy array
    with InTemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        # window.show(renderer)
        window.snapshot(renderer, fname, offscreen=True)
        report = window.analyze_snapshot(fname, find_objects=True)
        npt.assert_equal(report.objects, 1)

    npt.assert_raises(ValueError, actor.slicer, np.ones(10))

    renderer.clear()

    rgb = np.zeros((30, 30, 30, 3))
    rgb[..., 0] = 1.
    rgb_actor = actor.slicer(rgb)

    renderer.add(rgb_actor)

    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report.colors_found, [True])

    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    renderer.clear()
    slicer_lut = actor.slicer(data, lookup_colormap=lut)

    slicer_lut.display(10, None, None)
    slicer_lut.display(None, 10, None)
    slicer_lut.display(None, None, 10)

    slicer_lut.opacity(0.5)
    slicer_lut.tolerance(0.03)
    slicer_lut2 = slicer_lut.copy()
    npt.assert_equal(slicer_lut2.GetOpacity(), 0.5)
    npt.assert_equal(slicer_lut2.picker.GetTolerance(), 0.03)
    slicer_lut2.opacity(1)
    slicer_lut2.tolerance(0.025)
    slicer_lut2.display(None, None, 10)
    renderer.add(slicer_lut2)

    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)

    renderer.clear()

    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])
    slicer = actor.slicer(data, affine, interpolation='nearest')
    slicer.display(None, None, 25)

    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(data.shape, slicer.shape)

    renderer.clear()

    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])

    from dipy.align.reslice import reslice

    data2, affine2 = reslice(data, affine, zooms=(1, 3, 2),
                             new_zooms=(1, 1, 1))

    slicer = actor.slicer(data2, affine2, interpolation='linear')
    slicer.display(None, None, 25)

    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    # window.show(renderer, reset_camera=False)
    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_array_equal([1, 3, 2] * np.array(data.shape),
                           np.array(slicer.shape))
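
A note on the recurring pattern: every example on this page builds a scene or renderer, renders it offscreen with window.snapshot, and inspects the returned RGB array with window.analyze_snapshot. Below is a minimal, self-contained sketch of that loop; it uses only calls already shown on this page (window.Scene as in the later examples) together with synthetic data.

import numpy as np
import numpy.testing as npt
from fury import actor, window

scene = window.Scene()
data = 255 * np.random.rand(50, 50, 50)        # synthetic volume
slicer = actor.slicer(data, np.eye(4))
slicer.display(None, None, 25)                 # show the axial slice at z=25
scene.add(slicer)
scene.reset_camera()
scene.reset_clipping_range()

arr = window.snapshot(scene, offscreen=True)              # render to a numpy array
report = window.analyze_snapshot(arr, find_objects=True)  # count connected bright objects
npt.assert_equal(report.objects, 1)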
Example #2
def test_bundle_maps():
    renderer = window.renderer()
    bundle = fornix_streamlines()
    bundle, shift = center_streamlines(bundle)

    mat = np.array([[1, 0, 0, 100],
                    [0, 1, 0, 100],
                    [0, 0, 1, 100],
                    [0, 0, 0, 1.]])

    bundle = transform_streamlines(bundle, mat)

    # metric = np.random.rand(*(200, 200, 200))
    metric = 100 * np.ones((200, 200, 200))

    # add lower values
    metric[100, :, :] = 100 * 0.5

    # create a nice orange-red colormap
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    window.add(renderer, line)
    window.add(renderer, actor.scalar_bar(lut, ' '))

    report = window.analyze_renderer(renderer)

    npt.assert_almost_equal(report.actors, 1)
    # window.show(renderer)

    renderer.clear()

    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0

    line = actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    renderer.clear()

    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0

    line = actor.line(bundle, colors, linewidth=2)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(renderer)

    arr = window.snapshot(renderer)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # try other input options for colors
    renderer.clear()
    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
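
For reference, the color-input variants exercised at the end of this example can be reproduced in isolation with a couple of made-up streamlines; the sketch below uses only the actor.line calls already shown above.

import numpy as np
from fury import actor

# two short synthetic streamlines (each an N x 3 array of points)
bundle = [np.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]]),
          np.array([[0.5, 0., 0.], [1.5, 1., 1.]])]

actor.line(bundle, (1., 0.5, 0))            # one color for the whole bundle
actor.line(bundle, np.arange(len(bundle)))  # one scalar per streamline
actor.line(bundle)                          # default coloring
colors = [np.random.rand(*b.shape) for b in bundle]
actor.line(bundle, colors=colors)           # one RGB triplet per point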
Example #3
def test_grid(_interactive=False):

    vol1 = np.zeros((100, 100, 100))
    vol1[25:75, 25:75, 25:75] = 100
    contour_actor1 = actor.contour_from_roi(vol1, np.eye(4), (1., 0, 0), 1.)

    vol2 = np.zeros((100, 100, 100))
    vol2[25:75, 25:75, 25:75] = 100

    contour_actor2 = actor.contour_from_roi(vol2, np.eye(4), (1., 0.5, 0), 1.)
    vol3 = np.zeros((100, 100, 100))
    vol3[25:75, 25:75, 25:75] = 100

    contour_actor3 = actor.contour_from_roi(vol3, np.eye(4), (1., 0.5, 0.5),
                                            1.)

    scene = window.Scene()
    actors = []
    texts = []

    actors.append(contour_actor1)
    text_actor1 = actor.text_3d('cube 1', justification='center')
    texts.append(text_actor1)

    actors.append(contour_actor2)
    text_actor2 = actor.text_3d('cube 2', justification='center')
    texts.append(text_actor2)

    actors.append(contour_actor3)
    text_actor3 = actor.text_3d('cube 3', justification='center')
    texts.append(text_actor3)

    actors.append(shallow_copy(contour_actor1))
    text_actor1 = 'cube 4'
    texts.append(text_actor1)

    actors.append(shallow_copy(contour_actor2))
    text_actor2 = 'cube 5'
    texts.append(text_actor2)

    actors.append(shallow_copy(contour_actor3))
    text_actor3 = 'cube 6'
    texts.append(text_actor3)

    # show the grid without the captions
    container = grid(actors=actors,
                     captions=None,
                     caption_offset=(0, -40, 0),
                     cell_padding=(10, 10),
                     dim=(2, 3))

    scene.add(container)

    scene.projection('orthogonal')

    counter = itertools.count()

    show_m = window.ShowManager(scene)

    show_m.initialize()

    def timer_callback(_obj, _event):
        cnt = next(counter)
        # show_m.scene.zoom(1)
        show_m.render()
        if cnt == 4:
            show_m.exit()
            show_m.destroy_timers()

    show_m.add_timer_callback(True, 200, timer_callback)
    show_m.start()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 6)

    scene.rm_all()

    counter = itertools.count()
    show_m = window.ShowManager(scene)
    show_m.initialize()
    # show the grid with the captions
    container = grid(actors=actors,
                     captions=texts,
                     caption_offset=(0, -50, 0),
                     cell_padding=(10, 10),
                     dim=(3, 3))

    scene.add(container)

    show_m.add_timer_callback(True, 200, timer_callback)
    show_m.start()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects > 6, True)
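
The timer callback used above is the usual way these tests drive an interactive window for a few frames and then close it automatically. A stripped-down sketch of just that mechanism, using only the ShowManager calls appearing in this example:

import itertools
from fury import actor, window

scene = window.Scene()
scene.add(actor.axes())

counter = itertools.count()
show_m = window.ShowManager(scene)
show_m.initialize()

def timer_callback(_obj, _event):
    cnt = next(counter)
    show_m.render()
    if cnt == 4:          # stop after a few frames
        show_m.exit()

show_m.add_timer_callback(True, 200, timer_callback)  # repeating timer, 200 ms
show_m.start()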
Example #4
def test_contour_from_roi():

    # Render volume
    renderer = window.renderer()
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data, affine,
                                     color=np.array([1, 0, 1]),
                                     opacity=.5)
    renderer.add(surface)

    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # Test binarization
    renderer2 = window.renderer()
    data2 = np.zeros((50, 50, 50))
    data2[20:30, 25, 25] = 1.
    data2[35:40, 25, 25] = 1.
    affine = np.eye(4)
    surface2 = actor.contour_from_roi(data2, affine,
                                      color=np.array([0, 1, 1]),
                                      opacity=.5)
    renderer2.add(surface2)

    renderer2.reset_camera()
    renderer2.reset_clipping_range()
    # window.show(renderer2)

    arr = window.snapshot(renderer, 'test_surface.png', offscreen=True)
    arr2 = window.snapshot(renderer2, 'test_surface2.png', offscreen=True)

    report = window.analyze_snapshot(arr, find_objects=True)
    report2 = window.analyze_snapshot(arr2, find_objects=True)

    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report2.objects, 2)

    # test on real streamlines using tracking example
    from dipy.data import read_stanford_labels
    from dipy.reconst.shm import CsaOdfModel
    from dipy.data import default_sphere
    from dipy.direction import peaks_from_model
    from dipy.tracking.local import ThresholdTissueClassifier
    from dipy.tracking import utils
    from dipy.tracking.local import LocalTracking
    from fury.colormap import line_colors

    hardi_img, gtab, labels_img = read_stanford_labels()
    data = hardi_img.get_data()
    labels = labels_img.get_data()
    affine = hardi_img.affine

    white_matter = (labels == 1) | (labels == 2)

    csa_model = CsaOdfModel(gtab, sh_order=6)
    csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=white_matter)

    classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)

    seed_mask = labels == 2
    seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)

    # Initialization of LocalTracking.
    # The computation happens in the next step.
    streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
                                step_size=2)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    # Prepare the display objects.
    streamlines_actor = actor.line(streamlines, line_colors(streamlines))
    seedroi_actor = actor.contour_from_roi(seed_mask, affine, [0, 1, 1], 0.5)

    # Create the 3d display.
    r = window.Renderer()
    r2 = window.Renderer()
    r.add(streamlines_actor)
    arr3 = window.snapshot(r, 'test_surface3.png', offscreen=True)
    report3 = window.analyze_snapshot(arr3, find_objects=True)
    r2.add(streamlines_actor)
    r2.add(seedroi_actor)
    arr4 = window.snapshot(r2, 'test_surface4.png', offscreen=True)
    report4 = window.analyze_snapshot(arr4, find_objects=True)

    # assert that the seed ROI rendering is not far
    # away from the streamlines (affine error)
    npt.assert_equal(report3.objects, report4.objects)
Example #5
def test_scene():
    scene = window.Scene()
    # Scene size test
    npt.assert_equal(scene.size(), (0, 0))
    # Color background test
    # Background color for scene (1, 0.5, 0). 0.001 added here to remove
    # numerical errors when moving from float to int values
    bg_float = (1, 0.501, 0)
    # That will come in the image in the 0-255 uint scale
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))
    scene.background(bg_float)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color=bg_color,
                                     colors=[bg_color, (0, 127, 0)])
    npt.assert_equal(report.objects, 0)
    npt.assert_equal(report.colors_found, [True, False])
    # Add actor to scene to test the remove actor function by analyzing a
    # snapshot
    axes = actor.axes()
    scene.add(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)
    # Test remove actor function by analyzing a snapshot
    scene.rm(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)
    # Add actor to scene to test the remove all actors function by analyzing a
    # snapshot
    scene.add(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)
    # Test remove all actors function by analyzing a snapshot
    scene.rm_all()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)
    # Test change background color from scene by analyzing the scene
    ren2 = window.Scene(bg_float)
    ren2.background((0, 0, 0.))
    report = window.analyze_scene(ren2)
    npt.assert_equal(report.bg_color, (0, 0, 0))
    # Add actor to scene to test the remove actor function by analyzing the
    # scene
    ren2.add(axes)
    report = window.analyze_scene(ren2)
    npt.assert_equal(report.actors, 1)
    # Test remove actor function by analyzing the scene
    ren2.rm(axes)
    report = window.analyze_scene(ren2)
    npt.assert_equal(report.actors, 0)
    # Test camera information retrieving
    with captured_output() as (out, err):
        scene.camera_info()
    npt.assert_equal(out.getvalue().strip(),
                     '# Active Camera\n   '
                     'Position (0.00, 0.00, 1.00)\n   '
                     'Focal Point (0.00, 0.00, 0.00)\n   '
                     'View Up (0.00, 1.00, 0.00)')
    npt.assert_equal(err.getvalue().strip(), '')
    # Test skybox
    scene = window.Scene()
    npt.assert_equal(scene.GetUseImageBasedLighting(), False)
    npt.assert_equal(scene.GetAutomaticLightCreation(), 1)
    npt.assert_equal(scene.GetSphericalHarmonics(), None)
    npt.assert_equal(scene.GetEnvironmentTexture(), None)
    test_tex = Texture()
    scene = window.Scene(skybox=test_tex)
    npt.assert_equal(scene.GetUseImageBasedLighting(), True)
    npt.assert_equal(scene.GetAutomaticLightCreation(), 0)
    npt.assert_equal(scene.GetSphericalHarmonics(), None)
    npt.assert_equal(scene.GetEnvironmentTexture(), test_tex)
    # Test automatically shown skybox
    test_tex = Texture()
    test_tex.CubeMapOn()
    checker_arr = np.array([[1, 0], [0, 1]], dtype=np.uint8) * 255
    for i in range(6):
        vtk_img = ImageData()
        vtk_img.SetDimensions(2, 2, 1)
        img_arr = np.zeros((2, 2, 3), dtype=np.uint8)
        img_arr[:, :, i // 2] = checker_arr
        vtk_arr = numpy_support.numpy_to_vtk(img_arr.reshape((-1, 3),
                                                             order='F'))
        vtk_arr.SetName('Image')
        img_point_data = vtk_img.GetPointData()
        img_point_data.AddArray(vtk_arr)
        img_point_data.SetActiveScalars('Image')
        test_tex.SetInputDataObject(i, vtk_img)
    scene = window.Scene(skybox=test_tex)
    report = window.analyze_scene(scene)
    npt.assert_equal(report.actors, 1)
    ss = window.snapshot(scene)
    npt.assert_array_equal(ss[75, 75, :], [0, 0, 255])
    npt.assert_array_equal(ss[75, 225, :], [0, 0, 0])
    scene.yaw(90)
    ss = window.snapshot(scene)
    npt.assert_array_equal(ss[75, 75, :], [255, 0, 0])
    npt.assert_array_equal(ss[75, 225, :], [0, 0, 0])
    scene.pitch(90)
    ss = window.snapshot(scene)
    npt.assert_array_equal(ss[75, 75, :], [0, 0, 0])
    npt.assert_array_equal(ss[75, 225, :], [0, 255, 0])
Example #6
def test_scene():

    scene = window.Scene()

    npt.assert_equal(scene.size(), (0, 0))

    # background color for scene (1, 0.5, 0)
    # 0.001 added here to remove numerical errors when moving from float
    # to int values
    bg_float = (1, 0.501, 0)

    # that will come in the image in the 0-255 uint scale
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))

    scene.background(bg_float)
    # window.show(scene)
    arr = window.snapshot(scene)

    report = window.analyze_snapshot(arr,
                                     bg_color=bg_color,
                                     colors=[bg_color, (0, 127, 0)])
    npt.assert_equal(report.objects, 0)
    npt.assert_equal(report.colors_found, [True, False])

    axes = actor.axes()
    scene.add(axes)
    # window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    scene.rm(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    scene.add(axes)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    scene.rm_all()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    ren2 = window.Scene(bg_float)
    ren2.background((0, 0, 0.))

    report = window.analyze_scene(ren2)
    npt.assert_equal(report.bg_color, (0, 0, 0))

    ren2.add(axes)

    report = window.analyze_scene(ren2)
    npt.assert_equal(report.actors, 3)

    ren2.rm(axes)
    report = window.analyze_scene(ren2)
    npt.assert_equal(report.actors, 0)

    with captured_output() as (out, err):
        scene.camera_info()
    npt.assert_equal(
        out.getvalue().strip(), '# Active Camera\n   '
        'Position (0.00, 0.00, 1.00)\n   '
        'Focal Point (0.00, 0.00, 0.00)\n   '
        'View Up (0.00, 1.00, 0.00)')
    npt.assert_equal(err.getvalue().strip(), '')
Example #7
def test_grid_ui(interactive=False):

    vol1 = np.zeros((100, 100, 100))
    vol1[25:75, 25:75, 25:75] = 100

    colors = distinguishable_colormap(nb_colors=3)
    contour_actor1 = actor.contour_from_roi(vol1, np.eye(4), colors[0], 1.)

    vol2 = np.zeros((100, 100, 100))
    vol2[25:75, 25:75, 25:75] = 100

    contour_actor2 = actor.contour_from_roi(vol2, np.eye(4), colors[1], 1.)

    vol3 = np.zeros((100, 100, 100))
    vol3[25:75, 25:75, 25:75] = 100

    contour_actor3 = actor.contour_from_roi(vol3, np.eye(4), colors[2], 1.)

    scene = window.Scene()
    actors = []
    texts = []

    actors.append(contour_actor1)
    text_actor1 = actor.text_3d('cube 1', justification='center')
    texts.append(text_actor1)

    actors.append(contour_actor2)
    text_actor2 = actor.text_3d('cube 2', justification='center')
    texts.append(text_actor2)

    actors.append(contour_actor3)
    text_actor3 = actor.text_3d('cube 3', justification='center')
    texts.append(text_actor3)

    actors.append(shallow_copy(contour_actor1))
    text_actor1 = actor.text_3d('cube 4', justification='center')
    texts.append(text_actor1)

    actors.append(shallow_copy(contour_actor2))
    text_actor2 = actor.text_3d('cube 5', justification='center')
    texts.append(text_actor2)

    actors.append(shallow_copy(contour_actor3))
    text_actor3 = actor.text_3d('cube 6', justification='center')
    texts.append(text_actor3)

    actors.append(shallow_copy(contour_actor1))
    text_actor1 = actor.text_3d('cube 7', justification='center')
    texts.append(text_actor1)

    actors.append(shallow_copy(contour_actor2))
    text_actor2 = actor.text_3d('cube 8', justification='center')
    texts.append(text_actor2)

    actors.append(shallow_copy(contour_actor3))
    text_actor3 = actor.text_3d('cube 9', justification='center')
    texts.append(text_actor3)

    counter = itertools.count()
    show_m = window.ShowManager(scene)
    show_m.initialize()

    def timer_callback(_obj, _event):
        cnt = next(counter)
        show_m.scene.zoom(1)
        show_m.render()
        if cnt == 10:
            show_m.exit()

    # show the grid with the captions
    grid_ui = ui.GridUI(actors=actors,
                        captions=texts,
                        caption_offset=(0, -50, 0),
                        cell_padding=(60, 60),
                        dim=(3, 3),
                        rotation_axis=(1, 0, 0))

    scene.add(grid_ui)

    show_m.add_timer_callback(True, 200, timer_callback)
    show_m.start()

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects > 9, True)

    # this needs to happen automatically when start() ends.
    for act in actors:
        act.RemoveAllObservers()

    filename = "test_grid_ui"
    recording_filename = pjoin(DATA_DIR, filename + ".log.gz")
    expected_events_counts_filename = pjoin(DATA_DIR, filename + ".pkl")

    current_size = (900, 600)
    scene = window.Scene()
    show_manager = window.ShowManager(scene,
                                      size=current_size,
                                      title="FURY GridUI")
    show_manager.initialize()

    grid_ui2 = ui.GridUI(actors=actors,
                         captions=texts,
                         caption_offset=(0, -50, 0),
                         cell_padding=(60, 60),
                         dim=(3, 3),
                         rotation_axis=None)

    scene.add(grid_ui2)

    event_counter = EventCounter()
    event_counter.monitor(grid_ui2)

    if interactive:
        show_manager.start()
    recording = False

    if recording:
        # Record the following events
        # 1. Left click on top left box (will rotate the box)
        show_manager.record_events_to_file(recording_filename)
        print(list(event_counter.events_counts.items()))
        event_counter.save(expected_events_counts_filename)

    else:
        show_manager.play_events_from_file(recording_filename)
        expected = EventCounter.load(expected_events_counts_filename)
        event_counter.check_counts(expected)
Example #8
def test_active_camera():
    renderer = window.Renderer()
    renderer.add(actor.axes(scale=(1, 1, 1)))

    renderer.reset_camera()
    renderer.reset_clipping_range()

    direction = renderer.camera_direction()
    position, focal_point, view_up = renderer.get_camera()

    renderer.set_camera((0., 0., 1.), (0., 0., 0), view_up)

    position, focal_point, view_up = renderer.get_camera()
    npt.assert_almost_equal(np.dot(direction, position), -1)

    renderer.zoom(1.5)

    new_position, _, _ = renderer.get_camera()

    npt.assert_array_almost_equal(position, new_position)

    renderer.zoom(1)

    # rotate around focal point
    renderer.azimuth(90)

    position, _, _ = renderer.get_camera()

    npt.assert_almost_equal(position, (1.0, 0.0, 0))

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    # rotate around camera's center
    renderer.yaw(90)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=[(0, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    renderer.yaw(-90)
    renderer.elevation(90)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=(0, 255, 0))
    npt.assert_equal(report.colors_found, [True])

    renderer.set_camera((0., 0., 1.), (0., 0., 0), view_up)

    # vertical rotation of the camera around the focal point
    renderer.pitch(10)
    renderer.pitch(-10)

    # rotate around the direction of projection
    renderer.roll(90)

    # dolly divides the camera's distance from the focal point by the given
    # value, so dolly(0.5) moves the camera twice as far from the focal point

    position, _, _ = renderer.get_camera()
    renderer.dolly(0.5)
    new_position, _, _ = renderer.get_camera()
    npt.assert_almost_equal(position[2], 0.5 * new_position[2])
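
All of the camera helpers used above (set_camera, get_camera, zoom, azimuth, yaw, elevation, pitch, roll, dolly) act on the renderer's active camera. The sketch below strings a few of them together and is restricted to the exact calls used in this example.

from fury import actor, window

ren = window.Renderer()
ren.add(actor.axes(scale=(1, 1, 1)))
ren.reset_camera()
ren.reset_clipping_range()

ren.set_camera((0., 0., 1.), (0., 0., 0.), (0., 1., 0.))  # position, focal point, view up
ren.azimuth(90)   # rotate around the focal point
ren.roll(90)      # rotate around the direction of projection
ren.dolly(0.5)    # distance to the focal point is divided by 0.5 (camera moves away)

position, focal_point, view_up = ren.get_camera()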
Example #9
def test_sdf_actor(interactive=False):
    scene = window.Scene()
    scene.background((1, 1, 1))
    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0], [2, 2, 0]]) * 11
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 1, 0]])
    scales = [1, 2, 3, 4]
    primitive = ['sphere', 'ellipsoid', 'torus', 'capsule']

    sdf_actor = actor.sdf(centers, directions,
                          colors, primitive, scales)
    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)

    # Draw 4 spheres, as the primitive type is a single str
    scene.clear()
    primitive = 'sphere'
    sdf_actor = actor.sdf(centers, directions,
                          colors, primitive, scales)
    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)

    # One sphere, with the remaining three defaulting to torus,
    # as the primitive list is shorter than the number of centers
    scene.clear()
    primitive = ['sphere']
    with npt.assert_warns(UserWarning):
        sdf_actor = actor.sdf(centers, directions, colors,
                              primitive, scales)

    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)

    # One sphere and one ellipsoid;
    # the remaining two default to torus
    scene.clear()
    primitive = ['sphere', 'ellipsoid']
    with npt.assert_warns(UserWarning):
        sdf_actor = actor.sdf(centers, directions,
                              colors, primitive, scales)

    scene.add(sdf_actor)
    scene.add(actor.axes())
    if interactive:
        window.show(scene)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 4)
Example #10
def test_odf_slicer(interactive=False):
    # Prepare our data
    sphere = get_sphere('repulsion100')
    shape = (11, 11, 11, sphere.vertices.shape[0])
    odfs = np.ones(shape)

    affine = np.array([[2.0, 0.0, 0.0, 3.0],
                       [0.0, 2.0, 0.0, 3.0],
                       [0.0, 0.0, 2.0, 1.0],
                       [0.0, 0.0, 0.0, 1.0]])
    mask = np.ones(odfs.shape[:3], bool)
    mask[:4, :4, :4] = False

    # Test that affine and mask work
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, affine=affine, mask=mask,
                                 scale=.25, colormap='blues')

    k = 2
    I, J, _ = odfs.shape[:3]
    odf_actor.display_extent(0, I - 1, 0, J - 1, k, k)

    scene = window.Scene()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()

    if interactive:
        window.show(scene, reset_camera=False)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 11 * 11 - 16)

    # Test that global colormap works
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask, scale=.25,
                                 colormap='blues', norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that the most basic odf_slicer instantiation works
    odf_actor = actor.odf_slicer(odfs)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that odf_slicer.display works properly
    scene.clear()
    scene.add(odf_actor)
    scene.add(actor.axes((11, 11, 11)))
    for i in range(11):
        odf_actor.display(i, None, None)
        if interactive:
            window.show(scene)
    for j in range(11):
        odf_actor.display(None, j, None)
        if interactive:
            window.show(scene)

    # With mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask,
                                 scale=.25, colormap='blues',
                                 norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # global_cm=True with colormap=None should raise an error
    npt.assert_raises(IOError, actor.odf_slicer, odfs, sphere=sphere,
                      mask=None, scale=.25, colormap=None, norm=False,
                      global_cm=True)

    # Dimension mismatch between sphere vertices and number
    # of SF coefficients will raise an error.
    npt.assert_raises(ValueError, actor.odf_slicer, odfs, mask=None,
                      sphere=get_sphere('repulsion200'), scale=.25)

    # colormap=None and global_cm=False results in directionally encoded colors
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=None,
                                 scale=.25, colormap=None,
                                 norm=False, global_cm=False)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that SH coefficients input works
    B = sh_to_sf_matrix(sphere, sh_order=4, return_inv=False)
    odfs = np.zeros((11, 11, 11, B.shape[0]))
    odfs[..., 0] = 1.0
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B)

    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Dimension mismatch between sphere vertices and dimension of
    # B matrix will raise an error.
    npt.assert_raises(ValueError, actor.odf_slicer, odfs, mask=None,
                      sphere=get_sphere('repulsion200'))

    # Test that constant colormap color works. Also test that sphere
    # normals are oriented correctly. Will show purple spheres with
    # a white contour.
    odf_contour = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B,
                                   colormap=(255, 255, 255))
    odf_contour.GetProperty().SetAmbient(1.0)
    odf_contour.GetProperty().SetFrontfaceCulling(True)

    odf_actor = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B,
                                 colormap=(255, 0, 255), scale=0.4)
    scene.clear()
    scene.add(odf_contour)
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that we can change the sphere on an active actor
    new_sphere = get_sphere('symmetric362')
    new_B = sh_to_sf_matrix(new_sphere, sh_order=4, return_inv=False)
    odf_actor.update_sphere(new_sphere.vertices, new_sphere.faces, new_B)
    if interactive:
        window.show(scene)

    del odf_actor
    del odfs
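
Stripped of the assertions and the many colormap variations, the core odf_slicer usage in this example comes down to a handful of lines. The sketch below assumes dipy is installed (the examples above call get_sphere without showing its import) and otherwise reuses only calls shown above.

import numpy as np
from dipy.data import get_sphere
from fury import actor, window

sphere = get_sphere('repulsion100')
# constant ODFs on an 11 x 11 x 11 grid, one value per sphere vertex
odfs = np.ones((11, 11, 11, sphere.vertices.shape[0]))

odf_actor = actor.odf_slicer(odfs, sphere=sphere, scale=.25, colormap='blues')
odf_actor.display(None, None, 5)   # show only the z = 5 slice

scene = window.Scene()
scene.add(odf_actor)
scene.reset_camera()
scene.reset_clipping_range()
# window.show(scene)   # uncomment for an interactive view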
Example #11
def test_odf_slicer(interactive=False):
    # TODO: we should change the odf_slicer to work directly with the
    # vertices and faces of a sphere rather than needing a specific type
    # of sphere. We can use prim_sphere as an alternative to get_sphere.
    vertices, faces = prim_sphere('repulsion100', True)
    sphere = Sphere()
    sphere.vertices = vertices
    sphere.faces = faces

    shape = (11, 11, 11, 100)
    odfs = np.ones(shape)

    affine = np.array([[2.0, 0.0, 0.0, 3.0],
                       [0.0, 2.0, 0.0, 3.0],
                       [0.0, 0.0, 2.0, 1.0],
                       [0.0, 0.0, 0.0, 1.0]])
    mask = np.ones(odfs.shape[:3], bool)
    mask[:4, :4, :4] = False

    # Test that affine and mask work
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, affine=affine, mask=mask,
                                 scale=.25, colormap='blues')

    k = 2
    I, J, _ = odfs.shape[:3]
    odf_actor.display_extent(0, I - 1, 0, J - 1, k, k)

    scene = window.Scene()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()

    if interactive:
        window.show(scene, reset_camera=False)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 11 * 11 - 16)

    # Test that global colormap works
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask, scale=.25,
                                 colormap='blues', norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that the most basic odf_slicer instantiation works
    odf_actor = actor.odf_slicer(odfs)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that odf_slicer.display works properly
    scene.clear()
    scene.add(odf_actor)
    scene.add(actor.axes((11, 11, 11)))
    for i in range(11):
        odf_actor.display(i, None, None)
        if interactive:
            window.show(scene)
    for j in range(11):
        odf_actor.display(None, j, None)
        if interactive:
            window.show(scene)

    # With mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask,
                                 scale=.25, colormap='blues',
                                 norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # global_cm=True with colormap=None should raise an error
    npt.assert_raises(IOError, actor.odf_slicer, odfs, sphere=None,
                      mask=None, scale=.25, colormap=None, norm=False,
                      global_cm=True)

    vertices2, faces2 = prim_sphere('repulsion200', True)
    sphere2 = Sphere()
    sphere2.vertices = vertices2
    sphere2.faces = faces2

    # Dimension mismatch between sphere vertices and number
    # of SF coefficients will raise an error.
    npt.assert_raises(ValueError, actor.odf_slicer, odfs, mask=None,
                      sphere=sphere2, scale=.25)

    # colormap=None and global_cm=False results in directionally encoded colors
    odf_actor = actor.odf_slicer(odfs, sphere=None, mask=None,
                                 scale=.25, colormap=None,
                                 norm=False, global_cm=False)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    del odf_actor
    del odfs
Example #12
def test_slicer(verbose=False):
    scene = window.Scene()
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine, value_range=[data.min(), data.max()])
    slicer.display(None, None, 25)
    scene.add(slicer)

    scene.reset_camera()
    scene.reset_clipping_range()
    # window.show(scene)

    # copy the pixels into a numpy array directly
    arr = window.snapshot(scene, 'test_slicer.png', offscreen=True)

    if verbose:
        print(arr.sum())
        print(np.sum(arr == 0))
        print(np.sum(arr > 0))
        print(arr.shape)
        print(arr.dtype)

    report = window.analyze_snapshot(arr, find_objects=True)

    npt.assert_equal(report.objects, 1)
    # print(arr[..., 0])

    # The slicer can directly cut out a smaller part of the image
    slicer.display_extent(10, 30, 10, 30, 35, 35)
    scene.ResetCamera()

    scene.add(slicer)

    # save the pixels to a png file instead of a numpy array
    with InTemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        window.snapshot(scene, fname, offscreen=True)
        report = window.analyze_snapshot(fname, find_objects=True)
        npt.assert_equal(report.objects, 1)

    # Test Errors
    data_4d = (255 * np.random.rand(50, 50, 50, 50))
    npt.assert_raises(ValueError, actor.slicer, data_4d)
    npt.assert_raises(ValueError, actor.slicer, np.ones(10))

    scene.clear()

    rgb = np.zeros((30, 30, 30, 3), dtype='f8')
    rgb[..., 0] = 255
    rgb_actor = actor.slicer(rgb)

    scene.add(rgb_actor)

    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene, offscreen=True)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report.colors_found, [True])

    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    scene.clear()
    slicer_lut = actor.slicer(data, lookup_colormap=lut)

    slicer_lut.display(10, None, None)
    slicer_lut.display(None, 10, None)
    slicer_lut.display(None, None, 10)

    slicer_lut.opacity(0.5)
    slicer_lut.tolerance(0.03)
    slicer_lut2 = slicer_lut.copy()
    npt.assert_equal(slicer_lut2.GetOpacity(), 0.5)
    npt.assert_equal(slicer_lut2.picker.GetTolerance(), 0.03)
    slicer_lut2.opacity(1)
    slicer_lut2.tolerance(0.025)
    slicer_lut2.display(None, None, 10)
    scene.add(slicer_lut2)

    scene.reset_clipping_range()

    arr = window.snapshot(scene, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)

    scene.clear()

    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])
    slicer = actor.slicer(data, affine, interpolation='nearest')
    slicer.display(None, None, 25)

    scene.add(slicer)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(data.shape, slicer.shape)
    slicer2 = slicer.copy()
    npt.assert_equal(slicer2.shape, slicer.shape)
Example #13
def test_bundle_maps():
    scene = window.Scene()
    bundle = simulated_bundle(no_streamlines=10, waves=False)

    metric = 100 * np.ones((200, 200, 200))

    # add lower values
    metric[100, :, :] = 100 * 0.5

    # create a nice orange-red colormap
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    scene.add(line)
    scene.add(actor.scalar_bar(lut, ' '))

    report = window.analyze_scene(scene)

    npt.assert_almost_equal(report.actors, 1)
    # window.show(scene)

    scene.clear()

    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0

    line = actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut)
    scene.add(line)
    # window.show(scene)

    report = window.analyze_scene(scene)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    scene.clear()

    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0

    line = actor.line(bundle, colors, linewidth=2)
    scene.add(line)
    # window.show(scene)

    report = window.analyze_scene(scene)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(scene)

    arr = window.snapshot(scene)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # try other input options for colors
    scene.clear()
    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
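
The lookup-table pattern shared by the two bundle-map examples can be isolated as follows. The streamlines here are synthetic placeholders; the colormap_lookup_table, streamtube and scalar_bar calls are exactly the ones used above.

import numpy as np
from fury import actor, window

# two short synthetic streamlines
bundle = [np.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]]),
          np.array([[0.5, 0., 0.], [1.5, 1., 1.], [2.5, 2., 2.]])]
nb_points = np.sum([len(b) for b in bundle])
values = 100 * np.random.rand(nb_points)     # one scalar per point

# orange-red colormap, as in the examples above
lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                  hue_range=(0., 0.1),
                                  saturation_range=(1, 1),
                                  value_range=(1., 1))

scene = window.Scene()
scene.add(actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut))
scene.add(actor.scalar_bar(lut, ' '))
# window.show(scene)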
Example #14
def test_odf_slicer(interactive=False):

    sphere = get_sphere('symmetric362')

    shape = (11, 11, 11, sphere.vertices.shape[0])

    fid, fname = mkstemp(suffix='_odf_slicer.mmap')
    print(fid)
    print(fname)

    odfs = np.memmap(fname, dtype=np.float64, mode='w+',
                     shape=shape)

    odfs[:] = 1

    affine = np.eye(4)
    renderer = window.Renderer()

    mask = np.ones(odfs.shape[:3])
    mask[:4, :4, :4] = 0

    odfs[..., 0] = 1

    odf_actor = actor.odf_slicer(odfs, affine,
                                 mask=mask, sphere=sphere, scale=.25,
                                 colormap='plasma')
    fa = 0. * np.zeros(odfs.shape[:3])
    fa[:, 0, :] = 1.
    fa[:, -1, :] = 1.
    fa[0, :, :] = 1.
    fa[-1, :, :] = 1.
    fa[5, 5, 5] = 1

    k = 5
    I, J, K = odfs.shape[:3]

    fa_actor = actor.slicer(fa, affine)
    fa_actor.display_extent(0, I, 0, J, k, k)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    odf_actor.display_extent(0, I, 0, J, k, k)
    odf_actor.GetProperty().SetOpacity(1.0)
    if interactive:
        window.show(renderer, reset_camera=False)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 11 * 11)

    renderer.clear()
    renderer.add(fa_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    mask[:] = 0
    mask[5, 5, 5] = 1
    fa[5, 5, 5] = 0
    fa_actor = actor.slicer(fa, None)
    fa_actor.display(None, None, 5)
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)
    renderer.clear()
    renderer.add(fa_actor)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    odfs[:, :, :] = 1
    mask = np.ones(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    renderer.add(actor.axes((11, 11, 11)))
    for i in range(11):
        odf_actor.display(i, None, None)
        fa_actor.display(i, None, None)
        if interactive:
            window.show(renderer)
    for j in range(11):
        odf_actor.display(None, j, None)
        fa_actor.display(None, j, None)
        if interactive:
            window.show(renderer)
    # with mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)
    renderer.clear()
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 1)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    del odf_actor
    odfs._mmap.close()
    del odfs
    os.close(fid)

    os.remove(fname)
Example #15
def test_manifest_standard(interactive=False):
    scene = window.Scene()  # Setup scene

    # Setup surface
    surface_actor = _generate_surface()
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(surface_actor)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    scene.clear()  # Reset scene

    # Contour from roi setup
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data, affine, color=np.array([1, 0, 1]))
    material.manifest_standard(surface, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(surface)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    scene.clear()  # Reset scene

    # Contour from label setup
    data = np.zeros((50, 50, 50))
    data[5:15, 1:10, 25] = 1.
    data[25:35, 1:10, 25] = 2.
    data[40:49, 1:10, 25] = 3.
    color = np.array([[255, 0, 0],
                      [0, 255, 0],
                      [0, 0, 255]])
    surface = actor.contour_from_label(data, color=color)
    material.manifest_standard(surface, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(surface)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Streamtube setup
    data1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    data2 = data1 + np.array([0.5, 0., 0.])
    data = [data1, data2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    tubes = actor.streamtube(data, colors, linewidth=.1)
    material.manifest_standard(tubes, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(tubes)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 2)

    scene.clear()  # Reset scene

    # ODF slicer setup
    if have_dipy:
        from dipy.data import get_sphere
        from tempfile import mkstemp
        sphere = get_sphere('symmetric362')
        shape = (11, 11, 11, sphere.vertices.shape[0])
        fid, fname = mkstemp(suffix='_odf_slicer.mmap')
        odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)
        odfs[:] = 1
        affine = np.eye(4)
        mask = np.ones(odfs.shape[:3])
        mask[:4, :4, :4] = 0
        odfs[..., 0] = 1
        odf_actor = actor.odf_slicer(odfs, affine, mask=mask, sphere=sphere,
                                     scale=.25, colormap='blues')
        material.manifest_standard(odf_actor, ambient_level=.3,
                                   diffuse_level=.25)
        k = 5
        I, J, _ = odfs.shape[:3]
        odf_actor.display_extent(0, I, 0, J, k, k)
        odf_actor.GetProperty().SetOpacity(1.0)
        scene.add(odf_actor)
        scene.reset_camera()
        scene.reset_clipping_range()
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 11 * 11)

    scene.clear()  # Reset scene

    # Tensor slicer setup
    if have_dipy:
        from dipy.data import get_sphere
        sphere = get_sphere('symmetric724')
        evals = np.array([1.4, .35, .35]) * 10 ** (-3)
        evecs = np.eye(3)
        mevals = np.zeros((3, 2, 4, 3))
        mevecs = np.zeros((3, 2, 4, 3, 3))
        mevals[..., :] = evals
        mevecs[..., :, :] = evecs
        affine = np.eye(4)
        scene = window.Scene()
        tensor_actor = actor.tensor_slicer(mevals, mevecs, affine=affine,
                                           sphere=sphere, scale=.3)
        material.manifest_standard(tensor_actor, ambient_level=.3,
                                   diffuse_level=.25)
        _, J, K = mevals.shape[:3]
        tensor_actor.display_extent(0, 1, 0, J, 0, K)
        scene.add(tensor_actor)
        scene.reset_camera()
        scene.reset_clipping_range()
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 4)

    scene.clear()  # Reset scene

    # Point setup
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    opacity = 0.5
    points_actor = actor.point(points, colors, opacity=opacity)
    material.manifest_standard(points_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(points_actor)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Sphere setup
    xyzr = np.array([[0, 0, 0, 10], [100, 0, 0, 25], [200, 0, 0, 50]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.99]])
    opacity = 0.5
    sphere_actor = actor.sphere(centers=xyzr[:, :3], colors=colors[:],
                                radii=xyzr[:, 3], opacity=opacity)
    material.manifest_standard(sphere_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(sphere_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Advanced geometry actors setup (Arrow, cone, cylinder)
    xyz = np.array([[0, 0, 0], [50, 0, 0], [100, 0, 0]])
    dirs = np.array([[0, 1, 0], [1, 0, 0], [0, 0.5, 0.5]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [1, 1, 0, 1]])
    heights = np.array([5, 7, 10])
    actor_list = [[actor.cone, {'directions': dirs, 'resolution': 8}],
                  [actor.arrow, {'directions': dirs, 'resolution': 9}],
                  [actor.cylinder, {'directions': dirs}]]
    for act_func, extra_args in actor_list:
        aga_actor = act_func(centers=xyz, colors=colors[:], heights=heights,
                             **extra_args)
        material.manifest_standard(aga_actor, ambient_level=.3,
                                   diffuse_level=.25)
        scene.add(aga_actor)
        scene.reset_camera()
        scene.reset_clipping_range()
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 3)
        scene.clear()

    # Basic geometry actors (Box, cube, frustum, octagonalprism, rectangle,
    # square)
    centers = np.array([[4, 0, 0], [0, 4, 0], [0, 0, 0]])
    colors = np.array([[1, 0, 0, 0.4], [0, 1, 0, 0.8], [0, 0, 1, 0.5]])
    directions = np.array([[1, 1, 0]])
    scale_list = [1, 2, (1, 1, 1), [3, 2, 1], np.array([1, 2, 3]),
                  np.array([[1, 2, 3], [1, 3, 2], [3, 1, 2]])]
    actor_list = [[actor.box, {}], [actor.cube, {}], [actor.frustum, {}],
                  [actor.octagonalprism, {}], [actor.rectangle, {}],
                  [actor.square, {}]]
    for act_func, extra_args in actor_list:
        for scale in scale_list:
            scene = window.Scene()
            bga_actor = act_func(centers=centers, directions=directions,
                                 colors=colors, scales=scale, **extra_args)
            material.manifest_standard(bga_actor, ambient_level=.3,
                                       diffuse_level=.25)
            scene.add(bga_actor)
            arr = window.snapshot(scene)
            report = window.analyze_snapshot(arr)
            msg = 'Failed with {}, scale={}'.format(act_func.__name__, scale)
            npt.assert_equal(report.objects, 3, err_msg=msg)
            scene.clear()

    # Cone setup using vertices
    centers = np.array([[0, 0, 0], [20, 0, 0], [40, 0, 0]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.99]])
    vertices = np.array([[0.0, 0.0, 0.0], [0.0, 10.0, 0.0],
                         [10.0, 0.0, 0.0], [0.0, 0.0, 10.0]])
    faces = np.array([[0, 1, 3], [0, 1, 2]])
    cone_actor = actor.cone(centers=centers, directions=directions,
                            colors=colors[:], vertices=vertices, faces=faces)
    material.manifest_standard(cone_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(cone_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Superquadric setup
    centers = np.array([[8, 0, 0], [0, 8, 0], [0, 0, 0]])
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    directions = np.random.rand(3, 3)
    scales = [1, 2, 3]
    roundness = np.array([[1, 1], [1, 2], [2, 1]])
    sq_actor = actor.superquadric(centers, roundness=roundness,
                                  directions=directions,
                                  colors=colors.astype(np.uint8),
                                  scales=scales)
    material.manifest_standard(sq_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(sq_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    ft.assert_greater_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Label setup
    text_actor = actor.label("Hello")
    material.manifest_standard(text_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(text_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 5)

    scene.clear()  # Reset scene

    # Texture setup
    arr = (255 * np.ones((512, 212, 4))).astype('uint8')
    arr[20:40, 20:40, :] = np.array([255, 0, 0, 255], dtype='uint8')
    tp2 = actor.texture(arr)
    material.manifest_standard(tp2, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(tp2)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    scene.clear()  # Reset scene

    # Texture on sphere setup
    arr = 255 * np.ones((810, 1620, 3), dtype='uint8')
    rows, cols, _ = arr.shape
    rs = rows // 2
    cs = cols // 2
    w = 150 // 2
    arr[rs - w: rs + w, cs - 10 * w: cs + 10 * w] = np.array([255, 127, 0])
    tsa = actor.texture_on_sphere(arr)
    material.manifest_standard(tsa, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(tsa)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    # NOTE: From this point on, these actors don't have full support for PBR
    # interpolation. That is, the test passes but there is no evidence of the
    # desired effect.

    """
    # Setup slicer
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine, value_range=[data.min(), data.max()])
    slicer.display(None, None, 25)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(slicer)
    """

    """
    # Line setup
    data1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    data2 = data1 + np.array([0.5, 0., 0.])
    data = [data1, data2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    lines = actor.line(data, colors, linewidth=5)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(lines)
    """

    """
    # Scalar bar setup
    lut = actor.colormap_lookup_table(
        scale_range=(0., 100.), hue_range=(0., 0.1), saturation_range=(1, 1),
        value_range=(1., 1))
    sb_actor = actor.scalar_bar(lut, ' ')
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(sb_actor)
    """

    """
    # Axes setup
    axes = actor.axes()
    material.manifest_standard(axes, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(axes)
    """

    """
    # Peak slicer setup
    _peak_dirs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='f4')
    # peak_dirs.shape = (1, 1, 1) + peak_dirs.shape
    peak_dirs = np.zeros((11, 11, 11, 3, 3))
    peak_dirs[:, :, :] = _peak_dirs
    peak_actor = actor.peak_slicer(peak_dirs)
    material.manifest_standard(peak_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(peak_actor)
    """

    """
    # Dots setup
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    dots_actor = actor.dots(points, color=(0, 255, 0))
    material.manifest_standard(dots_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(dots_actor)
    """

    """
    # Text3D setup
    msg = 'I \nlove\n FURY'
    txt_actor = actor.text_3d(msg)
    material.manifest_standard(txt_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(txt_actor)
    """

    """
    # Figure setup
    arr = (255 * np.ones((512, 212, 4))).astype('uint8')
    arr[20:40, 20:40, 3] = 0
    tp = actor.figure(arr)
    material.manifest_standard(tp, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(tp)
    """

    """
    # SDF setup
    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0]]) * 11
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    scales = [1, 2, 3]
    primitive = ['sphere', 'ellipsoid', 'torus']

    sdf_actor = actor.sdf(centers, directions=directions, colors=colors,
                          primitives=primitive, scales=scales)
    material.manifest_standard(sdf_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(sdf_actor)
    """

    # NOTE: For this last set of actors, there is no support for PBR
    # interpolation at all.

    """
    # Billboard setup
    centers = np.array([[0, 0, 0], [5, -5, 5], [-7, 7, -7], [10, 10, 10],
                        [10.5, 11.5, 11.5], [12, -12, -12], [-17, 17, 17],
                        [-22, -22, 22]])
    colors = np.array([[1, 1, 0], [0, 0, 0], [1, 0, 1], [0, 0, 1], [1, 1, 1],
                       [1, 0, 0], [0, 1, 0], [0, 1, 1]])
    scales = [6, .4, 1.2, 1, .2, .7, 3, 2]
    """
    fake_sphere = \
        """
        float len = length(point);
        float radius = 1.;
        if (len > radius)
            discard;
        vec3 normalizedPoint = normalize(vec3(point.xy, sqrt(1. - len)));
        vec3 direction = normalize(vec3(1., 1., 1.));
        float df_1 = max(0, dot(direction, normalizedPoint));
        float sf_1 = pow(df_1, 24);
        fragOutput0 = vec4(max(df_1 * color, sf_1 * vec3(1)), 1);
        """
    """
    billboard_actor = actor.billboard(centers, colors=colors, scales=scales,
                                      fs_impl=fake_sphere)
    material.manifest_pbr(billboard_actor)
    scene.add(billboard_actor)
    """

    if interactive:
        window.show(scene)
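
# A minimal, self-contained sketch (not part of the test above) of the pattern
# it exercises: create an actor, apply the standard material, render off-screen
# and inspect the snapshot. The `actor.sphere` call and its keyword arguments
# are assumptions used for illustration; the material and window calls mirror
# the ones in the test, and the imports assume the FURY package these tests
# come from.
def _standard_material_sketch():
    import numpy as np
    from fury import actor, material, window

    scene = window.Scene()
    sphere_actor = actor.sphere(centers=np.array([[0., 0., 0.]]),
                                colors=np.array([[1., 0., 0.]]),
                                radii=1.)
    material.manifest_standard(sphere_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(sphere_actor)
    arr = window.snapshot(scene, offscreen=True)
    return window.analyze_snapshot(arr)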
Example #16
0
def test_renderer():

    ren = window.Renderer()

    npt.assert_equal(ren.size(), (0, 0))

    # background color for the renderer (1, 0.5, 0); 0.001 is added so that
    # 255 * 0.501 = 127.755 rounds unambiguously to 128 instead of landing on
    # the 127.5 rounding boundary when converting from float to uint8
    bg_float = (1, 0.501, 0)

    # that will come in the image in the 0-255 uint scale
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))

    ren.background(bg_float)
    # window.show(ren)
    arr = window.snapshot(ren)

    report = window.analyze_snapshot(arr,
                                     bg_color=bg_color,
                                     colors=[bg_color, (0, 127, 0)])
    npt.assert_equal(report.objects, 0)
    npt.assert_equal(report.colors_found, [True, False])

    axes = actor.axes()
    ren.add(axes)
    # window.show(ren)

    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    ren.rm(axes)
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    window.add(ren, axes)
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    ren.rm_all()
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    ren2 = window.Renderer(bg_float)
    ren2.background((0, 0, 0.))

    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.bg_color, (0, 0, 0))

    ren2.add(axes)

    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.actors, 3)

    window.rm(ren2, axes)
    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.actors, 0)
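
# A minimal sketch of the same background / snapshot checks written against
# window.Scene, which the later examples in this file use instead of
# window.Renderer; the values are illustrative and the module-level names
# (window) are the same ones the tests above rely on.
def _scene_background_sketch():
    scene = window.Scene()
    scene.background((1, 0.501, 0))
    arr = window.snapshot(scene)
    # the float background comes back in the snapshot on the 0-255 uint8 scale
    return window.analyze_snapshot(arr, bg_color=(255, 128, 0))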
Example #17
0
def test_active_camera():
    scene = window.Scene()
    scene.add(actor.axes(scale=(1, 1, 1)))

    scene.reset_camera()
    scene.reset_clipping_range()

    direction = scene.camera_direction()
    position, focal_point, view_up = scene.get_camera()

    scene.set_camera((0., 0., 1.), (0., 0., 0), view_up)

    position, focal_point, view_up = scene.get_camera()
    npt.assert_almost_equal(np.dot(direction, position), -1)

    scene.zoom(1.5)

    new_position, _, _ = scene.get_camera()

    npt.assert_array_almost_equal(position, new_position)

    scene.zoom(1)

    # rotate around focal point
    scene.azimuth(90)

    position, _, _ = scene.get_camera()

    npt.assert_almost_equal(position, (1.0, 0.0, 0))

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    # rotate around camera's center
    scene.yaw(90)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=[(0, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    scene.yaw(-90)
    scene.elevation(90)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, colors=(0, 255, 0))
    npt.assert_equal(report.colors_found, [True])

    scene.set_camera((0., 0., 1.), (0., 0., 0), view_up)

    # vertical rotation of the camera around the focal point
    scene.pitch(10)
    scene.pitch(-10)

    # rotate around the direction of projection
    scene.roll(90)

    # inverted normalized distance from focal point along the direction
    # of the camera

    position, _, _ = scene.get_camera()
    scene.dolly(0.5)
    new_position, focal_point, view_up = scene.get_camera()
    npt.assert_almost_equal(position[2], 0.5 * new_position[2])

    cam = scene.camera()
    npt.assert_equal(new_position, cam.GetPosition())
    npt.assert_equal(focal_point, cam.GetFocalPoint())
    npt.assert_equal(view_up, cam.GetViewUp())
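
# A compact, illustrative sketch (not part of the test) of the camera helpers
# exercised above: azimuth/yaw/elevation/pitch/roll rotate the view, zoom and
# dolly change the apparent distance, and set_camera/get_camera write or read
# the position, focal point and view-up; it reuses the module-level names
# (actor, window) from the tests above.
def _camera_helpers_sketch():
    scene = window.Scene()
    scene.add(actor.axes(scale=(1, 1, 1)))
    scene.set_camera(position=(0., 0., 1.), focal_point=(0., 0., 0.),
                     view_up=(0., 1., 0.))
    scene.azimuth(90)   # rotate around the focal point
    scene.yaw(-90)      # rotate around the camera's own center
    scene.roll(45)      # rotate around the direction of projection
    scene.dolly(0.5)    # move along the viewing direction
    return scene.get_camera()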
Example #18
0
def test_ribbon(interactive=False):

    scene = window.Scene()

    # Testing if helices and sheets are rendered properly
    atom_coords = np.array([[31.726, 105.084,  71.456],
                            [31.477, 105.680,  70.156],
                            [32.599, 106.655,  69.845],
                            [32.634, 107.264,  68.776],
                            [30.135, 106.407,  70.163],
                            [29.053, 105.662,  70.913],
                            [28.118, 106.591,  71.657],
                            [28.461, 107.741,  71.938],
                            [26.928, 106.097,  71.983],
                            [33.507, 106.802,  70.804],
                            [34.635, 107.689,  70.622],
                            [35.687, 107.018,  69.765],
                            [36.530, 107.689,  69.174],
                            [35.631, 105.690,  69.688],
                            [36.594, 104.921,  68.903],
                            [36.061, 104.498,  67.534],
                            [36.601, 103.580,  66.916],
                            [37.047, 103.645,  69.660],
                            [35.907, 102.828,  69.957],
                            [37.751, 104.014,  70.958]])
    elements = np.array([7, 6, 6, 8, 6, 6, 6, 8, 7, 7, 6, 6, 8, 7, 6, 6, 8, 6,
                         8, 6])
    atom_names = np.array(['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'NE2',
                           'N', 'CA', 'C', 'O', 'N', 'CA', 'C', 'O', 'CB',
                           'OG1', 'OG2'])
    model = np.ones(20)
    chain = np.ones(20)*65
    residue_seq = np.ones(20)
    residue_seq[9:13] = 2
    residue_seq[13:] = 3
    residue_seq[6] = 4
    is_hetatm = np.zeros(20, dtype=bool)
    secondary_structure = np.array([[65, 1, 65, 3]])
    colors = np.array([[240/255, 0, 128/255], [1, 1, 0]])
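    # First pass (i == 0) renders the secondary-structure segment as a helix,
    # second pass (i == 1) renders it as a sheet; each pass checks that the
    # corresponding ribbon color shows up in the snapshot.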
    for i, color in enumerate(colors):
        if i:
            helix = []
            sheet = secondary_structure
        else:
            helix = secondary_structure
            sheet = []
        molecule = mol.Molecule(elements, atom_coords, atom_names, model,
                                residue_seq, chain, sheet, helix, is_hetatm)
        test_actor = mol.ribbon(molecule)
        scene.set_camera((28, 113, 74), (34, 106, 70), (-0.37, 0.29, -0.88))
        scene.add(test_actor)
        scene.reset_camera()
        scene.reset_clipping_range()

        if interactive:
            window.show(scene)
        npt.assert_equal(scene.GetActors().GetNumberOfItems(), 1)
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr, colors=[color])
        npt.assert_equal(report.objects, 1)
        scene.clear()
Example #19
0
def test_add_shader_callback():
    cube = generate_cube_with_effect()
    showm = window.ShowManager()
    showm.scene.add(cube)

    class Timer(object):
        idx = 0.0

    timer = Timer()

    def timer_callback(obj, event):
        # nonlocal timer, showm
        timer.idx += 1.0
        showm.render()
        if timer.idx > 90:
            showm.exit()

    def my_cbk(_caller, _event, calldata=None):
        program = calldata

        if program is not None:
            try:
                program.SetUniformf("time", timer.idx)
            except ValueError:
                pass

    add_shader_callback(cube, my_cbk)
    showm.initialize()
    showm.add_timer_callback(True, 100, timer_callback)
    showm.start()

    arr = window.snapshot(showm.scene, offscreen=True)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    cone_actor = actor.cone(np.array([[0, 0, 0]]), np.array([[0, 1, 0]]),
                            (0, 0, 1))

    test_values = []

    def callbackLow(_caller, _event, calldata=None):
        program = calldata
        if program is not None:
            test_values.append(0)

    id_observer = add_shader_callback(cone_actor, callbackLow, 0)

    with pytest.raises(Exception):
        add_shader_callback(cone_actor, callbackLow, priority='str')

    mapper = cone_actor.GetMapper()
    mapper.RemoveObserver(id_observer)

    scene = window.Scene()
    scene.add(cone_actor)

    arr1 = window.snapshot(scene, size=(200, 200))
    assert len(test_values) == 0

    test_values = []

    def callbackHigh(_caller, _event, calldata=None):
        program = calldata
        if program is not None:
            test_values.append(999)

    def callbackMean(_caller, _event, calldata=None):
        program = calldata
        if program is not None:
            test_values.append(500)

    add_shader_callback(cone_actor, callbackHigh, 999)
    add_shader_callback(cone_actor, callbackLow, 0)

    id_mean = add_shader_callback(cone_actor, callbackMean, 500)

    # check that the callbacks run in priority order (highest first):
    # 999, then 500, then 0
    arr2 = window.snapshot(scene, size=(200, 200))
    assert np.abs(
        [test_values[0] - 999, test_values[1] - 500,
         test_values[2] - 0]).sum() == 0

    # check if the correct observer was removed
    mapper.RemoveObserver(id_mean)
    test_values = []

    arr3 = window.snapshot(scene, size=(200, 200))
    assert np.abs([test_values[0] - 999, test_values[1] - 0]).sum() == 0
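
# A minimal, illustrative sketch of the callback pattern tested above: the
# callback receives the shader program as `calldata` and can set uniforms on
# it before each render, and callbacks registered with a higher `priority`
# run first. It reuses the module-level names (np, actor, window,
# add_shader_callback) from the tests above.
def _shader_callback_sketch():
    def set_time_uniform(_caller, _event, calldata=None):
        program = calldata
        if program is not None:
            try:
                # only takes effect if the shader actually declares `time`
                program.SetUniformf("time", 1.0)
            except ValueError:
                pass

    cone = actor.cone(np.array([[0, 0, 0]]), np.array([[0, 1, 0]]), (0, 0, 1))
    add_shader_callback(cone, set_time_uniform, priority=100)

    scene = window.Scene()
    scene.add(cone)
    return window.snapshot(scene, size=(200, 200))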