Example 1
def test_streamtube_and_line_actors():
    scene = window.Scene()

    line1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    line2 = line1 + np.array([0.5, 0., 0.])

    lines = [line1, line2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    c = actor.line(lines, colors, linewidth=3)
    scene.add(c)

    c = actor.line(lines, colors, spline_subdiv=5, linewidth=3)
    scene.add(c)

    # create streamtubes of the same lines and shift them a bit
    c2 = actor.streamtube(lines, colors, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    scene.add(c2)

    arr = window.snapshot(scene)

    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)

    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])

    # as before with splines
    c2 = actor.streamtube(lines, colors, spline_subdiv=5, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    scene.add(c2)

    arr = window.snapshot(scene)

    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)

    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])

    c3 = actor.line(lines, colors, depth_cue=True, fake_tube=True)

    VTK_9_PLUS = window.vtk.vtkVersion.GetVTKMajorVersion() >= 9
    shader_obj = c3.GetShaderProperty() if VTK_9_PLUS else c3.GetMapper()
    mapper_code = shader_obj.GetGeometryShaderCode()
    file_code = shaders.load("line.geom")
    npt.assert_equal(mapper_code, file_code)

    npt.assert_equal(c3.GetProperty().GetRenderLinesAsTubes(), True)
Example 2
def test_order_transparent():

    scene = window.Scene()

    lines = [
        np.array([[1, 0, 1.], [-1, 0, 1.]]),
        np.array([[1, 0, -1.], [-1, 0, -1.]])
    ]
    colors = np.array([[1., 0., 0.], [0., 1., 0.]])
    stream_actor = actor.streamtube(lines, colors, linewidth=0.3, opacity=0.5)

    scene.add(stream_actor)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene,
                          fname='green_front.png',
                          offscreen=True,
                          order_transparent=False)

    green_no_ot = arr[150, 150, 1]

    arr = window.snapshot(scene,
                          fname='red_front.png',
                          offscreen=True,
                          order_transparent=True)

    # when order transparency is True, green should be weaker
    green_ot = arr[150, 150, 1]

    npt.assert_equal(green_no_ot > green_ot, True)
Example 3
def bounding_box(molecule, colors=(1, 1, 1), linewidth=0.3):
    """Create a bounding box for a molecule.

    Parameters
    ----------
    molecule : Molecule
        The molecule around which the bounding box is to be created.
    colors : tuple (3,) or ndarray of shape (3,), optional
        Color of the bounding box. Default: (1, 1, 1)
    linewidth : float, optional
        Thickness of tubes used to compose bounding box. Default: 0.3

    Returns
    -------
    bbox_actor : vtkActor
        Actor created to serve as a bounding box for a given molecule.

    """
    pts = numpy_to_vtk_points(get_all_atomic_positions(molecule))
    min_x, max_x, min_y, max_y, min_z, max_z = pts.GetBounds()

    lines = np.array([[[min_x, min_y, min_z], [min_x, min_y, max_z]],
                      [[min_x, max_y, min_z], [min_x, max_y, max_z]],
                      [[max_x, min_y, min_z], [max_x, min_y, max_z]],
                      [[max_x, max_y, min_z], [max_x, max_y, max_z]],
                      [[min_x, min_y, min_z], [max_x, min_y, min_z]],
                      [[min_x, max_y, min_z], [max_x, max_y, min_z]],
                      [[min_x, max_y, max_z], [max_x, max_y, max_z]],
                      [[min_x, min_y, max_z], [max_x, min_y, max_z]],
                      [[min_x, min_y, min_z], [min_x, max_y, min_z]],
                      [[max_x, min_y, min_z], [max_x, max_y, min_z]],
                      [[min_x, min_y, max_z], [min_x, max_y, max_z]],
                      [[max_x, min_y, max_z], [max_x, max_y, max_z]]])

    return streamtube(lines, colors=colors, linewidth=linewidth)
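
For reference, here is a minimal, hedged sketch of the same 12-edge pattern without the Molecule dependency. The bounds are illustrative stand-ins for pts.GetBounds(), and the streamtube call mirrors the one returned above (edges are pairs of corners that differ along exactly one axis).

import itertools
import numpy as np
from fury import actor, window

# Illustrative bounds standing in for pts.GetBounds() on a real molecule.
min_x, max_x, min_y, max_y, min_z, max_z = 0., 10., 0., 5., 0., 3.
corners = np.array(list(itertools.product((min_x, max_x),
                                           (min_y, max_y),
                                           (min_z, max_z))))
# Two corners form an edge when they differ along exactly one axis,
# which yields the 12 edges of the box.
edges = [np.array([a, b]) for a, b in itertools.combinations(corners, 2)
         if np.sum(a != b) == 1]

scene = window.Scene()
scene.add(actor.streamtube(edges, colors=(1, 1, 1), linewidth=0.3))
# window.show(scene)  # uncomment for an interactive view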
Example 4
def test_stereo():
    scene = window.Scene()

    lines = [np.array([[-1, 0, 0.], [1, 0, 0.]]),
             np.array([[-1, 1, 0.], [1, 1, 0.]])]
    colors = np.array([[1., 0., 0.], [0., 1., 0.]])
    stream_actor = actor.streamtube(lines, colors, linewidth=0.3, opacity=0.5)

    scene.add(stream_actor)

    # green in front
    scene.elevation(90)
    scene.camera().OrthogonalizeViewUp()
    scene.reset_clipping_range()

    scene.reset_camera()

    mono = window.snapshot(scene, fname='stereo_off.png', offscreen=True,
                           size=(300, 300), order_transparent=True,
                           stereo='off')

    with npt.assert_warns(UserWarning):
        stereo = window.snapshot(scene, fname='stereo_horizontal.png',
                                 offscreen=True, size=(300, 300),
                                 order_transparent=True, stereo='On')

    # mono render should have values in the center
    # horizontal split stereo render should be empty in the center
    npt.assert_raises(AssertionError, npt.assert_array_equal,
                      mono[150, 150], [0, 0, 0])
    npt.assert_array_equal(stereo[150, 150], [0, 0, 0])
Example 5
def test_order_transparent():

    scene = window.Scene()

    lines = [
        np.array([[-1, 0, 0.], [1, 0, 0.]]),
        np.array([[-1, 1, 0.], [1, 1, 0.]])
    ]
    colors = np.array([[1., 0., 0.], [0., 1., 0.]])
    stream_actor = actor.streamtube(lines, colors, linewidth=0.3, opacity=0.5)

    scene.add(stream_actor)

    scene.reset_camera()

    # green in front
    scene.elevation(90)
    scene.camera().OrthogonalizeViewUp()
    scene.reset_clipping_range()

    scene.reset_camera()

    not_xvfb = os.environ.get("TEST_WITH_XVFB", False)

    if not_xvfb:
        arr = window.snapshot(scene,
                              fname='green_front.png',
                              offscreen=True,
                              order_transparent=False)
    else:
        arr = window.snapshot(scene,
                              fname='green_front.png',
                              offscreen=False,
                              order_transparent=False)

    # therefore the green component must have a higher value (in RGB terms)
    npt.assert_equal(arr[150, 150][1] > arr[150, 150][0], True)

    # red in front
    scene.elevation(-180)
    scene.camera().OrthogonalizeViewUp()
    scene.reset_clipping_range()

    if not_xvfb:
        arr = window.snapshot(scene,
                              fname='red_front.png',
                              offscreen=True,
                              order_transparent=True)
    else:
        arr = window.snapshot(scene,
                              fname='red_front.png',
                              offscreen=False,
                              order_transparent=True)

    # therefore the red component must have a higher value (in RGB terms)
    npt.assert_equal(arr[150, 150][0] > arr[150, 150][1], True)
Example 6
def test_order_transparent():

    renderer = window.Renderer()

    lines = [
        np.array([[-1, 0, 0.], [1, 0, 0.]]),
        np.array([[-1, 1, 0.], [1, 1, 0.]])
    ]
    colors = np.array([[1., 0., 0.], [0., .5, 0.]])
    stream_actor = actor.streamtube(lines, colors, linewidth=0.3, opacity=0.5)
Example 7
def test_streamtube_and_line_actors():
    renderer = window.renderer()

    line1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    line2 = line1 + np.array([0.5, 0., 0.])

    lines = [line1, line2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    c = actor.line(lines, colors, linewidth=3)
    window.add(renderer, c)

    c = actor.line(lines, colors, spline_subdiv=5, linewidth=3)
    window.add(renderer, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = actor.streamtube(lines, colors, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    window.add(renderer, c2)

    arr = window.snapshot(renderer)

    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)

    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])

    # as before with splines
    c2 = actor.streamtube(lines, colors, spline_subdiv=5, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    window.add(renderer, c2)

    arr = window.snapshot(renderer)

    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)

    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])
Example 8
def show_template_bundles(final_streamlines, template_path, fname):
    import nibabel as nib
    from fury import actor, window
    renderer = window.Renderer()
    template_img_data = nib.load(template_path).get_data().astype('bool')
    template_actor = actor.contour_from_roi(template_img_data,
                                            color=(50, 50, 50), opacity=0.05)
    renderer.add(template_actor)
    lines_actor = actor.streamtube(final_streamlines, window.colors.orange,
                                   linewidth=0.3)
    renderer.add(lines_actor)
    window.record(renderer, n_frames=1, out_path=fname, size=(900, 900))
    return
Example 9
def show_template_bundles(final_streamlines, template_path, fname):
    """Displayes the template bundles

    Parameters
    ----------
    final_streamlines : list
        Generated streamlines
    template_path : str
        Path to reference FA nii.gz file
    fname : str
        Path of the output file (saved as )
    """

    renderer = window.Renderer()
    template_img_data = nib.load(template_path).get_data().astype("bool")
    template_actor = actor.contour_from_roi(
        template_img_data, color=(50, 50, 50), opacity=0.05
    )
    renderer.add(template_actor)
    lines_actor = actor.streamtube(
        final_streamlines, window.colors.orange, linewidth=0.3
    )
    renderer.add(lines_actor)
    window.record(renderer, n_frames=1, out_path=fname, size=(900, 900))
Example 10
##############################################################################
# With the box, streamtube and sphere actors, we can create the box, its
# edges and the spheres, respectively.

scene = window.Scene()
box_centers = np.array([[0, 0, 0]])
box_directions = np.array([[0, 1, 0]])
box_colors = np.array([[1, 1, 1, 0.2]])
box_actor = actor.box(box_centers,
                      box_directions,
                      box_colors,
                      scales=(box_lx, box_ly, box_lz))
scene.add(box_actor)

lines = box_edges(box_lx, box_ly, box_lz)
line_actor = actor.streamtube(lines, colors=(1, 0.5, 0), linewidth=0.1)
scene.add(line_actor)

sphere_actor = actor.sphere(centers=xyz, colors=colors, radii=radii)
scene.add(sphere_actor)

showm = window.ShowManager(scene,
                           size=(900, 768),
                           reset_camera=True,
                           order_transparent=True)
showm.initialize()
tb = ui.TextBlock2D(bold=True)
scene.zoom(0.8)
scene.azimuth(30)

# use itertools to avoid global variables
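
The snippet above is cut off right after the comment; the counter-based timer pattern it refers to appears in Example 11 below. Here is a minimal, self-contained sketch of that pattern (the sphere, sizes and timer interval are illustrative only): the frame count lives in an itertools.count() iterator instead of a global variable.

import itertools
import numpy as np
from fury import actor, ui, window

# Minimal scene with one sphere and a text block to display the frame count.
scene = window.Scene()
scene.add(actor.sphere(centers=np.array([[0., 0., 0.]]),
                       colors=np.array([[1., 0.5, 0.]]), radii=0.5))
tb = ui.TextBlock2D(bold=True)
scene.add(tb)
showm = window.ShowManager(scene, size=(600, 600))
showm.initialize()

# Frame counter kept in an iterator instead of a global variable.
counter = itertools.count()

def timer_callback(_obj, _event):
    cnt = next(counter)
    tb.message = "Frame " + str(cnt)
    showm.render()
    if cnt > 100:
        showm.exit()

# The interval in milliseconds is illustrative only.
showm.add_timer_callback(True, 50, timer_callback)
showm.start()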
Example 11
def test_frame_rate_and_anti_aliasing():
    """Testing frame rate with/out anti-aliasing"""

    length_ = 200
    multi_samples = 32
    max_peels = 8

    st_x = np.arange(length_)
    st_y = np.sin(np.arange(length_))
    st_z = np.zeros(st_x.shape)
    st = np.zeros((length_, 3))
    st[:, 0] = st_x
    st[:, 1] = st_y
    st[:, 2] = st_z

    all_st = []
    all_st.append(st)
    for i in range(1000):
        all_st.append(st + i * np.array([0., .5, 0]))

    # st_actor = actor.line(all_st, linewidth=1)
    # TODO: textblock disappears when lod=True
    st_actor = actor.streamtube(all_st, linewidth=0.1, lod=False)

    scene = window.Scene()
    scene.background((1, 1., 1))

    # quick game style antialiasing
    scene.fxaa_on()
    scene.fxaa_off()

    # the good stuff comes later with multi-sampling

    tb = ui.TextBlock2D(font_size=40, color=(1, 0.5, 0))

    panel = ui.Panel2D(position=(400, 400), size=(400, 400))
    panel.add_element(tb, (0.2, 0.5))

    counter = itertools.count()
    showm = window.ShowManager(scene,
                               size=(1980, 1080), reset_camera=False,
                               order_transparent=True,
                               multi_samples=multi_samples,
                               max_peels=max_peels,
                               occlusion_ratio=0.0)

    showm.initialize()
    scene.add(panel)
    scene.add(st_actor)
    scene.reset_camera_tight()
    scene.zoom(5)

    class FrameRateHolder(object):
        fpss = []

    frh = FrameRateHolder()

    def timer_callback(_obj, _event):
        cnt = next(counter)
        if cnt % 1 == 0:
            fps = np.round(scene.frame_rate, 0)
            frh.fpss.append(fps)
            msg = "FPS " + str(fps) + ' ' + str(cnt)
            tb.message = msg
            showm.render()
        if cnt > 10:
            showm.exit()

    # Run every 200 milliseconds
    showm.add_timer_callback(True, 200, timer_callback)
    showm.start()

    arr = window.snapshot(scene, size=(1980, 1080),
                          offscreen=True,
                          order_transparent=True,
                          multi_samples=multi_samples,
                          max_peels=max_peels,
                          occlusion_ratio=0.0)
    assert_greater(np.sum(arr), 0)
    # TODO: check why in osx we have issues in Azure
    if not skip_osx:
        assert_greater(np.median(frh.fpss), 0)

    frh.fpss = []
    counter = itertools.count()
    multi_samples = 0
    showm = window.ShowManager(scene,
                               size=(1980, 1080), reset_camera=False,
                               order_transparent=True,
                               multi_samples=multi_samples,
                               max_peels=max_peels,
                               occlusion_ratio=0.0)

    showm.initialize()
    showm.add_timer_callback(True, 200, timer_callback)
    showm.start()

    arr2 = window.snapshot(scene, size=(1980, 1080),
                           offscreen=True,
                           order_transparent=True,
                           multi_samples=multi_samples,
                           max_peels=max_peels,
                           occlusion_ratio=0.0)
    assert_greater(np.sum(arr2), 0)
    if not skip_osx:
        assert_greater(np.median(frh.fpss), 0)
Example 12
def test_bundle_maps():
    scene = window.Scene()
    bundle = simulated_bundle(no_streamlines=10, waves=False)

    metric = 100 * np.ones((200, 200, 200))

    # add lower values
    metric[100, :, :] = 100 * 0.5

    # create a nice orange-red colormap
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    scene.add(line)
    scene.add(actor.scalar_bar(lut, ' '))

    report = window.analyze_scene(scene)

    npt.assert_almost_equal(report.actors, 1)
    # window.show(scene)

    scene.clear()

    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0

    line = actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut)
    scene.add(line)
    # window.show(scene)

    report = window.analyze_scene(scene)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    scene.clear()

    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0

    line = actor.line(bundle, colors, linewidth=2)
    scene.add(line)
    # window.show(scene)

    report = window.analyze_scene(scene)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(scene)

    arr = window.snapshot(scene)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # try other input options for colors
    scene.clear()
    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
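
Distilled from the test above, the per-point colormap pattern can stand on its own: one scalar per streamline point plus a lookup table from actor.colormap_lookup_table is passed straight to streamtube. A minimal sketch with two assumed streamlines:

import numpy as np
from fury import actor, window

# Two short, assumed streamlines; any list of (N, 3) arrays works.
bundle = [np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]]),
          np.array([[0.5, 0, 0], [1.5, 1, 1], [2.5, 2, 2.]])]

# One scalar per point, mapped through the same orange-red lookup table.
values = 100 * np.random.rand(sum(len(b) for b in bundle))
lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                  hue_range=(0., 0.1),
                                  saturation_range=(1, 1),
                                  value_range=(1., 1))

scene = window.Scene()
scene.add(actor.streamtube(bundle, values, linewidth=0.1,
                           lookup_colormap=lut))
scene.add(actor.scalar_bar(lut, ' '))
# window.show(scene)  # uncomment for an interactive view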
Example 13
as model bundles
"""

model_af_l_file, model_cst_l_file = get_two_hcp842_bundles()

sft_af_l = load_trk(model_af_l_file, "same", bbox_valid_check=False)
model_af_l = sft_af_l.streamlines
"""
Let's visualize the Arcuate Fasciculus Left (AF_L) bundle before the assignment maps.
"""

interactive = False

scene = window.Scene()
scene.SetBackground(1, 1, 1)
scene.add(actor.streamtube(model_af_l))
scene.set_camera(focal_point=(-18.17281532, -19.55606842, 6.92485857),
                 position=(-360.11, -340.46, -40.44),
                 view_up=(-0.03, 0.028, 0.89))
window.record(scene,
              out_path='af_l_before_assignment_maps.png',
              size=(600, 600))
if interactive:
    window.show(scene)
"""
.. figure:: af_l_before_assignment_maps.png
   :align: center

   AF_L before assignment maps
"""
"""
Example 14
def test_bundle_maps():
    renderer = window.renderer()
    bundle = fornix_streamlines()
    bundle, shift = center_streamlines(bundle)

    mat = np.array([[1, 0, 0, 100],
                    [0, 1, 0, 100],
                    [0, 0, 1, 100],
                    [0, 0, 0, 1.]])

    bundle = transform_streamlines(bundle, mat)

    # metric = np.random.rand(*(200, 200, 200))
    metric = 100 * np.ones((200, 200, 200))

    # add lower values
    metric[100, :, :] = 100 * 0.5

    # create a nice orange-red colormap
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    window.add(renderer, line)
    window.add(renderer, actor.scalar_bar(lut, ' '))

    report = window.analyze_renderer(renderer)

    npt.assert_almost_equal(report.actors, 1)
    # window.show(renderer)

    renderer.clear()

    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0

    line = actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    renderer.clear()

    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0

    line = actor.line(bundle, colors, linewidth=2)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(renderer)

    arr = window.snapshot(renderer)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # try other input options for colors
    renderer.clear()
    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
Example 15
scene.add(stream_actor9)

if interactive:
    window.show(scene, size=(600, 600), reset_camera=False)

window.record(scene, out_path='bundle9.png', size=(600, 600))

###############################################################################
# Render streamlines as tubes
# ============================================================
#
# For yet more realism, we can use ``streamtube``. Note that this actor
# generates much more geometry than ``line``, so it is more computationally
# expensive. For large datasets, it may be better to approximate tubes using
# the methods described above.

scene.clear()

stream_actor10 = actor.streamtube(bundle_native, linewidth=0.5)

scene.add(stream_actor10)

if interactive:
    window.show(scene, size=(600, 600), reset_camera=False)

window.record(scene, out_path='bundle10.png', size=(600, 600))

###############################################################################
# In summary, we showed that there are many useful ways for visualizing maps
# on bundles.
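
As a cheaper alternative to streamtube for large datasets, Example 1 above uses shader-based fake tubes: actor.line with depth_cue=True and fake_tube=True mimics the tube look without generating tube geometry. A minimal sketch of that pattern, assuming bundle_native is the same streamline list used in this example:

from fury import actor, window

# Shader-based pseudo-tubes: far less geometry than actor.streamtube.
# bundle_native is assumed to be the streamline list used above.
scene = window.Scene()
fake_tube_actor = actor.line(bundle_native, linewidth=3,
                             depth_cue=True, fake_tube=True)
scene.add(fake_tube_actor)
# window.show(scene, size=(600, 600), reset_camera=False)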
Example 16
def test_button_and_slider_widgets():
    recording = False
    filename = "test_button_and_slider_widgets.log.gz"
    recording_filename = pjoin(DATA_DIR, filename)
    renderer = window.Renderer()

    # create some minimalistic streamlines
    lines = [
        np.array([[-1, 0, 0.], [1, 0, 0.]]),
        np.array([[-1, 1, 0.], [1, 1, 0.]])
    ]
    colors = np.array([[1., 0., 0.], [0.3, 0.7, 0.]])
    stream_actor = actor.streamtube(lines, colors)

    states = {
        'camera_button_count': 0,
        'plus_button_count': 0,
        'minus_button_count': 0,
        'slider_moved_count': 0,
    }

    renderer.add(stream_actor)

    # the show manager allows breaking the rendering process
    # into steps so that the widgets can be added properly
    show_manager = window.ShowManager(renderer, size=(800, 800))

    if recording:
        show_manager.initialize()
        show_manager.render()

    def button_callback(obj, event):
        # print('Camera pressed')
        states['camera_button_count'] += 1

    def button_plus_callback(obj, event):
        # print('+ pressed')
        states['plus_button_count'] += 1

    def button_minus_callback(obj, event):
        # print('- pressed')
        states['minus_button_count'] += 1

    fetch_viz_icons()
    button_png = read_viz_icons(fname='camera.png')

    button = widget.button(show_manager.iren, show_manager.ren,
                           button_callback, button_png, (.98, 1.), (80, 50))

    button_png_plus = read_viz_icons(fname='plus.png')
    button_plus = widget.button(show_manager.iren, show_manager.ren,
                                button_plus_callback, button_png_plus,
                                (.98, .9), (120, 50))

    button_png_minus = read_viz_icons(fname='minus.png')
    button_minus = widget.button(show_manager.iren, show_manager.ren,
                                 button_minus_callback, button_png_minus,
                                 (.98, .9), (50, 50))

    def print_status(obj, event):
        rep = obj.GetRepresentation()
        stream_actor.SetPosition((rep.GetValue(), 0, 0))
        states['slider_moved_count'] += 1

    slider = widget.slider(show_manager.iren,
                           show_manager.ren,
                           callback=print_status,
                           min_value=-1,
                           max_value=1,
                           value=0.,
                           label="X",
                           right_normalized_pos=(.98, 0.6),
                           size=(120, 0),
                           label_format="%0.2lf")

    # This callback is used to update the buttons/sliders' position
    # so they can stay on the right side of the window when the window
    # is being resized.

    global size
    size = renderer.GetSize()

    if recording:
        show_manager.record_events_to_file(recording_filename)
        print(states)
    else:
        show_manager.play_events_from_file(recording_filename)
        npt.assert_equal(states["camera_button_count"], 7)
        npt.assert_equal(states["plus_button_count"], 3)
        npt.assert_equal(states["minus_button_count"], 4)
        npt.assert_equal(states["slider_moved_count"], 116)

    if not recording:
        button.Off()
        slider.Off()
        # Uncomment below to test the slider and button with analyze
        # button.place(renderer)
        # slider.place(renderer)

        report = window.analyze_renderer(renderer)
        # import pylab as plt
        # plt.imshow(report.labels, origin='lower')
        # plt.show()
        npt.assert_equal(report.actors, 1)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 1)
Example 17
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    # The number of label maps must be equal to the number of bundles
    tmp = args.in_bundles + args.in_labels
    args.in_labels = args.in_bundles[(len(tmp) // 2):] + args.in_labels
    args.in_bundles = args.in_bundles[0:len(tmp) // 2]
    assert_inputs_exist(parser, args.in_bundles + args.in_labels)
    assert_output_dirs_exist_and_empty(parser,
                                       args, [],
                                       optional=args.save_rendering)

    stats = {}
    num_digits_labels = 3
    scene = window.Scene()
    scene.background(tuple(map(int, args.background)))
    for i, filename in enumerate(args.in_bundles):
        sft = load_tractogram_with_reference(parser, args, filename)
        sft.to_vox()
        sft.to_corner()
        img_labels = nib.load(args.in_labels[i])

        # same subject: same header or coregistered subjects: same header
        if not is_header_compatible(sft, args.in_bundles[0]) \
                or not is_header_compatible(img_labels, args.in_bundles[0]):
            parser.error('All headers must be identical.')

        data_labels = img_labels.get_fdata()
        bundle_name, _ = os.path.splitext(os.path.basename(filename))
        unique_labels = np.unique(data_labels)[1:].astype(int)

        # Empty bundle should at least return a json
        if not len(sft):
            tmp_dict = {}
            for label in unique_labels:
                tmp_dict['{}'.format(label).zfill(num_digits_labels)] \
                    = {'mean': 0.0, 'std': 0.0}
            stats[bundle_name] = {'diameter': tmp_dict}
            continue

        counter = 0
        labels_dict = {label: ([], []) for label in unique_labels}
        pts_labels = map_coordinates(data_labels,
                                     sft.streamlines._data.T - 0.5,
                                     order=0)
        # For each label, all positions and directions are needed to get
        # a tube estimation per label.
        for streamline in sft.streamlines:
            direction = np.gradient(streamline, axis=0).tolist()
            curr_labels = pts_labels[counter:counter +
                                     len(streamline)].tolist()

            for i, label in enumerate(curr_labels):
                if label > 0:
                    labels_dict[label][0].append(streamline[i])
                    labels_dict[label][1].append(direction[i])

            counter += len(streamline)

        centroid = np.zeros((len(unique_labels), 3))
        radius = np.zeros((len(unique_labels), 1))
        error = np.zeros((len(unique_labels), 1))
        for key in unique_labels:
            key = int(key)
            c, d, e = fit_circle_in_space(labels_dict[key][0],
                                          labels_dict[key][1],
                                          args.fitting_func)
            centroid[key - 1], radius[key - 1], error[key - 1] = c, d, e

        # Spatial smoothing to avoid degenerate estimation
        centroid_smooth = gaussian_filter(centroid,
                                          sigma=[1, 0],
                                          mode='nearest')
        centroid_smooth[::len(centroid) - 1] = centroid[::len(centroid) - 1]
        radius = gaussian_filter(radius, sigma=1, mode='nearest')
        error = gaussian_filter(error, sigma=1, mode='nearest')

        tmp_dict = {}
        for label in unique_labels:
            tmp_dict['{}'.format(label).zfill(num_digits_labels)] \
                = {'mean': float(radius[label-1])*2,
                   'std': float(error[label-1])}
        stats[bundle_name] = {'diameter': tmp_dict}

        if args.show_rendering or args.save_rendering:
            tube_actor = create_tube_with_radii(
                centroid_smooth,
                radius,
                error,
                wireframe=args.wireframe,
                error_coloring=args.error_coloring)
            scene.add(tube_actor)
            cmap = plt.get_cmap('jet')
            coloring = cmap(pts_labels / np.max(pts_labels))[:, 0:3]
            streamlines_actor = actor.streamtube(sft.streamlines,
                                                 linewidth=args.width,
                                                 opacity=args.opacity,
                                                 colors=coloring)
            scene.add(streamlines_actor)

            slice_actor = actor.slicer(data_labels, np.eye(4))
            slice_actor.opacity(0.0)
            scene.add(slice_actor)

    # If there are actually streamlines to display
    if args.show_rendering:
        showm = window.ShowManager(scene, reset_camera=True)
        showm.initialize()
        showm.start()
    elif args.save_rendering:
        scene.reset_camera()
        snapshot(scene,
                 os.path.join(args.save_rendering, 'superior.png'),
                 size=(1920, 1080),
                 offscreen=True)

        scene.pitch(180)
        scene.reset_camera()
        snapshot(scene,
                 os.path.join(args.save_rendering, 'inferior.png'),
                 size=(1920, 1080),
                 offscreen=True)

        scene.pitch(90)
        scene.set_camera(view_up=(0, 0, 1))
        scene.reset_camera()
        snapshot(scene,
                 os.path.join(args.save_rendering, 'posterior.png'),
                 size=(1920, 1080),
                 offscreen=True)

        scene.pitch(180)
        scene.set_camera(view_up=(0, 0, 1))
        scene.reset_camera()
        snapshot(scene,
                 os.path.join(args.save_rendering, 'anterior.png'),
                 size=(1920, 1080),
                 offscreen=True)

        scene.yaw(90)
        scene.reset_camera()
        snapshot(scene,
                 os.path.join(args.save_rendering, 'right.png'),
                 size=(1920, 1080),
                 offscreen=True)

        scene.yaw(180)
        scene.reset_camera()
        snapshot(scene,
                 os.path.join(args.save_rendering, 'left.png'),
                 size=(1920, 1080),
                 offscreen=True)
    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
Example 18
def screenshot_tracking(tracking, t1, directory="."):
    """
    Compute a 3-view screenshot with streamlines on T1.

    Parameters
    ----------
    tracking : string
        tractogram filename.
    t1 : string
        t1 filename.
    directory : string
        Directory to save the mosaic.

    Returns
    -------
    name : string
        Path of the mosaic
    """
    tractogram = nib.streamlines.load(tracking, True).tractogram
    t1 = nib.load(t1)
    t1_data = t1.get_data()

    slice_name = ['sagittal', 'coronal', 'axial']
    img_center = [(int(t1_data.shape[0] / 2) + 5, None, None),
                  (None, int(t1_data.shape[1] / 2), None),
                  (None, None, int(t1_data.shape[2] / 2))]
    center = [(330, 90, 60), (70, 330, 60), (70, 90, 400)]
    viewup = [(0, 0, -1), (0, 0, -1), (0, -1, 0)]
    size = (1920, 1080)

    image = np.array([])
    for i, _axis in enumerate(slice_name):
        streamlines = []
        it = 0
        slice_idx = img_center[i][i]

        for streamline in tractogram:
            if it > 10000:
                break
            stream = streamline.streamline
            if slice_idx in np.array(stream, dtype=int)[:, i]:
                it += 1
                idx = np.where(np.array(stream, dtype=int)[:, i] == \
                               slice_idx)[0][0]
                lower = idx - 2
                if lower < 0:
                    lower = 0
                upper = idx + 2
                if upper > len(stream) - 1:
                    upper = len(stream) - 1
                streamlines.append(stream[lower:upper])

        ren = window.Renderer()

        streamline_actor = actor.line(streamlines, linewidth=0.2)
        ren.add(streamline_actor)

        min_val = np.min(t1_data[t1_data > 0])
        max_val = np.percentile(t1_data[t1_data > 0], 99)
        t1_color = np.float32(t1_data - min_val) \
                   / np.float32(max_val - min_val) * 255.0
        slice_actor = actor.slicer(t1_color, opacity=0.8, value_range=(0, 255),
                                   interpolation='nearest')
        ren.add(slice_actor)
        slice_actor.display(img_center[i][0], img_center[i][1],
                            img_center[i][2])

        camera = ren.GetActiveCamera()
        camera.SetViewUp(viewup[i])
        center_cam = streamline_actor.GetCenter()
        camera.SetPosition(center[i])
        camera.SetFocalPoint((center_cam))

        img2 = renderer_to_arr(ren, size)
        if image.size == 0:
            image = img2
        else:
            image = np.hstack((image, img2))

    streamlines = []
    it = 0
    for streamline in tractogram:
        if it > 10000:
            break
        it += 1
        streamlines.append(streamline.streamline)

    ren = window.Renderer()
    streamline_actor = actor.streamtube(streamlines, linewidth=0.2)
    ren.add(streamline_actor)
    camera = ren.GetActiveCamera()
    camera.SetViewUp(0, 0, -1)
    center = streamline_actor.GetCenter()
    camera.SetPosition(center[0], 350, center[2])
    camera.SetFocalPoint(center)
    img2 = renderer_to_arr(ren, (3 * 1920, 1920))
    image = np.vstack((image, img2))

    imgs_comb = Image.fromarray(image)
    imgs_comb = imgs_comb.resize((3 * 1920, 1920 + 1080))
    image_name = os.path.basename(str(tracking)).split(".")[0]
    name = os.path.join(directory, image_name + '.png')
    imgs_comb.save(name)

    return name
Example 19
def test_custom_interactor_style_events(recording=False):
    print("Using VTK {}".format(vtk.vtkVersion.GetVTKVersion()))
    filename = "test_custom_interactor_style_events.log.gz"
    recording_filename = pjoin(DATA_DIR, filename)
    scene = window.Scene()

    # the show manager allows breaking the rendering process
    # into steps so that the widgets can be added properly
    interactor_style = interactor.CustomInteractorStyle()
    show_manager = window.ShowManager(scene,
                                      size=(800, 800),
                                      reset_camera=False,
                                      interactor_style=interactor_style)

    # Create a cursor, a circle that will follow the mouse.
    polygon_source = vtk.vtkRegularPolygonSource()
    polygon_source.GeneratePolygonOff()  # Only the outline of the circle.
    polygon_source.SetNumberOfSides(50)
    polygon_source.SetRadius(10)
    # polygon_source.SetRadius
    polygon_source.SetCenter(0, 0, 0)

    mapper = vtk.vtkPolyDataMapper2D()
    vtk_utils.set_input(mapper, polygon_source.GetOutputPort())

    cursor = vtk.vtkActor2D()
    cursor.SetMapper(mapper)
    cursor.GetProperty().SetColor(1, 0.5, 0)
    scene.add(cursor)

    def follow_mouse(iren, obj):
        obj.SetPosition(*iren.event.position)
        iren.force_render()

    interactor_style.add_active_prop(cursor)
    interactor_style.add_callback(cursor, "MouseMoveEvent", follow_mouse)

    # create some minimalistic streamlines
    lines = [
        np.array([[-1, 0, 0.], [1, 0, 0.]]),
        np.array([[-1, 1, 0.], [1, 1, 0.]])
    ]
    colors = np.array([[1., 0., 0.], [0.3, 0.7, 0.]])
    tube1 = actor.streamtube([lines[0]], colors[0])
    tube2 = actor.streamtube([lines[1]], colors[1])
    scene.add(tube1)
    scene.add(tube2)

    # Define some counter callback.
    states = defaultdict(lambda: 0)

    def counter(iren, _obj):
        states[iren.event.name] += 1

    # Assign the counter callback to every possible event.
    for event in [
            "CharEvent", "MouseMoveEvent", "KeyPressEvent", "KeyReleaseEvent",
            "LeftButtonPressEvent", "LeftButtonReleaseEvent",
            "RightButtonPressEvent", "RightButtonReleaseEvent",
            "MiddleButtonPressEvent", "MiddleButtonReleaseEvent"
    ]:
        interactor_style.add_callback(tube1, event, counter)

    # Add callback to scale up/down tube1.
    def scale_up_obj(iren, obj):
        counter(iren, obj)
        scale = np.asarray(obj.GetScale()) + 0.1
        obj.SetScale(*scale)
        iren.force_render()
        iren.event.abort()  # Stop propagating the event.

    def scale_down_obj(iren, obj):
        counter(iren, obj)
        scale = np.array(obj.GetScale()) - 0.1
        obj.SetScale(*scale)
        iren.force_render()
        iren.event.abort()  # Stop propagating the event.

    interactor_style.add_callback(tube2, "MouseWheelForwardEvent",
                                  scale_up_obj)
    interactor_style.add_callback(tube2, "MouseWheelBackwardEvent",
                                  scale_down_obj)

    # Add callback to hide/show tube1.
    def toggle_visibility(iren, obj):
        key = iren.event.key
        if key.lower() == "v":
            obj.SetVisibility(not obj.GetVisibility())
            iren.force_render()

    interactor_style.add_active_prop(tube1)
    interactor_style.add_active_prop(tube2)
    interactor_style.remove_active_prop(tube2)
    interactor_style.add_callback(tube1, "CharEvent", toggle_visibility)

    if recording:
        show_manager.record_events_to_file(recording_filename)
        print(list(states.items()))
    else:
        show_manager.play_events_from_file(recording_filename)
        msg = ("Wrong count for '{}'.")
        expected = [('CharEvent', 6), ('KeyPressEvent', 6),
                    ('KeyReleaseEvent', 6), ('MouseMoveEvent', 1652),
                    ('LeftButtonPressEvent', 1), ('RightButtonPressEvent', 1),
                    ('MiddleButtonPressEvent', 2),
                    ('LeftButtonReleaseEvent', 1),
                    ('MouseWheelForwardEvent', 3),
                    ('MouseWheelBackwardEvent', 1),
                    ('MiddleButtonReleaseEvent', 2),
                    ('RightButtonReleaseEvent', 1)]

        # Useful loop for debugging.
        for event, count in expected:
            if states[event] != count:
                print("{}: {} vs. {} (expected)".format(
                    event, states[event], count))

        for event, count in expected:
            npt.assert_equal(states[event], count, err_msg=msg.format(event))
Example 20
def test_manifest_standard(interactive=False):
    scene = window.Scene()  # Setup scene

    # Setup surface
    surface_actor = _generate_surface()
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(surface_actor)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    scene.clear()  # Reset scene

    # Contour from roi setup
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data, affine, color=np.array([1, 0, 1]))
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(surface)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    scene.clear()  # Reset scene

    # Contour from label setup
    data = np.zeros((50, 50, 50))
    data[5:15, 1:10, 25] = 1.
    data[25:35, 1:10, 25] = 2.
    data[40:49, 1:10, 25] = 3.
    color = np.array([[255, 0, 0],
                      [0, 255, 0],
                      [0, 0, 255]])
    surface = actor.contour_from_label(data, color=color)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(surface)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Streamtube setup
    data1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    data2 = data1 + np.array([0.5, 0., 0.])
    data = [data1, data2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    tubes = actor.streamtube(data, colors, linewidth=.1)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(tubes)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 2)

    scene.clear()  # Reset scene

    # ODF slicer setup
    if have_dipy:
        from dipy.data import get_sphere
        from tempfile import mkstemp
        sphere = get_sphere('symmetric362')
        shape = (11, 11, 11, sphere.vertices.shape[0])
        fid, fname = mkstemp(suffix='_odf_slicer.mmap')
        odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)
        odfs[:] = 1
        affine = np.eye(4)
        mask = np.ones(odfs.shape[:3])
        mask[:4, :4, :4] = 0
        odfs[..., 0] = 1
        odf_actor = actor.odf_slicer(odfs, affine, mask=mask, sphere=sphere,
                                     scale=.25, colormap='blues')
        material.manifest_standard(surface_actor, ambient_level=.3,
                                   diffuse_level=.25)
        k = 5
        I, J, _ = odfs.shape[:3]
        odf_actor.display_extent(0, I, 0, J, k, k)
        odf_actor.GetProperty().SetOpacity(1.0)
        scene.add(odf_actor)
        scene.reset_camera()
        scene.reset_clipping_range()
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 11 * 11)

    scene.clear()  # Reset scene

    # Tensor slicer setup
    if have_dipy:
        from dipy.data import get_sphere
        sphere = get_sphere('symmetric724')
        evals = np.array([1.4, .35, .35]) * 10 ** (-3)
        evecs = np.eye(3)
        mevals = np.zeros((3, 2, 4, 3))
        mevecs = np.zeros((3, 2, 4, 3, 3))
        mevals[..., :] = evals
        mevecs[..., :, :] = evecs
        affine = np.eye(4)
        scene = window.Scene()
        tensor_actor = actor.tensor_slicer(mevals, mevecs, affine=affine,
                                           sphere=sphere, scale=.3)
        material.manifest_standard(surface_actor, ambient_level=.3,
                                   diffuse_level=.25)
        _, J, K = mevals.shape[:3]
        tensor_actor.display_extent(0, 1, 0, J, 0, K)
        scene.add(tensor_actor)
        scene.reset_camera()
        scene.reset_clipping_range()
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 4)

    scene.clear()  # Reset scene

    # Point setup
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    opacity = 0.5
    points_actor = actor.point(points, colors, opacity=opacity)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(points_actor)
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Sphere setup
    xyzr = np.array([[0, 0, 0, 10], [100, 0, 0, 25], [200, 0, 0, 50]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.99]])
    opacity = 0.5
    sphere_actor = actor.sphere(centers=xyzr[:, :3], colors=colors[:],
                                radii=xyzr[:, 3], opacity=opacity)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(sphere_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Advanced geometry actors setup (Arrow, cone, cylinder)
    xyz = np.array([[0, 0, 0], [50, 0, 0], [100, 0, 0]])
    dirs = np.array([[0, 1, 0], [1, 0, 0], [0, 0.5, 0.5]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [1, 1, 0, 1]])
    heights = np.array([5, 7, 10])
    actor_list = [[actor.cone, {'directions': dirs, 'resolution': 8}],
                  [actor.arrow, {'directions': dirs, 'resolution': 9}],
                  [actor.cylinder, {'directions': dirs}]]
    for act_func, extra_args in actor_list:
        aga_actor = act_func(centers=xyz, colors=colors[:], heights=heights,
                             **extra_args)
        material.manifest_standard(surface_actor, ambient_level=.3,
                                   diffuse_level=.25)
        scene.add(aga_actor)
        scene.reset_camera()
        scene.reset_clipping_range()
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 3)
        scene.clear()

    # Basic geometry actors (Box, cube, frustum, octagonalprism, rectangle,
    # square)
    centers = np.array([[4, 0, 0], [0, 4, 0], [0, 0, 0]])
    colors = np.array([[1, 0, 0, 0.4], [0, 1, 0, 0.8], [0, 0, 1, 0.5]])
    directions = np.array([[1, 1, 0]])
    scale_list = [1, 2, (1, 1, 1), [3, 2, 1], np.array([1, 2, 3]),
                  np.array([[1, 2, 3], [1, 3, 2], [3, 1, 2]])]
    actor_list = [[actor.box, {}], [actor.cube, {}], [actor.frustum, {}],
                  [actor.octagonalprism, {}], [actor.rectangle, {}],
                  [actor.square, {}]]
    for act_func, extra_args in actor_list:
        for scale in scale_list:
            scene = window.Scene()
            bga_actor = act_func(centers=centers, directions=directions,
                                 colors=colors, scales=scale, **extra_args)
            material.manifest_standard(surface_actor, ambient_level=.3,
                                       diffuse_level=.25)
            scene.add(bga_actor)
            arr = window.snapshot(scene)
            report = window.analyze_snapshot(arr)
            msg = 'Failed with {}, scale={}'.format(act_func.__name__, scale)
            npt.assert_equal(report.objects, 3, err_msg=msg)
            scene.clear()

    # Cone setup using vertices
    centers = np.array([[0, 0, 0], [20, 0, 0], [40, 0, 0]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.99]])
    vertices = np.array([[0.0, 0.0, 0.0], [0.0, 10.0, 0.0],
                         [10.0, 0.0, 0.0], [0.0, 0.0, 10.0]])
    faces = np.array([[0, 1, 3], [0, 1, 2]])
    cone_actor = actor.cone(centers=centers, directions=directions,
                            colors=colors[:], vertices=vertices, faces=faces)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(cone_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Superquadric setup
    centers = np.array([[8, 0, 0], [0, 8, 0], [0, 0, 0]])
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    directions = np.random.rand(3, 3)
    scales = [1, 2, 3]
    roundness = np.array([[1, 1], [1, 2], [2, 1]])
    sq_actor = actor.superquadric(centers, roundness=roundness,
                                  directions=directions,
                                  colors=colors.astype(np.uint8),
                                  scales=scales)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(sq_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    ft.assert_greater_equal(report.objects, 3)

    scene.clear()  # Reset scene

    # Label setup
    text_actor = actor.label("Hello")
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(text_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 5)

    scene.clear()  # Reset scene

    # Texture setup
    arr = (255 * np.ones((512, 212, 4))).astype('uint8')
    arr[20:40, 20:40, :] = np.array([255, 0, 0, 255], dtype='uint8')
    tp2 = actor.texture(arr)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(tp2)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    scene.clear()  # Reset scene

    # Texture on sphere setup
    arr = 255 * np.ones((810, 1620, 3), dtype='uint8')
    rows, cols, _ = arr.shape
    rs = rows // 2
    cs = cols // 2
    w = 150 // 2
    arr[rs - w: rs + w, cs - 10 * w: cs + 10 * w] = np.array([255, 127, 0])
    tsa = actor.texture_on_sphere(arr)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(tsa)
    scene.reset_camera()
    scene.reset_clipping_range()
    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 1)

    # NOTE: From this point on, these actors don't have full support for PBR
    # interpolation. That is, the test passes but there is no evidence of the
    # desired effect.

    """
    # Setup slicer
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine, value_range=[data.min(), data.max()])
    slicer.display(None, None, 25)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(slicer)
    """

    """
    # Line setup
    data1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    data2 = data1 + np.array([0.5, 0., 0.])
    data = [data1, data2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    lines = actor.line(data, colors, linewidth=5)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(lines)
    """

    """
    # Scalar bar setup
    lut = actor.colormap_lookup_table(
        scale_range=(0., 100.), hue_range=(0., 0.1), saturation_range=(1, 1),
        value_range=(1., 1))
    sb_actor = actor.scalar_bar(lut, ' ')
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(sb_actor)
    """

    """
    # Axes setup
    axes = actor.axes()
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(axes)
    """

    """
    # Peak slicer setup
    _peak_dirs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='f4')
    # peak_dirs.shape = (1, 1, 1) + peak_dirs.shape
    peak_dirs = np.zeros((11, 11, 11, 3, 3))
    peak_dirs[:, :, :] = _peak_dirs
    peak_actor = actor.peak_slicer(peak_dirs)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(peak_actor)
    """

    """
    # Dots setup
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    dots_actor = actor.dots(points, color=(0, 255, 0))
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(dots_actor)
    """

    """
    # Text3D setup
    msg = 'I \nlove\n FURY'
    txt_actor = actor.text_3d(msg)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(txt_actor)
    """

    """
    # Figure setup
    arr = (255 * np.ones((512, 212, 4))).astype('uint8')
    arr[20:40, 20:40, 3] = 0
    tp = actor.figure(arr)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(tp)
    """

    """
    # SDF setup
    centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0]]) * 11
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    scales = [1, 2, 3]
    primitive = ['sphere', 'ellipsoid', 'torus']

    sdf_actor = actor.sdf(centers, directions=directions, colors=colors,
                          primitives=primitive, scales=scales)
    material.manifest_standard(surface_actor, ambient_level=.3,
                               diffuse_level=.25)
    scene.add(sdf_actor)
    """

    # NOTE: For this last set of actors, there is no support for PBR
    # interpolation at all.

    """
    # Billboard setup
    centers = np.array([[0, 0, 0], [5, -5, 5], [-7, 7, -7], [10, 10, 10],
                        [10.5, 11.5, 11.5], [12, -12, -12], [-17, 17, 17],
                        [-22, -22, 22]])
    colors = np.array([[1, 1, 0], [0, 0, 0], [1, 0, 1], [0, 0, 1], [1, 1, 1],
                       [1, 0, 0], [0, 1, 0], [0, 1, 1]])
    scales = [6, .4, 1.2, 1, .2, .7, 3, 2]
    """
    fake_sphere = \
        """
        float len = length(point);
        float radius = 1.;
        if (len > radius)
            discard;
        vec3 normalizedPoint = normalize(vec3(point.xy, sqrt(1. - len)));
        vec3 direction = normalize(vec3(1., 1., 1.));
        float df_1 = max(0, dot(direction, normalizedPoint));
        float sf_1 = pow(df_1, 24);
        fragOutput0 = vec4(max(df_1 * color, sf_1 * vec3(1)), 1);
        """
    """
    billboard_actor = actor.billboard(centers, colors=colors, scales=scales,
                                      fs_impl=fake_sphere)
    material.manifest_pbr(billboard_actor)
    scene.add(billboard_actor)
    """

    if interactive:
        window.show(scene)
Example 21
    def render(
        self,
        tractogram: Tractogram = None,
        filename: str = None
    ):
        """ Render the streamlines, either directly or through a file
        Might render from "outside" the environment, like for comet

        Parameters
        ----------
        tractogram : Tractogram, optional
            Object containing the streamlines and seeds
        filename : str, optional
            If set, save the image at the specified location instead
            of displaying directly
        """
        from fury import window, actor
        # Might be rendering from outside the environment
        if tractogram is None:
            tractogram = Tractogram(
                streamlines=self.streamlines[:, :self.length],
                data_per_streamline={
                    'seeds': self.starting_points
                })

        # Reshape peaks for displaying
        X, Y, Z, M = self.peaks.data.shape
        peaks = np.reshape(self.peaks.data, (X, Y, Z, 5, M//5))

        # Setup scene and actors
        scene = window.Scene()

        stream_actor = actor.streamtube(tractogram.streamlines)
        peak_actor = actor.peak_slicer(peaks,
                                       np.ones((X, Y, Z, M)),
                                       colors=(0.2, 0.2, 1.),
                                       opacity=0.5)
        dot_actor = actor.dots(tractogram.data_per_streamline['seeds'],
                               color=(1, 1, 1),
                               opacity=1,
                               dot_size=2.5)
        scene.add(stream_actor)
        scene.add(peak_actor)
        scene.add(dot_actor)
        scene.reset_camera_tight(0.95)

        # Save or display scene
        if filename is not None:
            directory = os.path.dirname(pjoin(self.experiment_path, 'render'))
            if not os.path.exists(directory):
                os.makedirs(directory)
            dest = pjoin(directory, filename)
            window.snapshot(
                scene,
                fname=dest,
                offscreen=True,
                size=(800, 800))
        else:
            showm = window.ShowManager(scene, reset_camera=True)
            showm.initialize()
            showm.start()