def afficher_tenseurs(fa_, evec, eva):
    # Render a tensor slice colored by FA and save it to tensor.png.
    cfa = dti.color_fa(fa_, evec)
    sphere = dpd.default_sphere
    ren = window.Renderer()
    ren.add(actor.tensor_slicer(eva, evec, scalar_colors=cfa, sphere=sphere,
                                scale=0.5))
    window.record(ren, out_path='tensor.png', size=(1200, 1200))
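For context, here is a minimal sketch of how the inputs to afficher_tenseurs might be produced with dipy's tensor model; the names gtab, data and mask are assumptions, not part of the original example:

import dipy.reconst.dti as dti

# Hypothetical setup: gtab is a gradient table, data a diffusion volume and
# mask a brain mask, all assumed to exist already.
tenmodel = dti.TensorModel(gtab)
tenfit = tenmodel.fit(data, mask=mask)
fa = dti.fractional_anisotropy(tenfit.evals)
afficher_tenseurs(fa, tenfit.evecs, tenfit.evals)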
Example #2
def test_parallel_projection():

    ren = window.Renderer()
    axes = actor.axes()
    ren.add(axes)

    axes2 = actor.axes()
    axes2.SetPosition((2, 0, 0))
    ren.add(axes2)

    # Put the camera at an angle so that it can show the
    # difference between perspective and parallel projection
    ren.set_camera((1.5, 1.5, 1.5))
    ren.GetActiveCamera().Zoom(2)

    # window.show(ren, reset_camera=True)
    ren.reset_camera()
    arr = window.snapshot(ren)

    ren.projection('parallel')
    # window.show(ren, reset_camera=False)
    arr2 = window.snapshot(ren)
    # With parallel projection the two axes have the same size and
    # therefore occupy more pixels than in perspective projection,
    # where the farther axes appear smaller.
    npt.assert_equal(np.sum(arr2 > 0) > np.sum(arr > 0), True)
Example #3
def print_peaks(sh_signal, mask=None):
    if has_fury:
        data_small = sh_signal[:, :, 50:51]
        ren = window.Renderer()

        sh_order = order_from_ncoef(data_small.shape[-1])
        theta = default_sphere.theta
        phi = default_sphere.phi
        sh_params = SIGNAL_PARAMETERS['processing_params']['sh_params']
        basis_type = sh_params['basis_type']
        sph_harm_basis = sph_harm_lookup.get(basis_type)
        sampling_matrix, m, n = sph_harm_basis(sh_order, theta, phi)
        odfs = np.dot(data_small, sampling_matrix.T)

        odfs = np.clip(odfs, 0, np.max(odfs, -1)[..., None])
        odfs_actor = actor.odf_slicer(odfs,
                                      sphere=default_sphere,
                                      colormap='plasma',
                                      scale=0.4)
        odfs_actor.display(z=0)

        ren.add(odfs_actor)
        print('Saving illustration as csa_odfs.png')
        window.record(ren,
                      n_frames=1,
                      out_path='csa_odfs.png',
                      size=(600, 600))
        window.show(ren)
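The SIGNAL_PARAMETERS dictionary is defined elsewhere in the project; a plausible minimal shape, sketched here as an assumption, only needs the keys the function reads:

# Assumed structure -- only 'basis_type' is actually used above.
# Valid dipy basis names include 'descoteaux07' and 'tournier07'.
SIGNAL_PARAMETERS = {
    'processing_params': {
        'sh_params': {
            'basis_type': 'descoteaux07',
        },
    },
}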
Example #4
def show_cc_parts_weighted(streamlines_g, streamlines_b, streamlines_s, g_mean,
                           b_mean, s_mean, folder_name, lut_cmap, bar):

    mean_g_vec = [g_mean] * len(streamlines_g)
    mean_b_vec = [b_mean] * len(streamlines_b)
    mean_s_vec = [s_mean] * len(streamlines_s)

    genu_actor = actor.line(streamlines_g,
                            mean_g_vec,
                            linewidth=0.5,
                            lookup_colormap=lut_cmap)

    body_actor = actor.line(streamlines_b,
                            mean_b_vec,
                            linewidth=0.5,
                            lookup_colormap=lut_cmap)

    splenium_actor = actor.line(streamlines_s,
                                mean_s_vec,
                                linewidth=0.5,
                                lookup_colormap=lut_cmap)

    r = window.Renderer()
    r.add(genu_actor)
    r.add(body_actor)
    r.add(splenium_actor)
    r.add(bar)

    window.show(r)

    save_as = os.path.join(folder_name, 'cc_parts.png')
    r.camera_info()  # print the current camera settings
    window.record(r, out_path=save_as, size=(800, 800))
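The lut_cmap and bar arguments are expected to come from dipy's colormap utilities; a hedged caller-side sketch (the scale and hue ranges are placeholders):

# Hypothetical setup for the lookup table and its scalar bar.
lut_cmap = actor.colormap_lookup_table(scale_range=(0.0, 1.0),
                                       hue_range=(0.25, -0.05),
                                       saturation_range=(0.1, 1.0))
bar = actor.scalar_bar(lut_cmap)
show_cc_parts_weighted(streamlines_g, streamlines_b, streamlines_s,
                       g_mean, b_mean, s_mean, folder_name, lut_cmap, bar)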
Example #5
def show_bundles(bundles,
                 colors=None,
                 show=True,
                 fname=None,
                 fa=False,
                 str_tube=False):

    ren = window.Renderer()
    ren.SetBackground(1., 1, 1)
    if str_tube:
        bundle_actor = actor.streamtube(bundles, colors, linewidth=0.5)
        ren.add(bundle_actor)
    else:
        for (i, bundle) in enumerate(bundles):
            color = colors[i]
            # streamtube alternative:
            # lines_actor = actor.streamtube(bundle, color, linewidth=0.05)
            lines_actor = actor.line(bundle, color, linewidth=2.5)
            # lines_actor.RotateX(-90)
            # lines_actor.RotateZ(90)
            ren.add(lines_actor)

    if fa:
        fa, affine_fa = load_nifti(
            '/Users/alex/code/Wenlin/data/wenlin_results/bmfaN54900.nii.gz')
        fa_actor = actor.slicer(fa, affine_fa)
        ren.add(fa_actor)

    if show:
        window.show(ren)
    if fname is not None:
        sleep(1)
        window.record(ren, n_frames=1, out_path=fname, size=(900, 900))
Example #6
def plot_response(response_src, out_png=False):

    # start a virtual display so rendering works headlessly
    print("starting Xvfb")
    vdisplay = Xvfb()
    vdisplay.start()

    response_src = np.loadtxt(response_src)
    if len(response_src.shape) > 1:
        response_src = response_src[1]
    sphere = get_sphere('symmetric724')
    sh_resp = AxSymShResponse(0, response_src)
    sig_resp = sh_resp.on_sphere(sphere)
    sig_resp = sig_resp[None, None, None, :]

    ren = window.Renderer()
    sphere_actor = actor.odf_slicer(sig_resp, sphere=sphere, colormap='blues')
    ren.add(sphere_actor)
    my_camera = ren.camera()
    my_camera.SetPosition(1.62, -9.19, 4.01)
    my_camera.SetFocalPoint(0.01, -0.46, -0.19)
    my_camera.SetViewUp(0.24, 0.46, 0.86)

    if out_png:
        window.record(ren, out_path=out_png, magnification=10, size=(60, 60))
    else:
        window.show(ren, reset_camera=False)
        print('Camera Settings')
        print('Position: ', '(%.2f, %.2f, %.2f)' % my_camera.GetPosition())
        print('Focal Point: ', '(%.2f, %.2f, %.2f)' % my_camera.GetFocalPoint())
        print('View Up: ', '(%.2f, %.2f, %.2f)' % my_camera.GetViewUp())

    vdisplay.stop()
Example #7
def tractography(brain, affine_brain, labels, diff, affine_diff, gtab, img):
    # Tractography reconstruction based on the deterministic EuDX algorithm

    labels = segmentation(brain, affine_brain, diff, affine_diff)
    white_matter = (labels == 3)

    csa_model = CsaOdfModel(gtab, sh_order=2)
    csa_peaks = peaks_from_model(csa_model,
                                 diff,
                                 default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=white_matter)

    stopping_criterion = dipy.tracking.stopping_criterion.ThresholdStoppingCriterion(
        csa_peaks.gfa, .25)
    seeds = utils.seeds_from_mask(white_matter, affine_diff, density=1)
    streamlines_generator = LocalTracking(csa_peaks,
                                          stopping_criterion,
                                          seeds,
                                          affine=affine_diff,
                                          step_size=.5)
    streamlines = Streamlines(streamlines_generator)
    if has_fury:
        color = colormap.line_colors(streamlines)

        streamlines_actor = actor.line(streamlines, color)
        r = window.Renderer()
        r.add(streamlines_actor)
        window.record(r, out_path='tractogram.png', size=(800, 800))
        window.show(r)
    sft = StatefulTractogram(streamlines, img, Space.RASMM)
    save_trk(sft, "tractogram.trk")
    return streamlines
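A short follow-up sketch, assuming dipy's StatefulTractogram I/O, shows how the saved file could be loaded back for later visualization:

from dipy.io.streamline import load_trk

# 'same' reuses the spatial metadata stored in the .trk header.
sft = load_trk("tractogram.trk", reference="same", bbox_valid_check=False)
streamlines = sft.streamlines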
Example #8
def showsls(sls, values, outpath, show=False):
    from dipy.viz import window, actor

    renderer = window.Renderer()

    hue = [0.5, 1]  # white to purple to red
    saturation = [0.0, 1.0]  # black to white

    lut_cmap = actor.colormap_lookup_table(
        scale_range=(values.min(), np.percentile(values, 50)),
        hue_range=hue,
        saturation_range=saturation)

    stream_actor5 = actor.line(sls,
                               values,
                               linewidth=0.1,
                               lookup_colormap=lut_cmap)

    renderer.add(stream_actor5)
    bar3 = actor.scalar_bar(lut_cmap)

    renderer.add(bar3)

    # window.show(renderer, size=(600, 600), reset_camera=False)
    if outpath:
        window.record(renderer, out_path=outpath, size=(600, 600))
    if show:
        window.show(renderer)
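A hypothetical call, coloring each streamline by its length (sls is assumed to be a sequence of streamlines):

import numpy as np
from dipy.tracking.streamline import length

values = np.asarray(length(sls))
showsls(sls, values, 'sls_by_length.png', show=False)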
Example #9
def show_tract(segmented_tract, color_positive, segmented_tract_negative,
               color_negative, out_path):
    """Visualization of the segmented tract.
    """
    affine = utils.affine_for_trackvis(voxel_size=np.array([1.25, 1.25, 1.25]))
    bundle_native = transform_streamlines(segmented_tract,
                                          np.linalg.inv(affine))

    bundle_nativeNeg = transform_streamlines(segmented_tract_negative,
                                             np.linalg.inv(affine))

    renderer = window.Renderer()
    stream_actor2 = actor.line(bundle_native,
                               colors=color_positive,
                               linewidth=0.1)

    stream_actorNeg = actor.line(bundle_nativeNeg,
                                 colors=color_negative,
                                 opacity=0.01,
                                 linewidth=0.1)
    renderer.set_camera(position=(408.85, -26.23, 92.12),
                        focal_point=(0.42, -14.03, 0.82),
                        view_up=(-0.09, 0.85, 0.51))

    bar = actor.scalar_bar()
    renderer.add(stream_actor2)

    renderer.add(stream_actorNeg)
    renderer.add(bar)
    window.show(renderer, size=(1920, 1039), reset_camera=False)
    renderer.camera_info()
    """Take a snapshot of the window and save it
    """
    window.record(renderer, out_path=out_path, size=(1920, 1039))
Example #10
def window_show_test(bundles, mask_roi, anat, interactive=True, outpath=None):
    """

    :param bundles: streamline bundles to render as streamtubes
    :param mask_roi: ROI mask to contour
    :param anat: anatomical volume to show as background slices (optional)
    :param interactive: whether to open an interactive window
    :param outpath: if given, record a snapshot to this path
    :return:
    """

    candidate_streamlines_actor = actor.streamtube(
        bundles, cmap.line_colors(bundles))
    ROI_actor = actor.contour_from_roi(mask_roi,
                                       color=(1., 1., 0.),
                                       opacity=0.5)

    ren = window.Renderer()
    if anat:
        vol_actor = actor.slicer(anat)
        vol_actor.display(x=40)
        vol_actor2 = vol_actor.copy()
        vol_actor2.display(z=35)

    # Add display objects to canvas
    ren.add(candidate_streamlines_actor)
    ren.add(ROI_actor)
    if anat:
        ren.add(vol_actor)
        ren.add(vol_actor2)
    if outpath is not None:
        window.record(ren, n_frames=1, out_path=outpath, size=(800, 800))
    if interactive:
        window.show(ren)
Example #11
def test_text_widget():

    interactive = False

    renderer = window.Renderer()
    axes = actor.axes()
    window.add(renderer, axes)
    renderer.ResetCamera()

    show_manager = window.ShowManager(renderer, size=(900, 900))

    if interactive:
        show_manager.initialize()
        show_manager.render()

    fetch_viz_icons()
    button_png = read_viz_icons(fname='home3.png')

    def button_callback(obj, event):
        print('Button Pressed')

    button = widget.button(show_manager.iren, show_manager.ren,
                           button_callback, button_png, (.8, 1.2), (100, 100))

    global rulez
    rulez = True

    def text_callback(obj, event):

        global rulez
        print('Text selected')
        if rulez:
            obj.GetTextActor().SetInput("Diffusion Imaging Rulez!!")
            rulez = False
        else:
            obj.GetTextActor().SetInput("Diffusion Imaging in Python")
            rulez = True
        show_manager.render()

    text = widget.text(show_manager.iren,
                       show_manager.ren,
                       text_callback,
                       message="Diffusion Imaging in Python",
                       left_down_pos=(0., 0.),
                       right_top_pos=(0.4, 0.05),
                       opacity=1.,
                       border=False)

    if not interactive:
        button.Off()
        text.Off()

    if interactive:
        show_manager.render()
        show_manager.start()

    arr = window.snapshot(renderer, size=(900, 900))
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)
Example #12
def test_dots(interactive=False):
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])

    dots_actor = actor.dots(points, color=(0, 255, 0))

    renderer = window.Renderer()
    renderer.add(dots_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    if interactive:
        window.show(renderer, reset_camera=False)

    npt.assert_equal(renderer.GetActors().GetNumberOfItems(), 1)

    extent = renderer.GetActors().GetLastActor().GetBounds()
    npt.assert_equal(extent, (0.0, 1.0, 0.0, 1.0, 0.0, 0.0))

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=(0, 255, 0))
    npt.assert_equal(report.objects, 3)

    # Test one point
    points = np.array([0, 0, 0])
    dot_actor = actor.dots(points, color=(0, 0, 255))

    renderer.clear()
    renderer.add(dot_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=(0, 0, 255))
    npt.assert_equal(report.objects, 1)
Example #13
def show_gradients(gtab):

    renderer = window.Renderer()
    renderer.add(fvtk.point(gtab.gradients, (1, 0, 0), point_radius=100))
    renderer.add(fvtk.point(-gtab.gradients, (1, 0, 0), point_radius=100))

    window.show(renderer)
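A possible way to obtain the gtab argument, sketched under the assumption that the b-values and b-vectors live in text files (the file names are placeholders):

from dipy.core.gradients import gradient_table
from dipy.io import read_bvals_bvecs

bvals, bvecs = read_bvals_bvecs('dwi.bval', 'dwi.bvec')
show_gradients(gradient_table(bvals, bvecs))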
Example #14
def test_renderer():

    ren = window.Renderer()

    # background color for renderer (1, 0.5, 0)
    # 0.001 added here to remove numerical errors when moving from float
    # to int values
    bg_float = (1, 0.501, 0)

    # that will come in the image in the 0-255 uint scale
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))

    ren.background(bg_float)
    # window.show(ren)
    arr = window.snapshot(ren)

    report = window.analyze_snapshot(arr,
                                     bg_color=bg_color,
                                     colors=[bg_color, (0, 127, 0)])
    npt.assert_equal(report.objects, 0)
    npt.assert_equal(report.colors_found, [True, False])

    axes = actor.axes()
    ren.add(axes)
    # window.show(ren)

    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    ren.rm(axes)
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    window.add(ren, axes)
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    ren.rm_all()
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    ren2 = window.renderer(bg_float)
    ren2.background((0, 0, 0.))

    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.bg_color, (0, 0, 0))

    ren2.add(axes)

    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.actors, 3)

    window.rm(ren2, axes)
    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.actors, 0)
Example #15
def simple_viewer(streamlines, vol, affine):

    from dipy.viz import actor, window

    renderer = window.Renderer()
    renderer.add(actor.line(streamlines))
    renderer.add(actor.slicer(vol, affine))
    window.show(renderer)
Example #16
def genren_AGG(sls,
               sls2=None,
               niidata=None,
               roi1=None,
               roi2=None,
               roi3=None,
               aff=np.eye(4),
               putpath='test.png',
               showme=False,
               showaxes=False):

    renderer = window.Renderer()
    renderer.set_camera(position=(-606.93, -153.23, 28.70),
                        focal_point=(2.78, 11.06, 15.66),
                        view_up=(0, 0, 1))

    stream_actor = actor.line(sls)
    renderer.add(stream_actor)

    if sls2 is not None:
        stream_actor2 = actor.line(sls2, colors=(1, 1, 1))
        renderer.add(stream_actor2)

    if roi1 is not None:
        contour_actor1 = actor.contour_from_roi(roi1,
                                                affine=aff,
                                                color=(1., 1., 0.),
                                                opacity=0.5)
        renderer.add(contour_actor1)

    if roi2 is not None:
        contour_actor2 = actor.contour_from_roi(roi2,
                                                affine=aff,
                                                color=(1., 0., 0.),
                                                opacity=0.5)
        renderer.add(contour_actor2)

    if roi3 is not None:
        contour_actor3 = actor.contour_from_roi(roi3,
                                                affine=aff,
                                                color=(0., 0., 1.),
                                                opacity=0.5)
        renderer.add(contour_actor3)

    if niidata is not None:
        slice_actor = actor.slicer(niidata, affine=aff)
        renderer.add(slice_actor)

    if showaxes:
        axes = actor.axes()
        renderer.add(axes)

    if showme:
        window.show(renderer, size=(500, 500), reset_camera=False)
    window.record(renderer, out_path=putpath, size=(500, 500))
    # renderer.camera_info()
    del renderer
    return putpath
Example #17
def visualize_roi(roi,
                  affine_or_mapping=None,
                  static_img=None,
                  roi_affine=None,
                  static_affine=None,
                  reg_template=None,
                  ren=None,
                  color=None,
                  inline=True,
                  interact=False):
    """
    Render a region of interest into a VTK viz as a volume
    """
    if not isinstance(roi, np.ndarray):
        if isinstance(roi, str):
            roi = nib.load(roi).get_data()
        else:
            roi = roi.get_data()

    if affine_or_mapping is not None:
        if isinstance(affine_or_mapping, np.ndarray):
            # This is an affine:
            if (static_img is None or roi_affine is None
                    or static_affine is None):
                raise ValueError(
                    "If using an affine to transform an ROI, "
                    "need to also specify all of the following "
                    "inputs: `static_img`, `roi_affine`, `static_affine`")
            roi = reg.resample(roi, static_img, roi_affine, static_affine)
        else:
            # Assume it is a mapping:
            if (isinstance(affine_or_mapping, str)
                    or isinstance(affine_or_mapping, nib.Nifti1Image)):
                if reg_template is None or static_img is None:
                    raise ValueError(
                        "If using a mapping to transform an ROI, need to "
                        "also specify all of the following inputs: "
                        "`reg_template`, `static_img`")
                affine_or_mapping = reg.read_mapping(affine_or_mapping,
                                                     static_img, reg_template)

            roi = auv.patch_up_roi(
                affine_or_mapping.transform_inverse(
                    roi, interpolation='nearest')).astype(bool)

    if ren is None:
        ren = window.Renderer()

    roi_actor = actor.contour_from_roi(roi, color=color)
    ren.add(roi_actor)

    if inline:
        tdir = tempfile.gettempdir()
        fname = op.join(tdir, "fig.png")
        window.record(ren, out_path=fname)
        display.display_png(display.Image(fname))

    return _inline_interact(ren, inline, interact)
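The _inline_interact helper is defined elsewhere in the project; a plausible reconstruction, offered only as a sketch of the behavior the call above implies:

def _inline_interact(ren, inline, interact):
    # Hypothetical helper: optionally open an interactive window,
    # then hand the renderer back to the caller.
    if interact:
        window.show(ren)
    return ren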
Example #18
def plot_tracts(classes, bundle_segmentations, affine, out_dir, brain_mask=None):
    '''
    By default this does not work over a remote connection (ssh -X), because
    -X does not support OpenGL. On the remote server you can run
    'export DISPLAY=":0"' (use the value that 'echo $DISPLAY' reports when
    you log in locally on that server). All graphics are then rendered on the
    remote server itself rather than forwarded via -X.
    (important: a graphical session must be running on the remote server,
    e.g. via a local login; staying at the login screen is not enough)
    '''
    from dipy.viz import window
    from tractseg.libs import vtk_utils

    SMOOTHING = 10
    WINDOW_SIZE = (800, 800)
    bundles = ["CST_right", "CA", "IFO_right"]

    renderer = window.Renderer()
    renderer.projection('parallel')

    rows = len(bundles)
    X, Y, Z = bundle_segmentations.shape[:3]
    for j, bundle in enumerate(bundles):
        i = 0  #only one method

        bundle_idx = exp_utils.get_bundle_names(classes)[1:].index(bundle)
        mask_data = bundle_segmentations[:,:,:,bundle_idx]

        if bundle == "CST_right":
            orientation = "axial"
        elif bundle == "CA":
            orientation = "axial"
        elif bundle == "IFO_right":
            orientation = "sagittal"
        else:
            orientation = "axial"

        #bigger: more border
        if orientation == "axial":
            border_y = -100  #-60
        else:
            border_y = -100

        x_current = X * i  # column (width)
        y_current = rows * (Y * 2 + border_y) - (Y * 2 + border_y) * j  # row (height)  (starts from bottom?)

        plot_mask(renderer, mask_data, affine, x_current, y_current,
                            orientation=orientation, smoothing=SMOOTHING, brain_mask=brain_mask)

        #Bundle label
        text_offset_top = -50  # 60
        text_offset_side = -100 # -30
        position = (0 - int(X) + text_offset_side, y_current + text_offset_top, 50)
        text_actor = vtk_utils.label(text=bundle, pos=position, scale=(6, 6, 6), color=(1, 1, 1))
        renderer.add(text_actor)

    renderer.reset_camera()
    window.record(renderer, out_path=join(out_dir, "preview.png"),
                  size=(WINDOW_SIZE[0], WINDOW_SIZE[1]), reset_camera=False, magnification=2)
Example #19
    def test_renderer(self):
        vdisplay = Xvfb()
        vdisplay.start()

        ren = window.Renderer()
        window.record(ren, n_frames=1, out_path=self.out_file, size=(600, 600))
        self.assertTrue(os.path.exists(self.out_file))

        vdisplay.stop()
Example #20
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    output_names = [
        'axial_superior', 'axial_inferior', 'coronal_posterior',
        'coronal_anterior', 'sagittal_left', 'sagittal_right'
    ]

    output_paths = [
        os.path.join(os.path.dirname(args.output),
                     '{}_' + os.path.basename(args.output)).format(name)
        for name in output_names
    ]
    assert_inputs_exist(parser, [args.bundle, args.map])
    assert_outputs_exists(parser, args, output_paths)

    assignment = np.load(args.map)['arr_0']
    lut = actor.colormap_lookup_table(scale_range=(np.min(assignment),
                                                   np.max(assignment)),
                                      hue_range=(0.1, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(1., 1.))

    tubes = actor.line(nib.streamlines.load(args.bundle).streamlines,
                       assignment,
                       lookup_colormap=lut)
    scalar_bar = actor.scalar_bar(lut)

    ren = window.Renderer()
    ren.add(tubes)
    ren.add(scalar_bar)

    window.snapshot(ren, output_paths[0])

    ren.pitch(180)
    ren.reset_camera()
    window.snapshot(ren, output_paths[1])

    ren.pitch(90)
    ren.set_camera(view_up=(0, 0, 1))
    ren.reset_camera()
    window.snapshot(ren, output_paths[2])

    ren.pitch(180)
    ren.set_camera(view_up=(0, 0, 1))
    ren.reset_camera()
    window.snapshot(ren, output_paths[3])

    ren.yaw(90)
    ren.reset_camera()
    window.snapshot(ren, output_paths[4])

    ren.yaw(180)
    ren.reset_camera()
    window.snapshot(ren, output_paths[5])
Example #21
def test_order_transparent():

    renderer = window.Renderer()

    lines = [
        np.array([[-1, 0, 0.], [1, 0, 0.]]),
        np.array([[-1, 1, 0.], [1, 1, 0.]])
    ]
    colors = np.array([[1., 0., 0.], [0., .5, 0.]])
    stream_actor = actor.streamtube(lines, colors, linewidth=0.3, opacity=0.5)

    renderer.add(stream_actor)

    renderer.reset_camera()

    # green in front
    renderer.elevation(90)
    renderer.camera().OrthogonalizeViewUp()
    renderer.reset_clipping_range()

    renderer.reset_camera()

    not_xvfb = os.environ.get("TEST_WITH_XVFB", False)

    # render offscreen unless the test is running under Xvfb
    arr = window.snapshot(renderer,
                          fname='green_front.png',
                          offscreen=bool(not_xvfb),
                          order_transparent=False)

    # The green line is nearer the camera, therefore its green component
    # must have a higher value (in RGB terms) than the red one
    npt.assert_equal(arr[150, 150][1] > arr[150, 150][0], True)

    # red in front
    renderer.elevation(-180)
    renderer.camera().OrthogonalizeViewUp()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer,
                          fname='red_front.png',
                          offscreen=bool(not_xvfb),
                          order_transparent=True)

    # Now the red line is nearer the camera, therefore its red component
    # must have a higher value (in RGB terms)
    npt.assert_equal(arr[150, 150][0] > arr[150, 150][1], True)
Example #22
def bundle_coherence(streamlines, affine, k, t1_data=None, interactive=False):

    # Apply FBC measures, using the precomputed kernel lookup table `k`
    from dipy.tracking.fbcmeasures import FBCMeasures

    fbc = FBCMeasures(streamlines, k)

    # Calculate LFBC for original fibers
    fbc_sl_orig, clrs_orig, rfbc_orig = \
      fbc.get_points_rfbc_thresholded(0, emphasis=0.01)

    # Apply a threshold on the RFBC to remove spurious fibers
    fbc_sl_thres, clrs_thres, rfbc_thres = \
      fbc.get_points_rfbc_thresholded(0.125, emphasis=0.01)

    # Visualize the results
    from dipy.viz import window, actor

    # Create renderer
    ren = window.Renderer()

    # Original lines colored by LFBC
    lineactor = actor.line(fbc_sl_orig, clrs_orig, linewidth=0.2)
    ren.add(lineactor)

    # Horizontal (axial) slice of T1 data
    if t1_data is not None:
        vol_actor1 = actor.slicer(t1_data, affine=affine)
        vol_actor1.display(z=20)
        ren.add(vol_actor1)

        # Vertical (sagittal) slice of T1 data
        vol_actor2 = actor.slicer(t1_data, affine=affine)
        vol_actor2.display(x=35)
        ren.add(vol_actor2)

    # Show original fibers
    ren.set_camera(position=(-264, 285, 155),
                   focal_point=(0, -14, 9),
                   view_up=(0, 0, 1))
    window.record(ren, n_frames=1, out_path='OR_before.png', size=(900, 900))
    if interactive:
        window.show(ren)

    # Show thresholded fibers
    ren.rm(lineactor)
    ren.add(actor.line(fbc_sl_thres, clrs_thres, linewidth=0.2))
    window.record(ren, n_frames=1, out_path='OR_after.png', size=(900, 900))
    if interactive:
        window.show(ren)

    return k
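The k argument is an enhancement-kernel lookup table; a sketch of how it might be computed beforehand, with parameter values taken from dipy's FBC example (an assumption about this particular caller):

from dipy.denoise.enhancement_kernel import EnhancementKernel

D33 = 1.0   # spatial diffusivity
D44 = 0.02  # angular diffusivity
t = 1       # diffusion time
k = EnhancementKernel(D33, D44, t)
bundle_coherence(streamlines, affine, k)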
Example #23
    def test_renderer(self):
        vdisplay = Xvfb()
        vdisplay.start()

        ren = window.Renderer()

        with tempfile.TemporaryDirectory() as dir:
            out_file = os.path.join(dir, 'test.png')
            window.record(ren, n_frames=1, out_path=out_file, size=(600, 600))
            self.assertTrue(os.path.exists(out_file))

        vdisplay.stop()
Example #24
def load_ft():
    from dipy.io.streamline import load_trk
    from dipy.viz import window, actor, colormap as cmap

    streams, hdr = load_trk(tract_name)
    streamlines = Streamlines(streams)

    streamlines_actor = actor.line(streamlines, cmap.line_colors(streamlines))

    # Create the 3D display.
    r = window.Renderer()
    r.add(streamlines_actor)
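Note that load_ft builds the scene but never displays or returns it; a caller would likely need the function to end with something like this (hypothetical) addition:

    # Display the scene (or record it to disk instead).
    window.show(r)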
Example #25
File: qa_fibers.py Project: j1c/m2g
def visualize_fibs(fibs, fibfile, atlasfile, outdir, opacity, num_samples):
    """
    Takes fiber streamlines and visualizes them using DiPy
    Required Arguments:
        - fibfile: Path to fiber file
        - atlasfile: Path to atlas file
        - outdir: Path to output directory
        - opacity: Opacity of overlayed brain
        - num_samples: number of fibers to randomly sample from fibfile
    Optional Arguments:
    """
    try:
        import vtk
        print("VTK found - beginning fiber QA.")
    except ImportError:
        print("!! VTK not found; skipping fiber QA.")
        return

    # loading the fibers
    fibs = threshold_fibers(fibs)

    # make sure if fiber streamlines
    # have no fibers, no error occurs
    if len(fibs) == 0:
        return
    # randomly sample num_samples fibers from given fibers
    resampled_fibs = random_sample(fibs, num_samples)

    # load atlas file
    atlas_volume = load_atlas(atlasfile, opacity)

    # Initialize renderer
    renderer = window.Renderer()
    renderer.SetBackground(1.0, 1.0, 1.0)

    # Add streamlines as a DiPy viz object
    stream_actor = actor.line(fibs)

    # Set camera orientation properties
    # TODO: allow this as an argument
    renderer.set_camera()  # args are: position=(), focal_point=(), view_up=()

    # Add streamlines to viz session
    renderer.add(stream_actor)
    renderer.add(atlas_volume)

    # Display fibers
    # TODO: allow size of window as an argument
    # window.show(renderer, size=(600, 600), reset_camera=False)

    fname = os.path.split(fibfile)[1].split('.')[0] + '.png'
    window.record(renderer, out_path=os.path.join(outdir, fname),
                  size=(600, 600))
Example #26
def test_peak_slicer(interactive=False):

    _peak_dirs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='f4')
    # peak_dirs.shape = (1, 1, 1) + peak_dirs.shape

    peak_dirs = np.zeros((11, 11, 11, 3, 3))

    peak_values = np.random.rand(11, 11, 11, 3)

    peak_dirs[:, :, :] = _peak_dirs

    renderer = window.Renderer()
    peak_actor = actor.peak_slicer(peak_dirs)
    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))
    if interactive:
        window.show(renderer)

    renderer.clear()
    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))
    for k in range(11):
        peak_actor.display_extent(0, 10, 0, 10, k, k)

    for j in range(11):
        peak_actor.display_extent(0, 10, j, j, 0, 10)

    for i in range(11):
        peak_actor.display(i, None, None)

    renderer.rm_all()

    peak_actor = actor.peak_slicer(
        peak_dirs,
        peak_values,
        mask=None,
        affine=np.diag([3, 2, 1, 1]),
        colors=None,
        opacity=1,
        linewidth=3,
        lod=True,
        lod_points=10 ** 4,
        lod_points_size=3)

    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))
    if interactive:
        window.show(renderer)

    report = window.analyze_renderer(renderer)
    ex = ['vtkLODActor', 'vtkOpenGLActor', 'vtkOpenGLActor', 'vtkOpenGLActor']
    npt.assert_equal(report.actors_classnames, ex)
Example #27
def vis_2d_field(odf, sphere):
    """
    Visualize a 2D ODF field.
    """

    r = window.Renderer()
    sfu = actor.odf_slicer(odf.reshape(1, *odf.shape),
                           sphere=sphere,
                           colormap='plasma',
                           scale=0.5)
    sfu.display(x=0)
    r.add(sfu)
    window.show(r)
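A hypothetical usage, building a small grid of identical single-tensor ODFs with dipy's simulation helpers:

import numpy as np
from dipy.data import get_sphere
from dipy.sims.voxel import single_tensor_odf

sphere = get_sphere('repulsion724')
odf = single_tensor_odf(sphere.vertices)   # one ODF, shape (n_vertices,)
odf_field = np.tile(odf, (5, 5, 1))        # a 5x5 field of ODFs
vis_2d_field(odf_field, sphere)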
Example #28
def test_labels(interactive=False):

    text_actor = actor.label("Hello")

    renderer = window.Renderer()
    renderer.add(text_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    if interactive:
        window.show(renderer, reset_camera=False)

    npt.assert_equal(renderer.GetActors().GetNumberOfItems(), 1)
Example #29
def visualize(atlasfile, outdir, intensityfile):
    """
    Takes fiber streamlines and visualizes them using DiPy
    Required Arguments:
        - atlasfile: Path to atlas file
        - outdir: Path to output directory
        - opacity: Opacity of overlayed brain
    Optional Arguments:
        - fname: name of output file. default is None (fname based on input
          fibfile name)
    """
    intensities, signs = parse_csv(intensityfile)
    # load atlas file
    atlas_volume = load_atlas(atlasfile, intensities, signs)

    faces = [
        (1, 0, 0),
        (-1, 0, 0),
        (0, 1, 0),
        (0, -1, 0),
        (0, 1, 0),
        (0, 0, 1),
        (0, 0, -1),
        (1, 1, 0),
        (1, 0, 1),
        (0, 1, 1),
        (-1, -1, 0),
        (-1, 0, -1),
        (0, -1, -1),
    ]

    for i in range(len(faces)):
        # Initialize renderer
        renderer = window.Renderer()

        renderer.background((1, 1, 1))

        # Set camera orientation properties
        # TODO: allow this as an argument
        renderer.set_camera(
            position=faces[i]
        )  # args are: position=(), focal_point=(), view_up=()

        # Add streamlines to viz session
        renderer.add(atlas_volume)

        # Saves file, if you're into that sort of thing...
        fname = os.path.split(atlasfile)[1].split('.')[0] + str(i) + '.png'
        window.record(renderer, out_path=os.path.join(outdir, fname),
                      size=(600, 600))
    print('done')
Example #30
def show_fascicles_wholebrain(s_list,
                              vec_vols,
                              folder_name,
                              mask_type,
                              downsamp=1,
                              scale=[3, 6],
                              hue=[0.25, -0.05],
                              saturation=[0.1, 1.0]):

    s_img = folder_name + r'\streamlines' + r'\fascicles_AxCaliber_weighted_3d_' + mask_type + '.png'
    # hue = [0.4, 0.7]  # blues
    # hue = [0.25, -0.05]  # hot
    # hue = [0, 1]  # all

    # Alternative (disabled) scaling scheme:
    # if weighted:
    #     scale = [0, 3]
    # else:
    #     scale = [0, 6]
    #     vec_vols = np.log(vec_vols)
    #     vec_vols = vec_vols - np.nanmin(vec_vols) / (np.nanmax(vec_vols) - np.nanmin(vec_vols))

    if downsamp != 1:
        vec_vols = vec_vols[::downsamp]
        s_list = s_list[::downsamp]

    lut_cmap = actor.colormap_lookup_table(hue_range=hue,
                                           saturation_range=saturation,
                                           scale_range=scale)
    bar = actor.scalar_bar(lut_cmap)
    #w_actor = actor.line(s_list, vec_vols, linewidth=1.2, lookup_colormap=lut_cmap)
    w_actor = actor.streamtube(s_list,
                               vec_vols,
                               linewidth=0.6,
                               lookup_colormap=lut_cmap)

    #w_actor = actor.streamtube(s_list, vec_vols, linewidth=0.3, lookup_colormap=lut_cmap)
    #w_actor = actor.line(s_list, linewidth=1.0, lookup_colormap=lut_cmap)

    r = window.Renderer()
    #r.SetBackground(*window.colors.white)
    r.add(w_actor)
    r.add(bar)
    window.show(r)
    r.camera_info()  # print the current camera settings
    window.record(r, out_path=s_img, size=(800, 800))