Example #1
import warnings

import numpy.testing as npt

from fury import actor, window


def test_deprecated():
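    # Instantiating the deprecated Renderer class should emit exactly one
    # DeprecationWarning.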
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DeprecationWarning)
        scene = window.Renderer()
        npt.assert_equal(scene.size(), (0, 0))
        npt.assert_equal(len(w), 1)
        npt.assert_(issubclass(w[-1].category, DeprecationWarning))

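    # The lowercase window.renderer() factory is likewise deprecated and
    # should warn once.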
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DeprecationWarning)
        scene = window.renderer(background=(0.0, 1.0, 0.0))
        npt.assert_equal(scene.size(), (0, 0))
        npt.assert_equal(len(w), 1)
        npt.assert_(issubclass(w[-1].category, DeprecationWarning))

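    # window.ren() is a deprecated alias that itself calls the deprecated
    # renderer(), so two warnings accumulate here.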
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DeprecationWarning)
        scene = window.ren()
        npt.assert_equal(scene.size(), (0, 0))
        npt.assert_equal(len(w), 2)
        npt.assert_(issubclass(w[-1].category, DeprecationWarning))

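    # The deprecated module-level helpers (add, rm, rm_all, clear,
    # analyze_renderer) should still manipulate the scene while each
    # emitting a DeprecationWarning.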
    scene = window.Scene()
    with warnings.catch_warnings(record=True) as l_warn:
        warnings.simplefilter("always", DeprecationWarning)
        obj = actor.axes(scale=(1, 1, 1))
        window.add(scene, obj)
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 3)
        window.rm(scene, obj)
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 0)
        window.add(scene, obj)
        window.rm_all(scene)
        arr = window.snapshot(scene)
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 0)
        window.add(scene, obj)
        window.clear(scene)
        report = window.analyze_renderer(scene)
        npt.assert_equal(report.actors, 0)
        deprecated_warns = [
            w for w in l_warn if issubclass(w.category, DeprecationWarning)
        ]
        npt.assert_equal(len(deprecated_warns), 7)
        npt.assert_(issubclass(l_warn[-1].category, DeprecationWarning))
Example #2
import numpy as np
import numpy.testing as npt

from fury import actor, window


def test_contour_from_roi():

    # Render volume
    renderer = window.renderer()
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data, affine,
                                     color=np.array([1, 0, 1]),
                                     opacity=.5)
    renderer.add(surface)

    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # Test binarization
    renderer2 = window.renderer()
    data2 = np.zeros((50, 50, 50))
    data2[20:30, 25, 25] = 1.
    data2[35:40, 25, 25] = 1.
    affine = np.eye(4)
    surface2 = actor.contour_from_roi(data2, affine,
                                      color=np.array([0, 1, 1]),
                                      opacity=.5)
    renderer2.add(surface2)

    renderer2.reset_camera()
    renderer2.reset_clipping_range()
    # window.show(renderer2)

    arr = window.snapshot(renderer, 'test_surface.png', offscreen=True)
    arr2 = window.snapshot(renderer2, 'test_surface2.png', offscreen=True)

    report = window.analyze_snapshot(arr, find_objects=True)
    report2 = window.analyze_snapshot(arr2, find_objects=True)

    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report2.objects, 2)

    # Test on real streamlines using the tracking example.
    from dipy.data import default_sphere, read_stanford_labels
    from dipy.direction import peaks_from_model
    from dipy.reconst.shm import CsaOdfModel
    from dipy.tracking import utils
    from dipy.tracking.local import LocalTracking, ThresholdTissueClassifier
    from fury.colormap import line_colors

    hardi_img, gtab, labels_img = read_stanford_labels()
    data = hardi_img.get_data()
    labels = labels_img.get_data()
    affine = hardi_img.affine

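    # Restrict tracking to white matter (labels 1 and 2 in the Stanford
    # label map).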
    white_matter = (labels == 1) | (labels == 2)

    csa_model = CsaOdfModel(gtab, sh_order=6)
    csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=white_matter)

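    # Stop tracking where the generalized fractional anisotropy (GFA)
    # drops below 0.25.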
    classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)

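    # Seed one point per voxel (density=[1, 1, 1]) inside label 2, the
    # corpus callosum in the Stanford label map.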
    seed_mask = labels == 2
    seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)

    # Initialization of LocalTracking.
    # The computation happens in the next step.
    streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
                                step_size=2)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    # Prepare the display objects.
    streamlines_actor = actor.line(streamlines, line_colors(streamlines))
    seedroi_actor = actor.contour_from_roi(seed_mask, affine, [0, 1, 1], 0.5)

    # Create the 3d display.
    r = window.ren()
    r2 = window.ren()
    r.add(streamlines_actor)
    arr3 = window.snapshot(r, 'test_surface3.png', offscreen=True)
    report3 = window.analyze_snapshot(arr3, find_objects=True)
    r2.add(streamlines_actor)
    r2.add(seedroi_actor)
    arr4 = window.snapshot(r2, 'test_surface4.png', offscreen=True)
    report4 = window.analyze_snapshot(arr4, find_objects=True)

    # Assert that the seed ROI renders close to the streamlines; a higher
    # object count in report4 would indicate an affine error.
    npt.assert_equal(report3.objects, report4.objects)
Example #3
import os

import nibabel as nib
import numpy as np
from PIL import Image

from fury import actor, window


def screenshot_fa_peaks(fa, peaks, directory='.'):
    """
    Compute 3 view screenshot with peaks on FA.

    Parameters
    ----------
    fa : string
        FA filename.
    peaks : string
        Peak filename.
    directory : string
        Directory to save the mosaic.

    Returns
    -------
    name : string
        Path of the mosaic
    """
    slice_names = ['sagittal', 'coronal', 'axial']
    data = nib.load(fa).get_data()
    evecs_data = nib.load(peaks).get_data()

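    # peak_slicer expects directions shaped (X, Y, Z, npeaks, 3); wrap the
    # single peak per voxel accordingly.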
    evecs = np.zeros(data.shape + (1, 3))
    evecs[:, :, :, 0, :] = evecs_data[...]

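    # Mid-volume slice indices; the sagittal index is offset 4 voxels from
    # the midline.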
    middle = [data.shape[0] // 2 + 4, data.shape[1] // 2,
              data.shape[2] // 2]

    slice_display = [(middle[0], None, None), (None, middle[1], None),
                     (None, None, middle[2])]

    image_name = os.path.basename(str(peaks)).split(".")[0]
    name = os.path.join(directory, image_name + '.png')

    concat = []
    for j, slice_name in enumerate(slice_names):
        slice_actor = actor.slicer(data, interpolation='nearest', opacity=0.3)
        peak_actor = actor.peak_slicer(evecs, colors=None)

        peak_actor.GetProperty().SetLineWidth(2.5)

        slice_actor.display(slice_display[j][0], slice_display[j][1],
                            slice_display[j][2])
        peak_actor.display(slice_display[j][0], slice_display[j][1],
                           slice_display[j][2])

        renderer = window.ren()

        renderer.add(slice_actor)
        renderer.add(peak_actor)

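        # Place the camera 350 units from the slice center along the viewing
        # axis, with a view-up vector chosen per anatomical orientation.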
        center = slice_actor.GetCenter()
        pos = None
        viewup = None
        if slice_name == "sagittal":
            pos = (center[0] - 350, center[1], center[2])
            viewup = (0, 0, -1)
        elif slice_name == "coronal":
            pos = (center[0], center[1] + 350, center[2])
            viewup = (0, 0, -1)
        elif slice_name == "axial":
            pos = (center[0], center[1], center[2] + 350)
            viewup = (0, -1, 1)

        camera = renderer.GetActiveCamera()
        camera.SetViewUp(viewup)

        camera.SetPosition(pos)
        camera.SetFocalPoint(center)

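        # renderer_to_arr is a helper presumably defined elsewhere in this
        # module; it renders the scene offscreen to a numpy image array.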
        img = renderer_to_arr(renderer, (1080, 1080))
        if len(concat) == 0:
            concat = img
        else:
            concat = np.hstack((concat, img))

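    # Stack the three views side by side and save the mosaic under the
    # peaks-derived name.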
    imgs_comb = Image.fromarray(concat)
    imgs_comb.save(name)

    return name
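
A minimal usage sketch for screenshot_fa_peaks; the input filenames below are hypothetical, and any FA map with a peaks file on the same voxel grid would do:

mosaic = screenshot_fa_peaks('fa.nii.gz', 'peaks.nii.gz', directory='.')
print(mosaic)  # e.g. './peaks.png', the three views stacked horizontally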