Example #1
import numpy as np
import numpy.testing as npt

from dipy.core.gradients import GradientTable
from dipy.data import read_stanford_labels
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel


def num_grad(gtab):
    # Helper assumed by this benchmark: count the non-b0 gradient directions.
    return (~gtab.b0s_mask).sum()


def bench_csdeconv(center=(50, 40, 40), width=12):
    img, gtab, labels_img = read_stanford_labels()
    data = img.get_data()

    labels = labels_img.get_data()
    shape = labels.shape
    mask = np.in1d(labels, [1, 2])
    mask.shape = shape

    a, b, c = center
    hw = width // 2
    idx = (slice(a - hw, a + hw), slice(b - hw, b + hw), slice(c - hw, c + hw))

    data_small = data[idx].copy()
    mask_small = mask[idx].copy()
    voxels = mask_small.sum()

    cmd = "model.fit(data_small, mask_small)"
    print("== Benchmarking CSD fit on %d voxels ==" % voxels)
    msg = "SH order - %d, gradient directons - %d :: %g sec"

    # Basic case
    sh_order = 8
    model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=sh_order)
    time = npt.measure(cmd)
    print(msg % (sh_order, num_grad(gtab), time))

    # Smaller data set
    data_small = data_small[..., :75].copy()
    gtab = GradientTable(gtab.gradients[:75])
    model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=sh_order)
    time = npt.measure(cmd)
    print(msg % (sh_order, num_grad(gtab), time))

    # Super resolution
    sh_order = 12
    model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=sh_order)
    time = npt.measure(cmd)
    print(msg % (sh_order, num_grad(gtab), time))
Example #2

"""
In order to perform local fiber tracking, three things are needed: 1) A method
for getting directions from a diffusion data set. 2) A method for identifying
different tissue types within the data set. 3) A set of seeds from which to
begin tracking.  This example shows how to combine the 3 parts described above
to create a tractography reconstruction from a diffusion data set.
"""

"""
To begin, let's load an example HARDI data set from Stanford. If you have
not already downloaded this data set, the first time you run this example you
will need to be connected to the internet and this dataset will be downloaded
to your computer.
"""

from dipy.data import read_stanford_labels

hardi_img, gtab, labels_img = read_stanford_labels()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.affine

"""
This dataset provides a label map in which all white matter tissues are
labeled either 1 or 2. Let's create a white matter mask to restrict tracking to
the white matter.
"""

white_matter = (labels == 1) | (labels == 2)

"""
1. The first thing we need to begin fiber tracking is a way of getting
directions from this diffusion data set. In order to do that, we can fit the
data to a Constant Solid Angle (CSA) ODF model, which estimates an orientation
distribution function (ODF) in every voxel.
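
A minimal sketch of that fitting step, assuming the same CSA model and peak
extraction that also appear in Example #3 below:
"""

# Sketch only (assumption: CSA ODF model, as in Example #3 below): estimate an
# ODF in every voxel and extract peak directions inside the white-matter mask.
from dipy.reconst.shm import CsaOdfModel
from dipy.data import default_sphere
from dipy.direction import peaks_from_model

csa_model = CsaOdfModel(gtab, sh_order=6)
csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                             relative_peak_threshold=.8,
                             min_separation_angle=45,
                             mask=white_matter)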
Example #3
import numpy as np
import numpy.testing as npt

from dipy.viz import actor, window


def test_contour_from_roi():

    # Render volume
    renderer = window.renderer()
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data, affine,
                                     color=np.array([1, 0, 1]),
                                     opacity=.5)
    renderer.add(surface)

    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # Test binarization
    renderer2 = window.renderer()
    data2 = np.zeros((50, 50, 50))
    data2[20:30, 25, 25] = 1.
    data2[35:40, 25, 25] = 1.
    affine = np.eye(4)
    surface2 = actor.contour_from_roi(data2, affine,
                                      color=np.array([0, 1, 1]),
                                      opacity=.5)
    renderer2.add(surface2)

    renderer2.reset_camera()
    renderer2.reset_clipping_range()
    # window.show(renderer2)

    arr = window.snapshot(renderer, 'test_surface.png', offscreen=True)
    arr2 = window.snapshot(renderer2, 'test_surface2.png', offscreen=True)

    report = window.analyze_snapshot(arr, find_objects=True)
    report2 = window.analyze_snapshot(arr2, find_objects=True)

    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report2.objects, 2)

    # test on real streamlines using tracking example
    from dipy.data import read_stanford_labels
    from dipy.reconst.shm import CsaOdfModel
    from dipy.data import default_sphere
    from dipy.direction import peaks_from_model
    from dipy.tracking.local import ThresholdTissueClassifier
    from dipy.tracking import utils
    from dipy.tracking.local import LocalTracking
    from dipy.viz.colormap import line_colors

    hardi_img, gtab, labels_img = read_stanford_labels()
    data = hardi_img.get_data()
    labels = labels_img.get_data()
    affine = hardi_img.affine

    white_matter = (labels == 1) | (labels == 2)

    csa_model = CsaOdfModel(gtab, sh_order=6)
    csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=white_matter)

    classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)

    seed_mask = labels == 2
    seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)

    # Initialization of LocalTracking.
    # The computation happens in the next step.
    streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
                                step_size=2)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    # Prepare the display objects.
    streamlines_actor = actor.line(streamlines, line_colors(streamlines))
    seedroi_actor = actor.contour_from_roi(seed_mask, affine, [0, 1, 1], 0.5)

    # Create the 3d display.
    r = window.ren()
    r2 = window.ren()
    r.add(streamlines_actor)
    arr3 = window.snapshot(r, 'test_surface3.png', offscreen=True)
    report3 = window.analyze_snapshot(arr3, find_objects=True)
    r2.add(streamlines_actor)
    r2.add(seedroi_actor)
    arr4 = window.snapshot(r2, 'test_surface4.png', offscreen=True)
    report4 = window.analyze_snapshot(arr4, find_objects=True)

    # assert that the seed ROI rendering is not far
    # away from the streamlines (affine error)
    npt.assert_equal(report3.objects, report4.objects)
Example #4
import numpy as np

from dipy.data import (read_stanford_labels, default_sphere,
                       read_stanford_pve_maps)
from dipy.direction import DeterministicMaximumDirectionGetter
from dipy.io.trackvis import save_trk
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
                                   auto_response)
from dipy.tracking.local import LocalTracking
from dipy.tracking import utils
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors

ren = fvtk.ren()

hardi_img, gtab, labels_img = read_stanford_labels()
_, _, img_pve_wm = read_stanford_pve_maps()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.affine
white_matter = img_pve_wm.get_data()

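# Seed only voxels that have label 2 and a white-matter partial volume of 1.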
seed_mask = np.logical_and(labels == 2, white_matter == 1)

seeds = utils.seeds_from_mask(seed_mask, density=2, affine=affine)

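# Estimate the single-fiber response function, then fit the CSD model inside
# the white-matter mask.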
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = ConstrainedSphericalDeconvModel(gtab, response)
csd_fit = csd_model.fit(data, mask=white_matter)

dg = DeterministicMaximumDirectionGetter.from_shcoeff(csd_fit.shm_coeff,