Example #1
def exampleDipy():
    # example obtained from: http://nipy.org/dipy/examples_built/syn_registration_2d.html
    import ssl
    if hasattr(ssl, '_create_unverified_context'):
        # allow the DIPY fetchers to download over https without certificate checks
        ssl._create_default_https_context = ssl._create_unverified_context

    # imports needed by the body below (missing from the original snippet)
    import numpy as np
    from dipy.viz import regtools
    from dipy.align.metrics import CCMetric
    from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
    from dipy.data import fetch_stanford_hardi, read_stanford_hardi
    fetch_stanford_hardi()
    nib_stanford, gtab_stanford = read_stanford_hardi()
    stanford_b0 = np.squeeze(nib_stanford.get_data())[..., 0]

    from dipy.data.fetcher import fetch_syn_data, read_syn_data
    fetch_syn_data()
    nib_syn_t1, nib_syn_b0 = read_syn_data()
    syn_b0 = np.array(nib_syn_b0.get_data())

    from dipy.segment.mask import median_otsu

    stanford_b0_masked, stanford_b0_mask = median_otsu(stanford_b0, 4, 4)
    syn_b0_masked, syn_b0_mask = median_otsu(syn_b0, 4, 4)

    static = stanford_b0_masked
    static_affine = nib_stanford.affine
    moving = syn_b0_masked
    moving_affine = nib_syn_b0.affine

    pre_align = np.array(
        [[1.02783543e+00, -4.83019053e-02, -6.07735639e-02, -2.57654118e+00],
         [4.34051706e-03, 9.41918267e-01, -2.66525861e-01, 3.23579799e+01],
         [5.34288908e-02, 2.90262026e-01, 9.80820307e-01, -1.46216651e+01],
         [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])

    from dipy.align.imaffine import AffineMap
    affine_map = AffineMap(pre_align,
                           static.shape, static_affine,
                           moving.shape, moving_affine)

    resampled = affine_map.transform(moving)

    metric = CCMetric(3)  # cross-correlation similarity metric for 3D volumes

    level_iters = [10, 10, 5]  # iterations per level of the multi-resolution pyramid
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

    mapping = sdr.optimize(static, moving, static_affine, moving_affine,
                           pre_align)

    warped_moving = mapping.transform(moving)

    for slice_idx in range(41 - 12, 41 + 13):
        regtools.overlay_slices(static, resampled, slice_idx, 1, 'Static',
                                'Pre Moving',
                                'GIFexample1/' + str(slice_idx) + 'T1pre.png')
        regtools.overlay_slices(static, warped_moving, slice_idx, 1, 'Static',
                                'Post Moving',
                                'GIFexample1/' + str(slice_idx) + 'T1post.png')
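The PNG slices written to ``GIFexample1/`` can be stitched into an animation afterwards. This is not part of the original snippet; a minimal sketch, assuming the ``imageio`` package is available:

    import imageio

    frames = [imageio.imread('GIFexample1/%dT1post.png' % i)
              for i in range(41 - 12, 41 + 13)]
    imageio.mimsave('GIFexample1/T1post.gif', frames, duration=0.2)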
Example #2
import os
import warnings
from contextlib import redirect_stdout

from dipy.data import read_stanford_hardi
# NOTE: `add_noise` is assumed to be imported elsewhere in the original module
# (e.g. a Rician-noise helper such as dipy.sims.phantom.add_noise).


def rw_stanford(snr=None):
    with redirect_stdout(open(os.devnull, "w")), warnings.catch_warnings():
        warnings.simplefilter("ignore")
        img, gtab = read_stanford_hardi()
        assert (img.shape[-1] == gtab.bvals.size)
        S_data = img.get_data()

    S_data_orig = S_data.copy()
    if snr is not None:
        S_data[:] = add_noise(S_data_orig, snr=snr)

    return S_data_orig, S_data, gtab
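A quick usage sketch (the variable names and the SNR value here are ours, not from the original code):

    S_clean, S_noisy, gtab = rw_stanford(snr=20)
    assert S_clean.shape == S_noisy.shape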
Example #3
import numpy as np
import nibabel as nib
import dipy.data as dpd


def setup_module():
    global subset_b0, subset_dwi_data, subset_t2, subset_b0_img, \
           subset_t2_img, gtab, hardi_affine, MNI_T2_affine
    MNI_T2 = dpd.read_mni_template()
    hardi_img, gtab = dpd.read_stanford_hardi()
    MNI_T2_data = MNI_T2.get_fdata()
    MNI_T2_affine = MNI_T2.affine
    hardi_data = hardi_img.get_fdata()
    hardi_affine = hardi_img.affine
    b0 = hardi_data[..., gtab.b0s_mask]
    mean_b0 = np.mean(b0, -1)

    # We select some arbitrary chunk of data so this goes quicker:
    subset_b0 = mean_b0[40:45, 40:45, 40:45]
    subset_dwi_data = nib.Nifti1Image(hardi_data[40:45, 40:45, 40:45],
                                      hardi_affine)
    subset_t2 = MNI_T2_data[40:50, 40:50, 40:50]
    subset_b0_img = nib.Nifti1Image(subset_b0, hardi_affine)
    subset_t2_img = nib.Nifti1Image(subset_t2, MNI_T2_affine)
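A sanity-check test built on these fixtures might look like this (a sketch; the test name is ours, and the expected shapes follow directly from the slices above):

    def test_subset_shapes():
        assert subset_b0.shape == (5, 5, 5)
        assert subset_t2.shape == (10, 10, 10)
        assert subset_dwi_data.shape[:3] == (5, 5, 5)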
Example #4
import os
import os.path as op
import json

import numpy as np
import nibabel as nib
import dipy.data as dpd


def organize_stanford_data(path=None):
    """
    Create the expected file-system structure for the Stanford HARDI data-set.
    """
    # `afq_home` is assumed to be defined at module level (the pyAFQ data directory).
    dpd.fetch_stanford_hardi()

    if path is None:
        if not op.exists(afq_home):
            os.mkdir(afq_home)
        my_path = afq_home
    else:
        my_path = path

    base_folder = op.join(my_path, 'stanford_hardi', 'derivatives', 'dmriprep')

    if not op.exists(base_folder):
        anat_folder = op.join(base_folder, 'sub-01', 'sess-01', 'anat')
        os.makedirs(anat_folder, exist_ok=True)
        dwi_folder = op.join(base_folder, 'sub-01', 'sess-01', 'dwi')
        os.makedirs(dwi_folder, exist_ok=True)
        t1_img = dpd.read_stanford_t1()
        nib.save(t1_img, op.join(anat_folder, 'sub-01_sess-01_T1w.nii.gz'))
        seg_img = dpd.read_stanford_labels()[-1]
        nib.save(seg_img,
                 op.join(anat_folder, 'sub-01_sess-01_aparc+aseg.nii.gz'))
        dwi_img, gtab = dpd.read_stanford_hardi()
        nib.save(dwi_img, op.join(dwi_folder, 'sub-01_sess-01_dwi.nii.gz'))
        np.savetxt(op.join(dwi_folder, 'sub-01_sess-01_dwi.bvecs'), gtab.bvecs)
        np.savetxt(op.join(dwi_folder, 'sub-01_sess-01_dwi.bvals'), gtab.bvals)

    dataset_description = {
        "BIDSVersion": "1.0.0",
        "Name": "Stanford HARDI",
        "Subjects": ["sub-01"]
    }

    desc_file = op.join(my_path, 'stanford_hardi', 'dataset_description.json')

    with open(desc_file, 'w') as outfile:
        json.dump(dataset_description, outfile)
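For reference, the function above produces roughly the following layout under ``my_path`` (file names taken from the code; the root depends on ``path``/``afq_home``):

    stanford_hardi/
        dataset_description.json
        derivatives/dmriprep/sub-01/sess-01/
            anat/sub-01_sess-01_T1w.nii.gz
            anat/sub-01_sess-01_aparc+aseg.nii.gz
            dwi/sub-01_sess-01_dwi.nii.gz  (+ .bvecs and .bvals)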
Example #5
al (MRM 2010) to your datasets.

First import the necessary modules:
"""

import time
from dipy.data import fetch_stanford_hardi, read_stanford_hardi, get_sphere
from dipy.reconst.shm import CsaOdfModel
from dipy.reconst.peaks import peaks_from_model

"""
Download and read the data for this tutorial.
"""

fetch_stanford_hardi()
img, gtab = read_stanford_hardi()

"""
``img`` contains a nibabel Nifti1Image object (the data) and ``gtab`` a GradientTable
object (the gradient information, e.g. b-values). For example, to read the b-values
you can write ``print(gtab.bvals)``.

Load the raw diffusion data and the affine.
"""

data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)

"""
data.shape ``(81, 106, 76, 160)``
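The excerpt is cut off here. In the full DIPY tutorial the next step is to fit the CSA ODF model and extract its peaks; a rough sketch using the modules imported above (variable names and parameter values are illustrative, not necessarily the tutorial's exact ones):

    csamodel = CsaOdfModel(gtab, 4)            # spherical-harmonic order 4
    sphere = get_sphere('symmetric724')        # sampling sphere for the ODFs
    csapeaks = peaks_from_model(model=csamodel, data=data, sphere=sphere,
                                relative_peak_threshold=0.5,
                                min_separation_angle=25,
                                normalize_peaks=True)
    print(csapeaks.peak_values.shape)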
Example #6

import numpy as np
from dipy.viz import regtools
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
from dipy.data.fetcher import fetch_syn_data, read_syn_data
from dipy.align.imaffine import (transform_centers_of_mass, AffineMap,
                                 MutualInformationMetric, AffineRegistration)
from dipy.align.transforms import (TranslationTransform3D, RigidTransform3D,
                                   AffineTransform3D)
"""
Let's fetch two b0 volumes; the static image will be the b0 from the Stanford
HARDI dataset.
"""

fetch_stanford_hardi()
nib_stanford, gtab_stanford = read_stanford_hardi()
static = np.squeeze(nib_stanford.get_data())[..., 0]
static_grid2world = nib_stanford.affine
"""
Now the moving image
"""

fetch_syn_data()
nib_syn_t1, nib_syn_b0 = read_syn_data()
moving = np.array(nib_syn_b0.get_data())
moving_grid2world = nib_syn_b0.affine
"""
We can see that the images are far from aligned by drawing one on top of
the other. The images don't even have the same number of voxels, so in order
to draw one on top of the other we need to resample the moving image on a grid
of the same dimensions as the static image, we can do this by "transforming"
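The snippet is truncated at this point. In the DIPY tutorial the moving image is resampled onto the static grid with an identity ``AffineMap`` and the two volumes are overlaid; a sketch of that step using the names defined above (the output file name is ours):

    identity = np.eye(4)
    affine_map = AffineMap(identity,
                           static.shape, static_grid2world,
                           moving.shape, moving_grid2world)
    resampled = affine_map.transform(moving)
    regtools.overlay_slices(static, resampled, None, 0,
                            'Static', 'Moving', 'resampled_0.png')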
Example #7
import dipy.data as dpd  # needed for the dpd.* calls below (missing from the excerpt)
import dipy.reconst.dti as dti
import dipy.reconst.csdeconv as csd
import scipy.stats as stats


"""

We fetch some data and select a couple of voxels to perform comparisons on. One
lies in the corpus callosum (cc), while the other is in the centrum semiovale
(cso), a part of the brain known to contain multiple crossing white matter
fiber populations.

"""

dpd.fetch_stanford_hardi()
img, gtab = dpd.read_stanford_hardi()
data = img.get_data()

cc_vox = data[40, 70, 38]
cso_vox = data[30, 76, 38]

"""

We initialize each kind of model:

"""

dti_model = dti.TensorModel(gtab)
response, ratio = csd.auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = csd.ConstrainedSphericalDeconvModel(gtab, response)
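The excerpt stops after the models are constructed. A minimal sketch of fitting them to the two voxels selected above (not part of the original code; variable names are ours):

    dti_cc_fit = dti_model.fit(cc_vox)   # single-voxel tensor fit
    csd_cc_fit = csd_model.fit(cc_vox)   # single-voxel CSD fit
    print(dti_cc_fit.fa)                 # fractional anisotropy in the cc voxel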
Example #8

from dipy.data.fetcher import fetch_syn_data, read_syn_data
from dipy.align.imaffine import (transform_centers_of_mass,
                                 AffineMap,
                                 MutualInformationMetric,
                                 AffineRegistration)
from dipy.align.transforms import (TranslationTransform3D,
                                   RigidTransform3D,
                                   AffineTransform3D)

"""
Let's fetch two b0 volumes; the static image will be the b0 from the Stanford
HARDI dataset.
"""

fetch_stanford_hardi()
nib_stanford, gtab_stanford = read_stanford_hardi()
static = np.squeeze(nib_stanford.get_data())[..., 0]
static_grid2world = nib_stanford.affine

"""
Now the moving image
"""

fetch_syn_data()
nib_syn_t1, nib_syn_b0 = read_syn_data()
moving = np.array(nib_syn_b0.get_data())
moving_grid2world = nib_syn_b0.affine

"""
We can see that the images are far from aligned by drawing one on top of
the other. The images don't even have the same number of voxels, so in order
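The excerpt is cut off mid-sentence. As a sketch of how the DIPY affine-registration tutorial continues, the first alignment step uses ``transform_centers_of_mass`` (already imported above) to produce an initial ``AffineMap``:

    c_of_mass = transform_centers_of_mass(static, static_grid2world,
                                          moving, moving_grid2world)
    transformed = c_of_mass.transform(moving)  # moving image resampled onto the static grid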
Example #9
import numpy as np
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
from dipy.data.fetcher import fetch_syn_data, read_syn_data

fetch_stanford_hardi()
nib_stanford, _ = read_stanford_hardi()
static = np.squeeze(nib_stanford.get_data())[..., 0]
static_grid2world = nib_stanford.affine

fetch_syn_data()
_, nib_syn_b0 = read_syn_data()
moving = np.array(nib_syn_b0.get_data())
moving_grid2world = nib_syn_b0.affine

static = ((static.astype(np.float64) - static.min()) /
          (static.max() - static.min()))
moving = ((moving.astype(np.float64) - moving.min()) /
          (moving.max() - moving.min()))

static = np.array(static).astype(np.float64)
moving = np.array(moving).astype(np.float64)

import numpy.linalg as npl
moving_world2grid = npl.inv(moving_grid2world)

from dipy.align.imwarp import get_direction_and_spacings
dim = len(static.shape)
moving_direction, moving_spacing = \
    get_direction_and_spacings(moving_grid2world, dim)

from dipy.align.vector_fields import _gradient_3d
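The snippet ends right after importing the private ``_gradient_3d`` routine. As a rough stand-in for the kind of computation it performs, here is a plain NumPy gradient using the spacings obtained above (an illustration only, not the DIPY call):

    gradients = np.gradient(moving, *moving_spacing)  # one gradient array per voxel axis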
Example #10
First import the necessary modules:
"""

import numpy as np
import nibabel as nib
from dipy.data import fetch_stanford_hardi, read_stanford_hardi, get_sphere
from dipy.reconst.shm import CsaOdfModel, normalize_data
from dipy.reconst.peaks import peaks_from_model

"""
Download and read the data for this tutorial.
"""

fetch_stanford_hardi()
img, gtab = read_stanford_hardi()

"""
``img`` contains a nibabel Nifti1Image object (the data) and ``gtab`` a GradientTable
object (the gradient information, e.g. b-values). For example, to read the b-values
you can write ``print(gtab.bvals)``.

Load the raw diffusion data and the affine.
"""

data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)

"""
data.shape ``(81, 106, 76, 160)``
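``normalize_data`` is imported above but the excerpt ends before it is used. A minimal sketch of what it does, dividing each signal by its b0 values (``gtab.b0s_mask`` marks the b0 volumes; the variable name is ours):

    data_norm = normalize_data(data, gtab.b0s_mask)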
Example #11
import dipy.reconst.dti as dti  # needed for dti.TensorModel below (missing from the excerpt)
import dipy.data as dpd
"""
``dipy.viz.fvtk`` is used for 3D visualization and matplotlib for 2D
visualizations:
"""

import dipy.viz.fvtk as fvtk
import matplotlib.pyplot as plt
"""
If needed, the ``fetch_stanford_hardi`` function will download the raw dMRI
dataset of a single subject. The size of this dataset is 87 MBytes. You only
need to fetch once.
"""

dpd.fetch_stanford_hardi()
img, gtab = dpd.read_stanford_hardi()
"""
We initialize a DTI model class instance using the gradient table used in the
measurement. By default, ``dti.TensorModel`` will use a weighted least-squares
algorithm (described in [Chang2005]_) to fit the parameters of the model. We
initialize this model as a baseline for comparison of noise-corrupted models:
"""

dti_wls = dti.TensorModel(gtab)
"""
For the purpose of this example, we will focus on the data from a region of
interest (ROI) surrounding the Corpus Callosum. We define that ROI as the
following indices:
"""

roi_idx = (slice(20, 50), slice(55, 85), slice(38, 39))
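The excerpt ends with the ROI definition. A brief sketch of how the ROI and the WLS model might be combined (assuming ``data = img.get_data()``, which the excerpt does not show; variable names are ours):

    data = img.get_data()
    fit_wls = dti_wls.fit(data[roi_idx])   # tensor fit restricted to the ROI
    fa_wls = fit_wls.fa                    # fractional anisotropy within the ROI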