Example #1
def exampleDipy():
    # imports needed to make this example self-contained
    import numpy as np
    from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
    from dipy.align.metrics import CCMetric
    from dipy.viz import regtools

    # example obtained from: http://nipy.org/dipy/examples_built/syn_registration_2d.html
    import ssl
    if hasattr(ssl, '_create_unverified_context'):
        ssl._create_default_https_context = ssl._create_unverified_context
    from dipy.data import fetch_stanford_hardi, read_stanford_hardi
    fetch_stanford_hardi()
    nib_stanford, gtab_stanford = read_stanford_hardi()
    stanford_b0 = np.squeeze(nib_stanford.get_data())[..., 0]

    from dipy.data.fetcher import fetch_syn_data, read_syn_data
    fetch_syn_data()
    nib_syn_t1, nib_syn_b0 = read_syn_data()
    syn_b0 = np.array(nib_syn_b0.get_data())

    from dipy.segment.mask import median_otsu

    # skull-strip both b0 volumes before registration
    stanford_b0_masked, stanford_b0_mask = median_otsu(stanford_b0, 4, 4)
    syn_b0_masked, syn_b0_mask = median_otsu(syn_b0, 4, 4)

    static = stanford_b0_masked
    static_affine = nib_stanford.affine
    moving = syn_b0_masked
    moving_affine = nib_syn_b0.affine

    # affine pre-alignment (e.g. the result of a previous affine registration)
    pre_align = np.array(
        [[1.02783543e+00, -4.83019053e-02, -6.07735639e-02, -2.57654118e+00],
         [4.34051706e-03, 9.41918267e-01, -2.66525861e-01, 3.23579799e+01],
         [5.34288908e-02, 2.90262026e-01, 9.80820307e-01, -1.46216651e+01],
         [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])

    from dipy.align.imaffine import AffineMap
    affine_map = AffineMap(pre_align,
                           static.shape, static_affine,
                           moving.shape, moving_affine)

    resampled = affine_map.transform(moving)

    # Cross-correlation metric over 3D volumes
    metric = CCMetric(3)

    # Symmetric diffeomorphic (SyN) registration over a 3-level pyramid
    level_iters = [10, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

    # Optimize, starting from the precomputed affine pre-alignment
    mapping = sdr.optimize(static, moving, static_affine, moving_affine,
                           pre_align)

    warped_moving = mapping.transform(moving)

    for slice_index in range(41 - 12, 41 + 13):
        regtools.overlay_slices(static, resampled, slice_index, 1, 'Static',
                                'Pre Moving',
                                'GIFexample1/' + str(slice_index) + 'T1pre.png')
        regtools.overlay_slices(static, warped_moving, slice_index, 1, 'Static',
                                'Post moving',
                                'GIFexample1/' + str(slice_index) + 'T1post.png')
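The mapping returned by SymmetricDiffeomorphicRegistration.optimize is a
DiffeomorphicMap and can also be applied in the reverse direction. A minimal
sketch, reusing the static, moving, mapping, and regtools names from the
function above (the output filename is illustrative):

# Warp the static image into the moving image's space with the inverse map
warped_static = mapping.transform_inverse(static)

# Overlay the result on the moving image as a quick sanity check
regtools.overlay_slices(moving, warped_static, None, 1,
                        'Moving', 'Inverse warped static',
                        'GIFexample1/inverse_check.png')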
Example #2
import numpy as np
from os.path import join as pjoin

from dipy.align.imaffine import AffineMap
from dipy.align.transforms import (TranslationTransform3D, RigidTransform3D,
                                   AffineTransform3D)
from dipy.data import fetch_stanford_hardi
from dipy.data.fetcher import fetch_syn_data
from dipy.io.image import load_nifti
"""
Let's fetch two b0 volumes, the static image will be the b0 from the Stanford
HARDI dataset
"""

files, folder = fetch_stanford_hardi()
static_data, static_affine = load_nifti(pjoin(folder, 'HARDI150.nii.gz'))
static = np.squeeze(static_data)[..., 0]
static_grid2world = static_affine
"""
Now the moving image
"""

files, folder = fetch_syn_data()
moving_data, moving_affine = load_nifti(pjoin(folder, 'b0.nii.gz'))
moving = moving_data
moving_grid2world = moving_affine
"""
We can see that the images are far from aligned by drawing one on top of
the other. The images don't even have the same number of voxels, so in order
to draw one on top of the other we need to resample the moving image on a grid
of the same dimensions as the static image, we can do this by "transforming"
the moving image using an identity transform
"""

identity = np.eye(4)
affine_map = AffineMap(identity, static.shape, static_grid2world, moving.shape,
                       moving_grid2world)
resampled = affine_map.transform(moving)
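The TranslationTransform3D, RigidTransform3D and AffineTransform3D imports at
the top of this example are normally fed to dipy's AffineRegistration, which
optimizes each transform in turn, seeding every stage with the previous
result. A minimal sketch of that progression, reusing the static and moving
volumes from above (the metric settings, level_iters, sigmas and factors are
illustrative values, not taken from this example):

from dipy.align.imaffine import MutualInformationMetric, AffineRegistration

# Mutual information metric with full sampling
metric = MutualInformationMetric(nbins=32, sampling_proportion=None)
affreg = AffineRegistration(metric=metric,
                            level_iters=[10000, 1000, 100],
                            sigmas=[3.0, 1.0, 0.0],
                            factors=[4, 2, 1])

# Translation first, then rigid, then full affine, each seeded by the previous
translation = affreg.optimize(static, moving, TranslationTransform3D(), None,
                              static_grid2world, moving_grid2world)
rigid = affreg.optimize(static, moving, RigidTransform3D(), None,
                        static_grid2world, moving_grid2world,
                        starting_affine=translation.affine)
affine = affreg.optimize(static, moving, AffineTransform3D(), None,
                         static_grid2world, moving_grid2world,
                         starting_affine=rigid.affine)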
Example #3
import numpy as np

from dipy.align.imaffine import AffineMap
from dipy.align.transforms import (TranslationTransform3D, RigidTransform3D,
                                   AffineTransform3D)
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
from dipy.data.fetcher import fetch_syn_data, read_syn_data
"""
Let's fetch two b0 volumes, the static image will be the b0 from the Stanford
HARDI dataset
"""

fetch_stanford_hardi()
nib_stanford, gtab_stanford = read_stanford_hardi()
static = np.squeeze(nib_stanford.get_data())[..., 0]
static_grid2world = nib_stanford.affine
"""
Now the moving image
"""

fetch_syn_data()
nib_syn_t1, nib_syn_b0 = read_syn_data()
moving = np.array(nib_syn_b0.get_data())
moving_grid2world = nib_syn_b0.affine
"""
We can see that the images are far from aligned by drawing one on top of
the other. The images don't even have the same number of voxels, so in order
to draw one on top of the other we need to resample the moving image on a grid
of the same dimensions as the static image, we can do this by "transforming"
the moving image using an identity transform
"""

identity = np.eye(4)
affine_map = AffineMap(identity, static.shape, static_grid2world, moving.shape,
                       moving_grid2world)
resampled = affine_map.transform(moving)
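To actually draw one image on top of the other, as the comment above suggests,
dipy's regtools module can overlay corresponding slices of both volumes. A
minimal sketch, assuming the static and resampled arrays from this example
(the output filename is illustrative):

from dipy.viz import regtools

# Overlay the middle slices of the static and resampled moving volumes
regtools.overlay_slices(static, resampled, None, 1,
                        'Static', 'Moving', 'resampled_overlay.png')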
Example #4
"""
Let's fetch two b0 volumes, the static image will be the b0 from the Stanford
HARDI dataset
"""

fetch_stanford_hardi()
nib_stanford, gtab_stanford = read_stanford_hardi()
static = np.squeeze(nib_stanford.get_data())[..., 0]
static_grid2world = nib_stanford.affine

"""
Now the moving image
"""

fetch_syn_data()
nib_syn_t1, nib_syn_b0 = read_syn_data()
moving = np.array(nib_syn_b0.get_data())
moving_grid2world = nib_syn_b0.affine

"""
We can see that the images are far from aligned by drawing one on top of
the other. The images don't even have the same number of voxels, so in order
to draw one on top of the other we need to resample the moving image on a grid
of the same dimensions as the static image, we can do this by "transforming"
the moving image using an identity transform
"""

identity = np.eye(4)
affine_map = AffineMap(identity,
                       static.shape, static_grid2world,