def exampleDipy():
    """Run the DIPY symmetric diffeomorphic (SyN) registration example.

    Downloads the Stanford HARDI and syn b0 datasets, skull-strips both
    b0 volumes, applies a precomputed affine pre-alignment, runs SyN
    registration, and writes per-slice overlay PNGs to ``GIFexample1/``.

    Side effects: network downloads, file writes. Returns ``None``.
    Example obtained from:
    http://nipy.org/dipy/examples_built/syn_registration_2d.html
    """
    # Allow the dataset downloads to proceed even when the local certificate
    # store cannot verify the server (HTTPS fetch would otherwise fail).
    import ssl
    if hasattr(ssl, '_create_unverified_context'):
        ssl._create_default_https_context = ssl._create_unverified_context

    from dipy.data import fetch_stanford_hardi, read_stanford_hardi
    fetch_stanford_hardi()
    nib_stanford, gtab_stanford = read_stanford_hardi()
    # get_fdata() replaces get_data(), which was deprecated and removed in
    # nibabel 5.0; take the first volume of the 4D acquisition as the b0.
    stanford_b0 = np.squeeze(nib_stanford.get_fdata())[..., 0]

    from dipy.data.fetcher import fetch_syn_data, read_syn_data
    fetch_syn_data()
    nib_syn_t1, nib_syn_b0 = read_syn_data()
    syn_b0 = np.array(nib_syn_b0.get_fdata())

    # Skull-strip both b0 volumes (median_radius=4, numpass=4).
    from dipy.segment.mask import median_otsu
    stanford_b0_masked, stanford_b0_mask = median_otsu(stanford_b0, 4, 4)
    syn_b0_masked, syn_b0_mask = median_otsu(syn_b0, 4, 4)

    static = stanford_b0_masked
    static_affine = nib_stanford.affine
    moving = syn_b0_masked
    moving_affine = nib_syn_b0.affine

    # Precomputed affine pre-alignment of moving onto static (from the
    # original DIPY tutorial).
    pre_align = np.array(
        [[1.02783543e+00, -4.83019053e-02, -6.07735639e-02, -2.57654118e+00],
         [4.34051706e-03, 9.41918267e-01, -2.66525861e-01, 3.23579799e+01],
         [5.34288908e-02, 2.90262026e-01, 9.80820307e-01, -1.46216651e+01],
         [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])

    from dipy.align.imaffine import AffineMap
    affine_map = AffineMap(pre_align,
                           static.shape, static_affine,
                           moving.shape, moving_affine)
    resampled = affine_map.transform(moving)

    # 3D cross-correlation metric; coarse-to-fine iteration schedule.
    metric = CCMetric(3)
    level_iters = [10, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
    mapping = sdr.optimize(static, moving, static_affine, moving_affine,
                           pre_align)
    warped_moving = mapping.transform(moving)

    # Save pre- and post-registration overlays for slices 29..53.
    # Loop variable renamed from 'slice', which shadowed the builtin.
    for slice_idx in range(41 - 12, 41 + 13):
        regtools.overlay_slices(static, resampled, slice_idx, 1,
                                'Static', 'Pre Moving',
                                'GIFexample1/' + str(slice_idx) + 'T1pre.png')
        regtools.overlay_slices(static, warped_moving, slice_idx, 1,
                                'Static', 'Post moving',
                                'GIFexample1/' + str(slice_idx) + 'T1post.png')
# NOTE(review): this line is a truncated, whitespace-collapsed snippet of the
# DIPY affine-registration tutorial. It opens with the TAIL of a multi-line
# import ("AffineTransform3D)") whose beginning is missing from this view, and
# it ends mid-call inside regtools.overlay_slices(..., "Moving",  -- the
# remaining arguments are also missing. Kept byte-identical: it cannot be
# safely reformatted without recovering the missing context.
# NOTE(review): get_data() is deprecated and removed in nibabel 5.0; the
# replacement is get_fdata() -- confirm the pinned nibabel version before use.
AffineTransform3D) """ Let's fetch two b0 volumes, the static image will be the b0 from the Stanford HARDI dataset """ fetch_stanford_hardi() nib_stanford, gtab_stanford = read_stanford_hardi() static = np.squeeze(nib_stanford.get_data())[..., 0] static_grid2world = nib_stanford.affine """ Now the moving image """ fetch_syn_data() nib_syn_t1, nib_syn_b0 = read_syn_data() moving = np.array(nib_syn_b0.get_data()) moving_grid2world = nib_syn_b0.affine """ We can see that the images are far from aligned by drawing one on top of the other. The images don't even have the same number of voxels, so in order to draw one on top of the other we need to resample the moving image on a grid of the same dimensions as the static image, we can do this by "transforming" the moving image using an identity transform """ identity = np.eye(4) affine_map = AffineMap(identity, static.shape, static_grid2world, moving.shape, moving_grid2world) resampled = affine_map.transform(moving) regtools.overlay_slices(static, resampled, None, 0, "Static", "Moving",
""" Let's fetch two b0 volumes, the static image will be the b0 from the Stanford HARDI dataset """ fetch_stanford_hardi() nib_stanford, gtab_stanford = read_stanford_hardi() static = np.squeeze(nib_stanford.get_data())[..., 0] static_grid2world = nib_stanford.affine """ Now the moving image """ fetch_syn_data() nib_syn_t1, nib_syn_b0 = read_syn_data() moving = np.array(nib_syn_b0.get_data()) moving_grid2world = nib_syn_b0.affine """ We can see that the images are far from aligned by drawing one on top of the other. The images don't even have the same number of voxels, so in order to draw one on top of the other we need to resample the moving image on a grid of the same dimensions as the static image, we can do this by "transforming" the moving image using an identity transform """ identity = np.eye(4) affine_map = AffineMap(identity, static.shape, static_grid2world, moving.shape, moving_grid2world)
# NOTE(review): whitespace-collapsed, truncated snippet. The opening
# statements (wmoving/wstatic from sdr.metric, then overlay_images) read from
# an 'sdr' defined outside this view -- presumably the body of a registration
# progress callback whose 'def' header is missing; confirm before reformatting,
# since flattening them to top level would change the program's structure.
# The remainder is top-level setup: fetch the syn data, skull-strip the b0
# volume with median_otsu (keyword args), and pick slices 40/38 for a 2D
# registration. get_data() is deprecated/removed in nibabel 5.0 (use
# get_fdata()) -- verify the pinned nibabel version. Kept byte-identical.
#get the current images from the metric wmoving = sdr.metric.moving_image wstatic = sdr.metric.static_image #draw the images on top of each other with different colors regtools.overlay_images(wmoving, wstatic, 'Warped moving', 'Overlay', 'Warped static') """ Now we are ready to configure and run the registration. First load the data """ from dipy.data.fetcher import fetch_syn_data, read_syn_data from dipy.segment.mask import median_otsu fetch_syn_data() t1, b0 = read_syn_data() data = np.array(b0.get_data(), dtype=np.float64) """ We first remove the skull from the b0 volume """ b0_mask, mask = median_otsu(data, median_radius=4, numpass=4) """ And select two slices to try the 2D registration """ static = b0_mask[:, :, 40] moving = b0_mask[:, :, 38]
# NOTE(review): near-duplicate of the preceding collapsed snippet; the only
# code difference is that median_otsu is called with positional arguments
# (data, 4, 4) instead of keywords -- same values, so same behavior, but the
# two copies should be unified if this duplication is not intentional.
# As with its twin: the opening sdr.metric statements presumably belong to a
# callback whose 'def' header is missing from this view (do not flatten to
# top level without confirming), and get_data() is deprecated/removed in
# nibabel 5.0 (use get_fdata()). Kept byte-identical.
#get the current images from the metric wmoving = sdr.metric.moving_image wstatic = sdr.metric.static_image #draw the images on top of each other with different colors regtools.overlay_images(wmoving, wstatic, 'Warped moving', 'Overlay', 'Warped static') """ Now we are ready to configure and run the registration. First load the data """ from dipy.data.fetcher import fetch_syn_data, read_syn_data from dipy.segment.mask import median_otsu fetch_syn_data() t1, b0 = read_syn_data() data = np.array(b0.get_data(), dtype=np.float64) """ We first remove the skull from the b0 volume """ b0_mask, mask = median_otsu(data, 4, 4) """ And select two slices to try the 2D registration """ static = b0_mask[:, :, 40] moving = b0_mask[:, :, 38]