Example #1
    def estimate_rigidxy(self, fixed, moving, tx_tr=None):
        assert len(moving.shape) == len(fixed.shape)
        trans = TranslationTransform3D()
        if self.update_map:
            self.metric = MutualInformationMetric(self.nbins,
                                                  self.sampling_prop)
            self.affmap = AffineRegistration(
                metric=self.metric,
                level_iters=self.level_iters,
                sigmas=self.sigmas,
                factors=self.factors,
                method=self.method,
                ss_sigma_factor=self.ss_sigma_factor,
                options=self.options,
                verbosity=self.verbosity)
        if tx_tr is None:
            tmp = self.estimate_rigid2d(fixed.mean(axis=0),
                                        moving.mean(axis=0))
            tmp = tmp.affine
            tx_tr = np.eye(4)
            tx_tr[1:, 1:] = tmp
        if isinstance(tx_tr, AffineMap):
            tx_tr = tx_tr.affine

        trans2d = AffineMap(tx_tr,
                            domain_grid_shape=fixed.shape,
                            codomain_grid_shape=moving.shape)
        moving_ = trans2d.transform(fixed)
        transz = self.affmap.optimize(moving_, moving, trans, self.params0)
        print(transz.affine)
        tx_tr[0, 3] = transz.affine[0, 3]
        return AffineMap(tx_tr,
                         domain_grid_shape=fixed.shape,
                         codomain_grid_shape=moving.shape)
Example #2
def resample_moving(fixed, moving):
    identity = np.eye(4)
    affine_map = AffineMap(identity,
                           fixed.shape, fixed.affine,
                           moving.shape, moving.affine)
    resampled = affine_map.transform(moving.get_data())
    return resampled
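A minimal usage sketch (file names are placeholders, not part of the original example): load both images with nibabel and resample the moving one onto the grid of the fixed image.

import nibabel as nib

fixed_img = nib.load("fixed.nii.gz")    # hypothetical path
moving_img = nib.load("moving.nii.gz")  # hypothetical path
resampled_data = resample_moving(fixed_img, moving_img)
# Wrap the result in the fixed image's space so it can be saved
nib.save(nib.Nifti1Image(resampled_data, fixed_img.affine), "moving_in_fixed_space.nii.gz")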
Example #3
def change_spacing_4D(img_in, new_spacing=1.25):
    """
    Note: Only works properly if affine is all 0 except for diagonal and offset (=no rotation and sheering)
    """
    data = img_in.get_data()
    old_shape = data.shape
    img_spacing = abs(img_in.affine[0, 0])

    # The copy is essential; otherwise modifying new_affine would also change the original affine
    new_affine = np.copy(img_in.affine)
    new_affine[0, 0] = new_spacing if img_in.affine[0, 0] > 0 else -new_spacing
    new_affine[1, 1] = new_spacing if img_in.affine[1, 1] > 0 else -new_spacing
    new_affine[2, 2] = new_spacing if img_in.affine[2, 2] > 0 else -new_spacing

    new_shape = np.floor(
        np.array(img_in.get_data().shape) * (img_spacing / new_spacing))
    new_shape = new_shape[:3]  # drop last dim

    new_data = []
    for i in range(data.shape[3]):
        affine_map = AffineMap(np.eye(4), new_shape, new_affine, old_shape,
                               img_in.affine)
        # "nearest" generally gives slightly better results here than "linear" interpolation
        res = affine_map.transform(data[:, :, :, i], interp="nearest")
        new_data.append(res)

    new_data = np.array(new_data).transpose(1, 2, 3, 0)
    img_new = nib.Nifti1Image(new_data, new_affine)

    return img_new
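A short usage sketch, assuming a 4D NIfTI file on disk (the paths are placeholders): resample it to 1.25 mm isotropic spacing and save the result.

import nibabel as nib

img_4d = nib.load("dwi_4d.nii.gz")               # hypothetical input
img_resampled = change_spacing_4D(img_4d, new_spacing=1.25)
nib.save(img_resampled, "dwi_4d_1.25mm.nii.gz")  # hypothetical output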
Example #4
def change_spacing_4D(img_in, new_spacing=1.25):
    from dipy.align.imaffine import AffineMap

    data = img_in.get_data()
    old_shape = data.shape
    img_spacing = abs(img_in.get_affine()[0, 0])

    # The copy is essential; otherwise modifying new_affine would also change the original affine
    new_affine = np.copy(img_in.get_affine())
    new_affine[0, 0] = new_spacing if img_in.get_affine()[0, 0] > 0 else -new_spacing
    new_affine[1, 1] = new_spacing if img_in.get_affine()[1, 1] > 0 else -new_spacing
    new_affine[2, 2] = new_spacing if img_in.get_affine()[2, 2] > 0 else -new_spacing

    new_shape = np.floor(np.array(img_in.get_data().shape) * (img_spacing / new_spacing))
    new_shape = new_shape[:3]  # drop last dim

    new_data = []
    for i in range(data.shape[3]):
        affine_map = AffineMap(np.eye(4),
                               new_shape, new_affine,
                               old_shape, img_in.get_affine()
                               )
        # "nearest" generally gives slightly better results here than "linear" interpolation
        # res = affine_map.transform(data[:, :, :, i], interp="linear")
        res = affine_map.transform(data[:, :, :, i], interp="nearest")
        new_data.append(res)

    new_data = np.array(new_data).transpose(1, 2, 3, 0)
    img_new = nib.Nifti1Image(new_data, new_affine)

    return img_new
Example #5
def resample(moving, static, moving_grid2world, static_grid2world):
    """
    """
    identity = np.eye(4)
    affine_map = AffineMap(identity, static.shape, static_grid2world,
                           moving.shape, moving_grid2world)
    resampled = affine_map.transform(moving)
Example #6
def apply_affine_tform(volume,
                       matrix,
                       sampling_grid_shape=None,
                       check_bounds=False,
                       contain_all=False,
                       domain_grid_shape=None,
                       codomain_grid_shape=None,
                       domain_grid2world=None,
                       codomain_grid2world=None,
                       sampling_grid2world=None):
    """
    given a homogeneous transformation matrix, create an affine matrix and use dipy to apply the transformation.
    """

    import numpy as np
    from dipy.align.imaffine import AffineMap

    if domain_grid_shape is None:
        domain_grid_shape = volume.shape
    if codomain_grid_shape is None:
        codomain_grid_shape = volume.shape

    if check_bounds:
        if contain_all:
            in_out_corners, out_shape, tilt_tf_ = compute_transform_bounds(
                domain_grid_shape, matrix, contain_all=True)
        else:
            in_out_corners, out_shape = compute_transform_bounds(
                domain_grid_shape, matrix, contain_all=False)
            tilt_tf_ = None
#        print out_shape
    affine_map = AffineMap(matrix,
                           domain_grid_shape=domain_grid_shape,
                           domain_grid2world=domain_grid2world,
                           codomain_grid_shape=codomain_grid_shape,
                           codomain_grid2world=codomain_grid2world)

    if check_bounds:
        out = affine_map.transform(volume,
                                   sampling_grid_shape=out_shape,
                                   sampling_grid2world=tilt_tf_)
    else:
        out = affine_map.transform(volume,
                                   sampling_grid_shape=sampling_grid_shape,
                                   sampling_grid2world=sampling_grid2world)

    return out
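compute_transform_bounds is defined elsewhere in the same project and is not shown above. A hypothetical sketch of what such a helper might do, assuming it pushes the corner voxels of the input grid through the transform and returns the corner coordinates, the shape of the bounding grid, and (for contain_all) a grid-to-world offset so the whole warped volume fits; the exact mapping direction depends on the project's convention, so treat this purely as an illustration.

import numpy as np

def compute_transform_bounds(grid_shape, matrix, contain_all=True):
    # Hypothetical: map the 8 corner voxels of the input grid through the
    # (inverse of the) affine and take the bounding box of the result.
    corners = np.array([[i, j, k, 1.0]
                        for i in (0, grid_shape[0] - 1)
                        for j in (0, grid_shape[1] - 1)
                        for k in (0, grid_shape[2] - 1)])
    warped = corners.dot(np.linalg.inv(matrix).T)[:, :3]
    lo, hi = warped.min(axis=0), warped.max(axis=0)
    in_out_corners = (corners[:, :3], warped)
    out_shape = tuple(np.ceil(hi - lo).astype(int) + 1)
    if not contain_all:
        return in_out_corners, out_shape
    # Offset grid2world so the sampling grid starts at the bounding box minimum.
    tilt_tf = np.eye(4)
    tilt_tf[:3, 3] = lo
    return in_out_corners, out_shape, tilt_tf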
Example #7
def transform_anatomy(transfo, reference, moving, filename_to_save,
                      interp='linear', keep_dtype=False):
    """
    Apply transformation to an image using Dipy's tool

    Parameters
    ----------
    transfo: numpy.ndarray
        Transformation matrix to be applied
    reference: str
        Filename of the reference image (target)
    moving: str
        Filename of the moving image
    filename_to_save: str
        Filename of the output image
    interp : string, either 'linear' or 'nearest'
        The type of interpolation to be used: 'linear' (trilinear
        interpolation) or 'nearest' (nearest-neighbour interpolation)
    keep_dtype : bool
        If True, keeps the data_type of the input moving image when saving
        the output image
    """
    grid2world, dim, _, _ = get_reference_info(reference)
    static_data = nib.load(reference).get_fdata(dtype=np.float32)

    nib_file = nib.load(moving)
    curr_type = nib_file.get_data_dtype()
    if keep_dtype:
        moving_data = np.asanyarray(nib_file.dataobj).astype(curr_type)
    else:
        moving_data = nib_file.get_fdata(dtype=np.float32)
    moving_affine = nib_file.affine

    if moving_data.ndim == 3 and isinstance(moving_data[0, 0, 0],
                                            np.ScalarType):
        orig_type = moving_data.dtype
        affine_map = AffineMap(np.linalg.inv(transfo),
                               dim, grid2world,
                               moving_data.shape, moving_affine)
        resampled = affine_map.transform(moving_data.astype(np.float64),
                                         interpolation=interp)
        nib.save(nib.Nifti1Image(resampled.astype(orig_type), grid2world),
                 filename_to_save)
    elif len(moving_data[0, 0, 0]) > 1:
        if isinstance(moving_data[0, 0, 0], np.void):
            raise ValueError('Does not support TrackVis RGB')

        affine_map = AffineMap(np.linalg.inv(transfo),
                               dim[0:3], grid2world,
                               moving_data.shape[0:3], moving_affine)

        orig_type = moving_data.dtype
        resampled = transform_dwi(affine_map, static_data, moving_data,
                                  interpolation=interp)
        nib.save(nib.Nifti1Image(resampled.astype(orig_type), grid2world),
                 filename_to_save)
    else:
        raise ValueError('Does not support this dataset (shape, type, etc)')
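transform_dwi is imported from elsewhere in the project and not shown here. A minimal sketch, under the assumption that it simply resamples each 3D volume of the 4D series with the same AffineMap:

import numpy as np

def transform_dwi(affine_map, static_data, dwi_data, interpolation='linear'):
    # Hypothetical helper: apply the same AffineMap to every 3D volume of a
    # 4D series, producing a 4D array sampled on the static grid.
    n_vols = dwi_data.shape[-1]
    out = np.zeros(static_data.shape[:3] + (n_vols,), dtype=np.float64)
    for i in range(n_vols):
        out[..., i] = affine_map.transform(dwi_data[..., i].astype(np.float64),
                                           interpolation=interpolation)
    return out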
Example #8
def resample(moving, static, moving_grid2world, static_grid2world):
    """

    """
    identity = np.eye(4)
    affine_map = AffineMap(identity,
                           static.shape, static_grid2world,
                           moving.shape, moving_grid2world)
    resampled = affine_map.transform(moving)
Example #9
    def register_mask(mask_data,
                      mask_affine,
                      reference_img,
                      elastic_transform=None,
                      binary_img=True,
                      use_inverse=False):
        '''
        Transform a mask (binary image) with the given elastic_transform.

        :param mask_data:         data of the mask to transform
        :param mask_affine:       affine of the mask to transform
        :param reference_img:     nibabel image providing the shape and affine for the affine transformation
        :param elastic_transform: elastic (deformable) transform to apply afterwards, e.g. the DiffeomorphicMap returned by get_elastic_transform
        :param binary_img:        True if the input is a binary image (e.g. a mask) rather than a float image (e.g. a T1)
        :param use_inverse:       if True, apply the inverse of the elastic transform

        :return: transformed mask (a binary image)
        '''

        logging.debug("mask original shape: {}".format(mask_data.shape))

        # Apply affine for mask image (to t1 space)
        affine_map_inv = AffineMap(
            np.eye(4),
            reference_img.get_data().shape,
            Utils.invert_x_and_y(reference_img.get_affine()), mask_data.shape,
            Utils.invert_x_and_y(mask_affine)
        )  # If I do not use invert_x_and_y for source and target, result is identical
        mask_data_reg = affine_map_inv.transform(mask_data)
        if binary_img:
            mask_data_reg = mask_data_reg > 0
        logging.debug("mask registered shape: {}".format(mask_data_reg.shape))

        if elastic_transform:

            # img = nib.Nifti1Image(mask_data_reg.astype(np.uint8), reference_img.get_affine())
            # nib.save(img, "ROI_registered_before.nii.gz")

            if use_inverse:
                mask_data_reg = elastic_transform.transform_inverse(
                    mask_data_reg)
            else:
                mask_data_reg = elastic_transform.transform(mask_data_reg)

            if binary_img:
                mask_data_reg = mask_data_reg > 0

            # img = nib.Nifti1Image(mask_data_reg.astype(np.uint8), reference_img.get_affine())
            # nib.save(img, "ROI_registered_after.nii.gz")

        else:
            logging.warning(
                "Elastic Transform deactivated; only using Affine Transform")

        if binary_img:
            mask_data_reg = mask_data_reg > 0
        return mask_data_reg
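Utils.invert_x_and_y comes from the surrounding project and is not reproduced here. A hypothetical stand-in, assuming it merely flips the sign of the x and y rows of a 4x4 affine (in the project it is a static method on a Utils class, and its actual behaviour may differ):

import numpy as np

def invert_x_and_y(affine):
    # Hypothetical: negate the x and y rows of the voxel-to-world affine,
    # i.e. flip the first two spatial axes.
    flipped = np.array(affine, copy=True)
    flipped[0, :] *= -1
    flipped[1, :] *= -1
    return flipped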
Example #10
def exampleDipy():
    # example obtained from: http://nipy.org/dipy/examples_built/syn_registration_2d.html
    import ssl
    if hasattr(ssl, '_create_unverified_context'):
        ssl._create_default_https_context = ssl._create_unverified_context
    from dipy.data import fetch_stanford_hardi, read_stanford_hardi
    fetch_stanford_hardi()
    nib_stanford, gtab_stanford = read_stanford_hardi()
    stanford_b0 = np.squeeze(nib_stanford.get_data())[..., 0]

    from dipy.data.fetcher import fetch_syn_data, read_syn_data
    fetch_syn_data()
    nib_syn_t1, nib_syn_b0 = read_syn_data()
    syn_b0 = np.array(nib_syn_b0.get_data())

    from dipy.segment.mask import median_otsu

    stanford_b0_masked, stanford_b0_mask = median_otsu(stanford_b0, 4, 4)
    syn_b0_masked, syn_b0_mask = median_otsu(syn_b0, 4, 4)

    static = stanford_b0_masked
    static_affine = nib_stanford.affine
    moving = syn_b0_masked
    moving_affine = nib_syn_b0.affine

    pre_align = np.array(
        [[1.02783543e+00, -4.83019053e-02, -6.07735639e-02, -2.57654118e+00],
         [4.34051706e-03, 9.41918267e-01, -2.66525861e-01, 3.23579799e+01],
         [5.34288908e-02, 2.90262026e-01, 9.80820307e-01, -1.46216651e+01],
         [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])

    from dipy.align.imaffine import AffineMap
    affine_map = AffineMap(pre_align,
                           static.shape, static_affine,
                           moving.shape, moving_affine)

    resampled = affine_map.transform(moving)

    metric = CCMetric(3)

    level_iters = [10, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

    mapping = sdr.optimize(static, moving, static_affine, moving_affine,
                           pre_align)

    warped_moving = mapping.transform(moving)

    for slc in range(41 - 12, 41 + 13):
        regtools.overlay_slices(static, resampled, slc, 1, 'Static',
                                'Pre Moving',
                                'GIFexample1/' + str(slc) + 'T1pre.png')
        regtools.overlay_slices(static, warped_moving, slc, 1, 'Static',
                                'Post moving',
                                'GIFexample1/' + str(slc) + 'T1post.png')
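The per-slice PNGs written into GIFexample1/ can afterwards be stitched into an animated GIF; a small sketch using imageio (this step is an assumption, not part of the original example, and the output name is hypothetical):

import imageio

frames = [imageio.imread('GIFexample1/{}T1post.png'.format(s))
          for s in range(41 - 12, 41 + 13)]
imageio.mimsave('GIFexample1/T1post.gif', frames, duration=0.2)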
Example #11
    def run(self, static_image_file, moving_image_files, affine_matrix_file,
            out_dir='', out_file='transformed.nii.gz'):

        """
        Parameters
        ----------
        static_image_file : string
            Path of the static image file.

        moving_image_files : string
            Path of the moving image(s). It can be a single image or a
            folder containing multiple images.

        affine_matrix_file : string
            The text file containing the affine matrix for transformation.

        out_dir : string, optional
            Directory to save the transformed files (default '').

        out_file : string, optional
            Name of the transformed file (default 'transformed.nii.gz').
            It is recommended to use the flag --mix-names to
            prevent the output files from being overwritten.

        """
        io = self.get_io_iterator()

        for static_image_file, moving_image_file, affine_matrix_file, \
                out_file in io:

            # Loading the image data from the input files into object.
            static_image, static_grid2world = load_nifti(static_image_file)

            moving_image, moving_grid2world = load_nifti(moving_image_file)

            # Doing a sanity check for validating the dimensions of the input
            # images.
            ImageRegistrationFlow.check_dimensions(static_image, moving_image)

            # Loading the affine matrix.
            affine_matrix = np.loadtxt(affine_matrix_file)

            # Setting up the affine transformation object.
            img_transformation = AffineMap(
                affine=affine_matrix,
                domain_grid_shape=static_image.shape,
                domain_grid2world=static_grid2world,
                codomain_grid_shape=moving_image.shape,
                codomain_grid2world=moving_grid2world)

            # Transforming the image.
            transformed = img_transformation.transform(moving_image)

            save_nifti(out_file, transformed, affine=static_grid2world)
Example #12
def resample(moving,
             static,
             moving_affine=None,
             static_affine=None,
             between_affine=None):
    """Resample an image (moving) from one space to another (static).

    Parameters
    ----------
    moving : array, nifti image or str
        Containing the data for the moving object, or full path to a nifti file
        with the moving data.

    moving_affine : 4x4 array, optional
        An affine transformation associated with the moving object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    static : array, nifti image or str
        Containing the data for the static object, or full path to a nifti file
        with the moving data.

    static_affine : 4x4 array, optional
        An affine transformation associated with the static object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    between_affine: 4x4 array, optional
        If an additional affine is needed between the two spaces.
        Default: identity (no additional registration).

    Returns
    -------
    A Nifti1Image class instance with the data from the moving object
    resampled into the space of the static object.

    """

    static, static_affine, moving, moving_affine, between_affine = \
        _handle_pipeline_inputs(moving, static,
                                moving_affine=moving_affine,
                                static_affine=static_affine,
                                starting_affine=between_affine)
    affine_map = AffineMap(between_affine, static.shape, static_affine,
                           moving.shape, moving_affine)
    resampled = affine_map.transform(moving)
    return nib.Nifti1Image(resampled, static_affine)
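_handle_pipeline_inputs belongs to the surrounding module and is not shown above. A minimal sketch of what it plausibly does, assuming it normalizes each input to an (array, affine) pair and defaults the between-space affine to identity:

import numpy as np
import nibabel as nib

def _handle_pipeline_inputs(moving, static, moving_affine=None,
                            static_affine=None, starting_affine=None):
    # Hypothetical normalization: accept arrays, nibabel images or file paths.
    def _to_data_affine(img, affine):
        if isinstance(img, str):
            img = nib.load(img)
        if hasattr(img, 'get_fdata'):
            data = img.get_fdata()
            affine = affine if affine is not None else img.affine
        else:
            data = np.asarray(img)
        if affine is None:
            raise ValueError("An affine is required when data is an array")
        return data, affine

    static, static_affine = _to_data_affine(static, static_affine)
    moving, moving_affine = _to_data_affine(moving, moving_affine)
    if starting_affine is None:
        starting_affine = np.eye(4)
    return static, static_affine, moving, moving_affine, starting_affine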
Example #13
File: image.py    Project: BIG-S2/PSC
def transform_anatomy(transfo, reference, moving, filename_to_save):
    dim, grid2world = get_reference_info(reference)

    moving_data, nib_file = get_data(moving, return_object=True)
    moving_affine = nib_file.affine

    if len(moving_data.shape) > 3:
        raise ValueError('Can only transform 3D images')

    affine_map = AffineMap(np.linalg.inv(transfo),
                           dim, grid2world,
                           moving_data.shape, moving_affine)

    resampled = affine_map.transform(moving_data)

    nib.save(nib.Nifti1Image(resampled, grid2world),
             filename_to_save)
Example #14
def transform_anatomy(transfo, reference, moving, filename_to_save):
    """
    Apply transformation to an image using Dipy's tool

    Parameters
    ----------
    transfo: numpy.ndarray
        Transformation matrix to be applied
    reference: str
        Filename of the reference image (target)
    moving: str
        Filename of the moving image
    filename_to_save: str
        Filename of the output image
    """
    dim, grid2world = get_reference_info(reference)
    static_data = get_data(reference)

    moving_data, nib_file = get_data(moving, return_object=True)
    moving_affine = nib_file.affine

    if moving_data.ndim == 3 and isinstance(moving_data[0, 0, 0],
                                            np.ScalarType):
        orig_type = moving_data.dtype
        affine_map = AffineMap(np.linalg.inv(transfo),
                               dim, grid2world,
                               moving_data.shape, moving_affine)
        resampled = affine_map.transform(moving_data.astype(np.float64))
        nib.save(nib.Nifti1Image(resampled.astype(orig_type), grid2world),
                 filename_to_save)
    elif len(moving_data[0, 0, 0]) > 1:
        if isinstance(moving_data[0, 0, 0], np.void):
            raise ValueError('Does not support TrackVis RGB')

        affine_map = AffineMap(np.linalg.inv(transfo),
                               dim[0:3], grid2world,
                               moving_data.shape[0:3], moving_affine)

        orig_type = moving_data.dtype
        resampled = transform_dwi(affine_map, static_data, moving_data)
        nib.save(nib.Nifti1Image(resampled.astype(orig_type), grid2world),
                 filename_to_save)
    else:
        raise ValueError('Does not support this dataset (shape, type, etc)')
Example #15
def resample(moving, static, moving_affine, static_affine):
    """Resample an image from one space to another.

    Parameters
    ----------
    moving : array
       The image to be resampled

    static : array
       The array whose space the moving image is resampled into.

    moving_affine : 4x4 array
       Voxel-to-world affine of the moving image.

    static_affine : 4x4 array
       Voxel-to-world affine of the static image.

    Returns
    -------
    resampled : the moving array resampled into the static array's space.
    """
    identity = np.eye(4)
    affine_map = AffineMap(identity, static.shape, static_affine, moving.shape,
                           moving_affine)
    resampled = affine_map.transform(moving)
    return resampled
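A brief usage sketch with DIPY's load_nifti; the file names are placeholders.

from dipy.io.image import load_nifti

moving_data, moving_affine = load_nifti("moving.nii.gz")  # hypothetical paths
static_data, static_affine = load_nifti("static.nii.gz")
moving_in_static = resample(moving_data, static_data, moving_affine, static_affine)
print(moving_in_static.shape)  # matches static_data.shape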
Example #16
def resample_volume(moving, static):
    """ 
    Resample a nifti image into the space of another nifti image
    
    Parameters
    ----------
    moving : Nifti1Image
        The 'source' image.
    static : Nifti1Image
        The 'target' image.
        
    Returns
    -------
    resampled_img : Nifti1Image
       The source data in the target space, with the target affine
    """
    affine_map = AffineMap(np.eye(4),
                           static.shape[:3], static.affine, 
                           moving.shape, moving.affine)
    
    resampled = affine_map.transform(moving.get_data())
    return nib.Nifti1Image(resampled, static.affine)
Example #17
    def get_elastic_transform(subject_fa, atlas_fa, subject_path=".."):
        '''
        :param subject_fa:   FA image (nibabel) of the subject; used as the static image
        :param atlas_fa:     FA image (nibabel) of the atlas; used as the moving image (the atlas is warped onto the subject)
        :param subject_path: directory in which the computed transform is cached

        :return: elastic transformation map (DiffeomorphicMap)
        '''

        if isfile(subject_path + "/FAReg_elastic_transform.pklz"):
            logging.debug("Load existing elastic transform...")
            return Utils.load_pkl_compressed(subject_path +
                                             "/FAReg_elastic_transform.pklz")

        static_img = subject_fa
        static = static_img.get_data()
        moving_img = atlas_fa
        moving = moving_img.get_data()

        # Optional affine pre-alignment of the moving image to the static coordinate
        # system; needed if the two images are defined on very different grids.
        affine_map = AffineMap(np.eye(4), static.shape,
                               static_img.get_affine(), moving.shape,
                               moving_img.get_affine())
        moving = affine_map.transform(moving)

        start_time = time.time()
        metric = CCMetric(3)
        level_iters = [10, 10, 5]  # more accurate
        # level_iters = [2, 2, 2]  # faster, but does not help much
        sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
        mapping = sdr.optimize(static, moving)
        # mapping = sdr.optimize(static, moving, Utils.invert_x_and_y(static_img.get_affine()), Utils.invert_x_and_y(moving_img.get_affine())) #not needed
        logging.debug("elastic transform took {0:.2f}s".format(time.time() -
                                                               start_time))

        logging.debug("write elastic transform...")
        Utils.save_pkl_compressed(
            subject_path + "/FAReg_elastic_transform.pklz", mapping)
        return mapping
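Utils.save_pkl_compressed / Utils.load_pkl_compressed are project helpers that are not shown here. A hypothetical sketch, assuming they are thin wrappers around gzip and pickle:

import gzip
import pickle

def save_pkl_compressed(path, obj):
    # Hypothetical: pickle the object into a gzip-compressed file.
    with gzip.open(path, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)

def load_pkl_compressed(path):
    with gzip.open(path, 'rb') as f:
        return pickle.load(f)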
Example #18
def resample(moving, static, moving_affine, static_affine):
    """Resample an image from one space to another.

    Parameters
    ----------
    moving : array
       The image to be resampled

    static : array
       The array whose space the moving image is resampled into.

    moving_affine : 4x4 array
       Voxel-to-world affine of the moving image.

    static_affine : 4x4 array
       Voxel-to-world affine of the static image.

    Returns
    -------
    resampled : the moving array resampled into the static array's space.
    """
    identity = np.eye(4)
    affine_map = AffineMap(identity,
                           static.shape, static_affine,
                           moving.shape, moving_affine)
    resampled = affine_map.transform(moving)
    return resampled
Example #19
def affine_reg(static_path, moving_path):
    """

    :param static_path:
    :param moving_path:
    :return:
    """
    t0_time = time.time()

    print('-->Applying affine reg over', basename(moving_path), 'based on',
          basename(static_path))

    static_img = nib.load(static_path)
    static = static_img.get_data()
    static_grid2world = static_img.affine

    moving_img = nib.load(moving_path)
    moving = np.array(moving_img.get_data())
    moving_grid2world = moving_img.affine

    print('---> I. Translation of the moving image towards the static image')

    print(
        '---> Resampling the moving image on a grid of the same dimensions as the static image'
    )

    identity = np.eye(4)
    affine_map = AffineMap(identity, static.shape, static_grid2world,
                           moving.shape, moving_grid2world)
    resampled = affine_map.transform(moving)

    regtools.overlay_slices(static, resampled, None, 0, "Static", "Moving",
                            "resampled_0.png")
    regtools.overlay_slices(static, resampled, None, 1, "Static", "Moving",
                            "resampled_1.png")
    regtools.overlay_slices(static, resampled, None, 2, "Static", "Moving",
                            "resampled_2.png")

    print('---> Aligning the centers of mass of the two images')

    c_of_mass = transform_centers_of_mass(static, static_grid2world, moving,
                                          moving_grid2world)

    print(
        '---> We can now transform the moving image and draw it on top of the static image'
    )

    transformed = c_of_mass.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_com_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_com_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_com_2.png")

    print('---> II. Refine  by looking for an affine transform')

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    """
    Now we go ahead and instantiate the registration class with the configuration
    we just prepared
    """
    print('---> Computing Affine Registration (non-convex optimization)')

    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(static,
                                  moving,
                                  transform,
                                  params0,
                                  static_grid2world,
                                  moving_grid2world,
                                  starting_affine=starting_affine)

    transformed = translation.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_trans_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_trans_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_trans_2.png")

    print('--->III. Refining with a rigid transform')

    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(static,
                            moving,
                            transform,
                            params0,
                            static_grid2world,
                            moving_grid2world,
                            starting_affine=starting_affine)

    transformed = rigid.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_rigid_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_rigid_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_rigid_2.png")

    print(
        '--->IV. Refining with a full affine transform (translation, rotation, scale and shear)'
    )

    transform = AffineTransform3D()
    params0 = None
    starting_affine = rigid.affine
    affine = affreg.optimize(static,
                             moving,
                             transform,
                             params0,
                             static_grid2world,
                             moving_grid2world,
                             starting_affine=starting_affine)

    print('---> Results in a slight shear and scale')

    transformed = affine.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_affine_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_affine_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_affine_2.png")

    name = os.path.splitext(basename(moving_path))[0] + '_affine_reg'
    nib.save(nib.Nifti1Image(transformed, np.eye(4)), name)
    t1_time = time.time()
    total_time = t1_time - t0_time
    print('Total time:' + str(total_time))
    print('Affine registration applied successfully')
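Usage is just a pair of file paths (the names below are placeholders); the function writes the overlay PNGs and the '<moving>_affine_reg' NIfTI into the working directory.

affine_reg("static_T1.nii.gz", "moving_b0.nii.gz")  # hypothetical file names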
Example #20
def register(src,
             target=None,
             ROI=None,
             target_shape=Shape.mMR,
             src_resolution=Res.MR,
             target_resolution=Res.mMR,
             method="CoM",
             src_offset=None,
             dtype=np.float32):
    """
    Transforms `src` into `target` space.

    @param src  : ndarray. Volume to transform.
    @param target  : ndarray, optional.
      If (default: None), perform a basic transform using the other params.
      If ndarray, use as reference static image for registration.
    @param ROI  : tuple, optional.
      Ignored if `target` is unspecified.
      Region within `target` to use for registration.
      [default: ((0, None),)] for whole volume. Use e.g.:
      ((0, None), (100, -120), (110, -110)) to mean [0:, 100:-120, 110:-110].
    @param target_shape  : tuple, optional.
      Ignored if `target` is specified.
    @param src_offset  : tuple, optional.
      Static initial translation [default: (0, 0, 0)].
      Useful when no `target` is specified.
    @param method  : str, optional.
      [default: "CoM"]  : centre of mass.
    """
    from dipy.align.imaffine import AffineMap, transform_centers_of_mass

    assert src.ndim == 3
    if target is not None:
        assert target.ndim == 3
    assert len(target_shape) == 3
    assert len(src_resolution) == 3
    assert len(target_resolution) == 3

    if ROI is None:
        ROI = ((0, None), )
    ROI = tuple(slice(i, j) for i, j in ROI)
    if src_offset is None:
        src_offset = (0, 0, 0)
    method = method.lower()

    moving = src
    # scale
    affine_init = np.diag((src_resolution / target_resolution).tolist() + [1])
    # centre offset
    affine_init[:3, -1] = target.shape if target is not None else target_shape
    affine_init[:3, -1] -= moving.shape * src_resolution / target_resolution
    affine_init[:3, -1] /= 2
    affine_init[:3, -1] += src_offset
    affine_map = AffineMap(
        np.eye(4),
        target_shape,
        np.eye(4),  # unmoved target
        moving.shape,
        affine_init)
    src = affine_map.transform(moving)

    if target is not None:
        static = target
        if np.isnan(static).sum():
            log.warn("NaNs in target reference - skipping")
        else:
            # remove noise outside ROI
            msk = np.zeros_like(static)
            msk[ROI] = 1
            msk = affine_map.transform_inverse(msk)
            moving = np.array(moving)
            moving[msk == 0] = 0

            if method == "com":
                method = transform_centers_of_mass(static, np.eye(4), moving,
                                                   affine_init)
            else:
                raise KeyError("Unknown method:" + method)
            src = method.transform(moving)

    if dtype is not None:
        src = src.astype(dtype)
    return src
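A usage sketch; the volumes, shapes and resolutions below are purely illustrative assumptions (the project's Shape/Res constants are not reproduced here, so everything is passed explicitly).

import numpy as np

src_vol = np.random.rand(128, 128, 96).astype(np.float32)      # hypothetical moving volume
target_vol = np.random.rand(127, 344, 344).astype(np.float32)  # hypothetical target volume

registered = register(src_vol,
                      target=target_vol,
                      ROI=((0, None), (100, -120), (110, -110)),
                      target_shape=target_vol.shape,
                      src_resolution=np.array([1.0, 1.0, 1.0]),     # mm, illustrative
                      target_resolution=np.array([2.0, 2.0, 2.0]),  # mm, illustrative
                      method="CoM")
print(registered.shape)  # same grid as target_vol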
Example #21
static_grid2world = static_affine
"""
Let's create a moving image by transforming the static image.

"""

affmat = np.eye(4)
affmat[0, -1] = 4
affmat[1, -1] = 12
theta = 0.1
c, s = np.cos(theta), np.sin(theta)
affmat[0:2, 0:2] = np.array([[c, -s], [s, c]])
affine_map = AffineMap(affmat, static.shape, static_grid2world, static.shape,
                       static_grid2world)
moving = affine_map.transform(static)
moving_affine = static_affine.copy()
moving_grid2world = static_grid2world.copy()

regtools.overlay_slices(static, moving, None, 2, "Static", "Moving",
                        "deregistered.png")
"""
.. figure:: deregistered.png
   :align: center

   Same images but misaligned.
"""
"""
Let's make some registration settings.
"""
Example #22
    def run(self,
            static_image_files,
            moving_image_files,
            transform_map_file,
            transform_type='affine',
            out_dir='',
            out_file='transformed.nii.gz'):
        """
        Parameters
        ----------
        static_image_files : string
            Path of the static image file.

        moving_image_files : string
            Path of the moving image(s). It can be a single image or a
            folder containing multiple images.

        transform_map_file : string
            For the affine case, it should be a text (*.txt) file containing
            the affine matrix. For the diffeomorphic case, it should be a
            nifti file containing the displacement field in each voxel,
            with shape (x, y, z, 3, 2) (forward and backward fields).

        transform_type : string, optional
            Select the transformation type to apply between 'affine' or
            'diffeomorphic'. (default affine)

        out_dir : string, optional
            Directory to save the transformed files (default '').

        out_file : string, optional
            Name of the transformed file (default 'transformed.nii.gz').
            It is recommended to use the flag --mix-names to
            prevent the output files from being overwritten.

        """
        if transform_type.lower() not in ['affine', 'diffeomorphic']:
            raise ValueError("Invalid transformation type: Please"
                             " provide a valid transform like 'affine'"
                             " or 'diffeomorphic'")

        io = self.get_io_iterator()

        for static_image_file, moving_image_file, transform_file, \
                out_file in io:

            # Loading the image data from the input files into object.
            static_image, static_grid2world = load_nifti(static_image_file)
            moving_image, moving_grid2world = load_nifti(moving_image_file)

            # Doing a sanity check for validating the dimensions of the input
            # images.
            check_dimensions(static_image, moving_image)

            if transform_type.lower() == 'affine':
                # Loading the affine matrix.
                affine_matrix = np.loadtxt(transform_file)

                # Setting up the affine transformation object.
                mapping = AffineMap(affine=affine_matrix,
                                    domain_grid_shape=static_image.shape,
                                    domain_grid2world=static_grid2world,
                                    codomain_grid_shape=moving_image.shape,
                                    codomain_grid2world=moving_grid2world)

            elif transform_type.lower() == 'diffeomorphic':
                # Loading the diffeomorphic map.
                disp_data, disp_affine = load_nifti(transform_file)

                mapping = DiffeomorphicMap(
                    3,
                    disp_data.shape[:3],
                    disp_grid2world=np.linalg.inv(disp_affine),
                    domain_shape=static_image.shape,
                    domain_grid2world=static_grid2world,
                    codomain_shape=moving_image.shape,
                    codomain_grid2world=moving_grid2world)

                mapping.forward = disp_data[..., 0]
                mapping.backward = disp_data[..., 1]
                mapping.is_inverse = True

            # Transforming the image.
            transformed = mapping.transform(moving_image)

            save_nifti(out_file, transformed, affine=static_grid2world)
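For the diffeomorphic branch, a transform_map_file of shape (x, y, z, 3, 2) could be produced by stacking the forward and backward displacement fields of a fitted mapping; a hedged sketch (the output name is hypothetical, and the affine convention must match what the loader above expects — shown here with the static grid-to-world affine as an assumption):

import numpy as np
from dipy.io.image import save_nifti

# 'mapping' is assumed to be the DiffeomorphicMap returned by
# SymmetricDiffeomorphicRegistration.optimize(...).
disp = np.stack([mapping.get_forward_field(),
                 mapping.get_backward_field()], axis=-1)  # (x, y, z, 3, 2)
save_nifti('displacement_field.nii.gz', disp, affine=static_grid2world)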
Example #23
def resample(moving, static, moving_grid2world, static_grid2world):
    """Resample an image from one space to another."""
    identity = np.eye(4)
    affine_map = AffineMap(identity, static.shape, static_grid2world, moving.shape, moving_grid2world)
    resampled = affine_map.transform(moving)
def register_save(inputpathdir, target_path, subject, outputpath, figspath,
                  params, registration_types, applydirs, verbose):
    anat_path = get_anat(inputpathdir, subject)
    #myanat = load_nifti(anat_path)
    myanat = nib.load(anat_path)
    anat_data = np.squeeze(myanat.get_data()[..., 0])
    anat_affine = myanat.affine
    anat_hdr = myanat.header
    vox_size = myanat.header.get_zooms()[0]
    #mynifti = load_nifti("/Volumes/Data/Badea/Lab/19abb14/N57437_nii4D.nii")
    #anat_data = np.squeeze(myanat[0])[..., 0]
    #anat_affine = myanat[1]
    #hdr = myanat.header

    mytarget = nib.load(target_path)
    target_data = np.squeeze(mytarget.get_data()[..., 0])
    target_affine = mytarget.affine

    identity = np.eye(4)

    affine_map = AffineMap(identity, target_data.shape, target_affine,
                           anat_data.shape, anat_affine)
    resampled = affine_map.transform(anat_data)
    """
    regtools.overlay_slices(target_data, resampled, None, 0,
                            "target_data", "anat_data", figspath + "resampled_0.png")
    regtools.overlay_slices(target_data, resampled, None, 1,
                            "target_data", "anat_data", figspath + "resampled_1.png")
    regtools.overlay_slices(target_data, resampled, None, 2,
                            "target_data", "anat_data", figspath + "resampled_2.png")
    """
    c_of_mass = transform_centers_of_mass(target_data, target_affine,
                                          anat_data, anat_affine)
    apply_niftis = []
    apply_trks = []
    if inputpathdir in applydirs:
        applyfiles = [anat_path]
    else:
        applyfiles = []
    for applydir in applydirs:
        apply_niftis.extend(get_niftis(applydir, subject))
        apply_trks.extend(get_trks(applydir, subject))

    if "center_mass" in registration_types:

        if apply_trks:
            metric = CCMetric(3)
            level_iters = [10, 10, 5]
            sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
            mapping = sdr.optimize(target_data, anat_data, target_affine,
                                   anat_affine, c_of_mass.affine)

        for apply_nifti in apply_niftis:
            fname = os.path.basename(apply_nifti).split(".")[0]
            fpath = outputpath + fname + "_centermass.nii"
            applynii = nib.load(apply_nifti)
            apply_data = applynii.get_data()
            apply_affine = applynii.affine
            apply_hdr = myanat.header

            if len(np.shape(apply_data)) == 4:
                transformed_all = c_of_mass.transform(apply_data, apply4D=True)
                transformed = transformed_all[:, :, :, 0]
            else:
                transformed_all = c_of_mass.transform(apply_data)
                transformed = transformed_all
            save_nifti(fpath, transformed_all, apply_affine, hdr=apply_hdr)
            if figspath is not None:
                regtools.overlay_slices(target_data, transformed, None, 0,
                                        "target_data", "Transformed",
                                        figspath + fname + "_centermass_1.png")
                regtools.overlay_slices(target_data, transformed, None, 1,
                                        "target_data", "Transformed",
                                        figspath + fname + "_centermass_2.png")
                regtools.overlay_slices(target_data, transformed, None, 2,
                                        "target_data", "Transformed",
                                        figspath + fname + "_centermass_3.png")
            if verbose:
                print("Saved the file at " + fpath)
        #mapping = sdr.optimize(target_data, anat_data, target_affine, anat_affine,
        #                       c_of_mass.affine)
        #warped_moving = mapping.transform(anat_data)
        for apply_trk in apply_trks:

            fname = os.path.basename(apply_trk).split(".")[0]
            fpath = outputpath + fname + "_centermass.trk"

            sft = load_tractogram(apply_trk, 'same')
            target_isocenter = np.diag(
                np.array([-vox_size, vox_size, vox_size, 1]))
            origin_affine = affine_map.affine.copy()
            origin_affine[0][3] = -origin_affine[0][3]
            origin_affine[1][3] = -origin_affine[1][3]
            origin_affine[2][3] = origin_affine[2][3] / vox_size

            origin_affine[1][3] = origin_affine[1][3] / vox_size**2

            # Apply the deformation and correct for the extents
            mni_streamlines = deform_streamlines(
                sft.streamlines,
                deform_field=mapping.get_forward_field(),
                stream_to_current_grid=target_isocenter,
                current_grid_to_world=origin_affine,
                stream_to_ref_grid=target_isocenter,
                ref_grid_to_world=np.eye(4))

            if has_fury:

                show_template_bundles(mni_streamlines,
                                      anat_data,
                                      show=False,
                                      fname=figspath + fname +
                                      '_streamlines_centermass.png')

            sft = StatefulTractogram(mni_streamlines, myanat, Space.RASMM)

            save_tractogram(sft, fpath, bbox_valid_check=False)
            if verbose:
                print("Saved the file at " + fpath)

    metric = MutualInformationMetric(params.nbins, params.sampling_prop)

    if "AffineRegistration" in registration_types:
        affreg = AffineRegistration(metric=metric,
                                    level_iters=params.level_iters,
                                    sigmas=params.sigmas,
                                    factors=params.factors)

        transform = TranslationTransform3D()
        params0 = None
        starting_affine = c_of_mass.affine
        translation = affreg.optimize(target_data,
                                      anat_data,
                                      transform,
                                      params0,
                                      target_affine,
                                      anat_affine,
                                      starting_affine=starting_affine)

        if apply_trks:
            metric = CCMetric(3)
            level_iters = [10, 10, 5]
            sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
            mapping = sdr.optimize(target_data, anat_data, target_affine,
                                   anat_affine, translation.affine)

        for apply_nifti in apply_niftis:
            fname = os.path.basename(apply_nifti).split(".")[0]
            fpath = outputpath + fname + "_affinereg.nii"

            applynii = nib.load(apply_nifti)
            apply_data = applynii.get_data()
            apply_affine = applynii.affine
            apply_hdr = myanat.header

            if len(np.shape(apply_data)) == 4:
                transformed_all = translation.transform(apply_data,
                                                        apply4D=True)
                transformed = transformed_all[:, :, :, 0]
            else:
                transformed_all = translation.transform(apply_data)
                transformed = transformed_all
            save_nifti(fpath, transformed_all, anat_affine, hdr=anat_hdr)
            if figspath is not None:
                regtools.overlay_slices(target_data, transformed, None, 0,
                                        "target_data", "Transformed",
                                        figspath + fname + "_affinereg_1.png")
                regtools.overlay_slices(target_data, transformed, None, 1,
                                        "target_data", "Transformed",
                                        figspath + fname + "_affinereg_2.png")
                regtools.overlay_slices(target_data, transformed, None, 2,
                                        "target_data", "Transformed",
                                        figspath + fname + "_affinereg_3.png")
            if verbose:
                print("Saved the file at " + fpath)

        for apply_trk in apply_trks:

            fname = os.path.basename(apply_trk).split(".")[0]
            fpath = outputpath + fname + "_affinereg.trk"

            sft = load_tractogram(apply_trk, 'same')
            target_isocenter = np.diag(
                np.array([-vox_size, vox_size, vox_size, 1]))
            origin_affine = affine_map.affine.copy()
            origin_affine[0][3] = -origin_affine[0][3]
            origin_affine[1][3] = -origin_affine[1][3]
            origin_affine[2][3] = origin_affine[2][3] / vox_size

            origin_affine[1][3] = origin_affine[1][3] / vox_size**2

            # Apply the deformation and correct for the extents
            mni_streamlines = deform_streamlines(
                sft.streamlines,
                deform_field=mapping.get_forward_field(),
                stream_to_current_grid=target_isocenter,
                current_grid_to_world=origin_affine,
                stream_to_ref_grid=target_isocenter,
                ref_grid_to_world=np.eye(4))

            if has_fury:

                show_template_bundles(mni_streamlines,
                                      anat_data,
                                      show=False,
                                      fname=figspath + fname +
                                      '_streamlines_affinereg.png')

            sft = StatefulTractogram(mni_streamlines, myanat, Space.RASMM)

            save_tractogram(sft, fpath, bbox_valid_check=False)
            if verbose:
                print("Saved the file at " + fpath)

    if "RigidTransform3D" in registration_types:
        transform = RigidTransform3D()
        params0 = None
        if 'translation' not in locals():
            affreg = AffineRegistration(metric=metric,
                                        level_iters=params.level_iters,
                                        sigmas=params.sigmas,
                                        factors=params.factors)
            translation = affreg.optimize(target_data,
                                          anat_data,
                                          transform,
                                          params0,
                                          target_affine,
                                          anat_affine,
                                          starting_affine=c_of_mass.affine)
        starting_affine = translation.affine
        rigid = affreg.optimize(target_data,
                                anat_data,
                                transform,
                                params0,
                                target_affine,
                                anat_affine,
                                starting_affine=starting_affine)

        transformed = rigid.transform(anat_data)

        if apply_trks:
            metric = CCMetric(3)
            level_iters = [10, 10, 5]
            sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
            mapping = sdr.optimize(target_data, anat_data, target_affine,
                                   anat_affine, rigid.affine)

        for apply_nifti in apply_niftis:
            fname = os.path.basename(apply_nifti).split(".")[0]
            fpath = outputpath + fname + "_rigidtransf3d.nii"

            applynii = nib.load(apply_nifti)
            apply_data = applynii.get_data()
            apply_affine = applynii.affine
            apply_hdr = myanat.header

            if len(np.shape(apply_data)) == 4:
                transformed_all = rigid.transform(apply_data, apply4D=True)
                transformed = transformed_all[:, :, :, 0]
            else:
                transformed_all = rigid.transform(apply_data)
                transformed = transformed_all
            save_nifti(fpath, transformed_all, anat_affine, hdr=anat_hdr)
            if figspath is not None:
                regtools.overlay_slices(
                    target_data, transformed, None, 0, "target_data",
                    "Transformed", figspath + fname + "_rigidtransf3d_1.png")
                regtools.overlay_slices(
                    target_data, transformed, None, 1, "target_data",
                    "Transformed", figspath + fname + "_rigidtransf3d_2.png")
                regtools.overlay_slices(
                    target_data, transformed, None, 2, "target_data",
                    "Transformed", figspath + fname + "_rigidtransf3d_3.png")
            if verbose:
                print("Saved the file at " + fpath)

        for apply_trk in apply_trks:

            fname = os.path.basename(apply_trk).split(".")[0]
            fpath = outputpath + fname + "_rigidtransf3d.trk"

            sft = load_tractogram(apply_trk, 'same')
            target_isocenter = np.diag(
                np.array([-vox_size, vox_size, vox_size, 1]))
            origin_affine = affine_map.affine.copy()
            origin_affine[0][3] = -origin_affine[0][3]
            origin_affine[1][3] = -origin_affine[1][3]
            origin_affine[2][3] = origin_affine[2][3] / vox_size

            origin_affine[1][3] = origin_affine[1][3] / vox_size**2

            # Apply the deformation and correct for the extents
            mni_streamlines = deform_streamlines(
                sft.streamlines,
                deform_field=mapping.get_forward_field(),
                stream_to_current_grid=target_isocenter,
                current_grid_to_world=origin_affine,
                stream_to_ref_grid=target_isocenter,
                ref_grid_to_world=np.eye(4))

            if has_fury:

                show_template_bundles(mni_streamlines,
                                      anat_data,
                                      show=False,
                                      fname=figspath + fname +
                                      '_rigidtransf3d.png')

            sft = StatefulTractogram(mni_streamlines, myanat, Space.RASMM)

            save_tractogram(sft, fpath, bbox_valid_check=False)
            if verbose:
                print("Saved the file at " + fpath)

    if "AffineTransform3D" in registration_types:
        transform = AffineTransform3D()
        params0 = None
        starting_affine = rigid.affine
        affine = affreg.optimize(target_data,
                                 anat_data,
                                 transform,
                                 params0,
                                 target_affine,
                                 anat_affine,
                                 starting_affine=starting_affine)

        transformed = affine.transform(anat_data)

        if apply_trks:
            metric = CCMetric(3)
            level_iters = [10, 10, 5]
            sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
            mapping = sdr.optimize(target_data, anat_data, target_affine,
                                   anat_affine, affine.affine)

        for apply_nifti in apply_niftis:
            fname = os.path.basename(apply_nifti).split(".")[0]
            fpath = outputpath + fname + "_affinetransf3d.nii"

            applynii = nib.load(apply_nifti)
            apply_data = applynii.get_data()
            apply_affine = applynii.affine
            apply_hdr = myanat.header

            if len(np.shape(apply_data)) == 4:
                transformed_all = affine.transform(apply_data, apply4D=True)
                transformed = transformed_all[:, :, :, 0]
            else:
                transformed_all = affine.transform(apply_data)
                transformed = transformed_all
            save_nifti(fpath, transformed_all, anat_affine, hdr=anat_hdr)
            if figspath is not None:
                regtools.overlay_slices(
                    target_data, transformed, None, 0, "target_data",
                    "Transformed", figspath + fname + "_affinetransf3d_1.png")
                regtools.overlay_slices(
                    target_data, transformed, None, 1, "target_data",
                    "Transformed", figspath + fname + "_affinetransf3d_2.png")
                regtools.overlay_slices(
                    target_data, transformed, None, 2, "target_data",
                    "Transformed", figspath + fname + "_affinetransf3d_3.png")
            if verbose:
                print("Saved the file at " + fpath)

        for apply_trk in apply_trks:

            fname = os.path.basename(apply_trk).split(".")[0]
            fpath = outputpath + fname + "_affinetransf3d.trk"

            sft = load_tractogram(apply_trk, 'same')
            target_isocenter = np.diag(
                np.array([-vox_size, vox_size, vox_size, 1]))
            origin_affine = affine_map.affine.copy()
            origin_affine[0][3] = -origin_affine[0][3]
            origin_affine[1][3] = -origin_affine[1][3]
            origin_affine[2][3] = origin_affine[2][3] / vox_size

            origin_affine[1][3] = origin_affine[1][3] / vox_size**2

            # Apply the deformation and correct for the extents
            mni_streamlines = deform_streamlines(
                sft.streamlines,
                deform_field=mapping.get_forward_field(),
                stream_to_current_grid=target_isocenter,
                current_grid_to_world=origin_affine,
                stream_to_ref_grid=target_isocenter,
                ref_grid_to_world=np.eye(4))

            if has_fury:

                show_template_bundles(mni_streamlines,
                                      anat_data,
                                      show=False,
                                      fname=figspath + fname +
                                      '_streamlines_affinetransf3d.png')

            sft = StatefulTractogram(mni_streamlines, myanat, Space.RASMM)

            save_tractogram(sft, fpath, bbox_valid_check=False)
            if verbose:
                print("Saved the file at " + fpath)
Example #25
moving = np.array(nib_syn_b0.get_data())
moving_grid2world = nib_syn_b0.affine

"""
We can see that the images are far from aligned by drawing one on top of
the other. The images don't even have the same number of voxels, so in order
to draw one on top of the other we need to resample the moving image on a
grid of the same dimensions as the static image. We can do this by
"transforming" the moving image using an identity transform.
"""

identity = np.eye(4)
affine_map = AffineMap(identity,
                       static.shape, static_grid2world,
                       moving.shape, moving_grid2world)
resampled = affine_map.transform(moving)
regtools.overlay_slices(static, resampled, None, 0,
                        "Static", "Moving", "resampled_0.png")
regtools.overlay_slices(static, resampled, None, 1,
                        "Static", "Moving", "resampled_1.png")
regtools.overlay_slices(static, resampled, None, 2,
                        "Static", "Moving", "resampled_2.png")

"""
.. figure:: resampled_0.png
   :align: center
.. figure:: resampled_1.png
   :align: center
.. figure:: resampled_2.png
   :align: center
Example #26
"""
As we did in the 2D example, we would like to visualize (some slices of) the
two volumes by overlapping them over two channels of a color image. To do that
we need them to be sampled on the same grid, so let's first re-sample the
moving image on the static grid. We create an AffineMap to transform the moving
image towards the static image.
"""

if True:
    # use CPU-based AffineMap until GPU implementation is completed
    from dipy.align.imaffine import AffineMap

    affine_map = AffineMap(pre_align, static.shape, static_affine,
                           moving.shape, moving_affine)

    resampled = cp.asarray(affine_map.transform(moving.get()))
else:
    # TODO: implement AffineMap on the GPU
    from cudipy.align.imaffine import AffineMap

    affine_map = AffineMap(pre_align, static.shape, static_affine,
                           moving.shape, moving_affine)

    resampled = affine_map.transform(moving)
"""
plot the overlapped middle slices of the volumes
"""

regtools.overlay_slices(static, resampled, None, 1, "Static", "Moving",
                        "input_3d.png")
"""
Ejemplo n.º 27
0
def registration(diff, affine_diff, anat, affine_anat):
    # Affine transformation between diffusion and anatomical data
    static = np.squeeze(diff)[..., 0]
    static_grid2world = affine_diff

    moving = anat
    moving_grid2world = affine_anat

    identity = np.eye(4)
    affine_map = AffineMap(identity, static.shape, static_grid2world,
                           moving.shape, moving_grid2world)
    resampled = affine_map.transform(moving)
    regtools.overlay_slices(static, resampled, None, 0, "Static", "Moving",
                            "resampled_0.png")
    regtools.overlay_slices(static, resampled, None, 1, "Static", "Moving",
                            "resampled_1.png")
    regtools.overlay_slices(static, resampled, None, 2, "Static", "Moving",
                            "resampled_2.png")

    c_of_mass = transform_centers_of_mass(static, static_grid2world, moving,
                                          moving_grid2world)

    transformed = c_of_mass.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_com_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_com_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_com_2.png")

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    level_iters = [10000, 1000, 100]
    factors = [4, 2, 1]
    sigmas = [3.0, 1.0, 0.0]
    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(static,
                                  moving,
                                  transform,
                                  params0,
                                  static_grid2world,
                                  moving_grid2world,
                                  starting_affine=starting_affine)

    transformed = translation.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_trans_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_trans_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_trans_2.png")

    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(static,
                            moving,
                            transform,
                            params0,
                            static_grid2world,
                            moving_grid2world,
                            starting_affine=starting_affine)

    transformed = rigid.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_rigid_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_rigid_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_rigid_2.png")

    transform = AffineTransform3D()
    params0 = None
    starting_affine = rigid.affine
    affine = affreg.optimize(static,
                             moving,
                             transform,
                             params0,
                             static_grid2world,
                             moving_grid2world,
                             starting_affine=starting_affine)
    transformed = affine.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_affine_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_affine_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_affine_2.png")

    inverse_map = AffineMap(starting_affine, static.shape, static_grid2world,
                            moving.shape, moving_grid2world)
    resampled_inverse = inverse_map.transform_inverse(transformed,
                                                      resample_only=True)
    nib.save(nib.Nifti1Image(resampled_inverse, affine_diff),
             'brain.coreg.nii.gz')
    return transformed
Ejemplo n.º 28
0
def affine_registration(moving,
                        static,
                        moving_affine=None,
                        static_affine=None,
                        pipeline=None,
                        starting_affine=None,
                        metric='MI',
                        level_iters=None,
                        sigmas=None,
                        factors=None,
                        ret_metric=False,
                        **metric_kwargs):
    """
    Find the affine transformation between two 3D images. Alternatively, find
    the combination of several linear transformations.

    Parameters
    ----------
    moving : array, nifti image or str
        Containing the data for the moving object, or full path to a nifti file
        with the moving data.

    static : array, nifti image or str
        Containing the data for the static object, or full path to a nifti file
        with the static data.

    moving_affine : 4x4 array, optional
        An affine transformation associated with the moving object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    static_affine : 4x4 array, optional
        An affine transformation associated with the static object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    pipeline : list of str, optional
        Sequence of transforms to use in the gradual fitting. Default: gradual
        fit of the full affine (executed from left to right):
        ``["center_of_mass", "translation", "rigid", "affine"]``
        Alternatively, any other combination of the following registration
        methods might be used: center_of_mass, translation, rigid,
        rigid_isoscaling, rigid_scaling and affine.

    starting_affine: 4x4 array, optional
        Initial guess for the transformation between the spaces.
        Default: identity.

    metric : str, optional.
        Currently only supports 'MI' for MutualInformationMetric.

    level_iters : sequence, optional
        AffineRegistration key-word argument: the number of iterations at each
        scale of the scale space. `level_iters[0]` corresponds to the coarsest
        scale and `level_iters[-1]` to the finest. By default, a 3-level scale
        space with the iteration sequence [10000, 1000, 100] will be used.

    sigmas : sequence of floats, optional
        AffineRegistration key-word argument: custom smoothing parameter to
        build the scale space (one parameter for each scale). By default,
        the sequence of sigmas will be [3, 1, 0].

    factors : sequence of floats, optional
        AffineRegistration key-word argument: custom scale factors to build the
        scale space (one factor for each scale). By default, the sequence of
        factors will be [4, 2, 1].

    ret_metric : boolean, optional
        Set it to True to return the value of the optimized coefficients and
        the optimization quality metric.

    nbins : int, optional
        MutualInformationMetric key-word argument: the number of bins to be
        used for computing the intensity histograms. The default is 32.

    sampling_proportion : None or float in interval (0, 1], optional
        MutualInformationMetric key-word argument: There are two types of
        sampling: dense and sparse. Dense sampling uses all voxels for
        estimating the (joint and marginal) intensity histograms, while
        sparse sampling uses a subset of them. If `sampling_proportion` is
        None, then dense sampling is used. If `sampling_proportion` is a
        floating point value in (0,1] then sparse sampling is used,
        where `sampling_proportion` specifies the proportion of voxels to
        be used. The default is None (dense sampling).

    Returns
    -------
    transformed : array with moving data resampled to the static space
        after computing the affine transformation.
    affine : the affine 4x4 associated with the transformation.
    xopt : the value of the optimized coefficients.
    fopt : the value of the optimization quality metric.

    Notes
    -----
    Performs a gradual registration between the two inputs, using a pipeline
    that gradually approximates the final registration. If the final default
    step (`affine`) is omitted, the resulting affine may not have all 12
    degrees of freedom adjusted.
    """
    pipeline = pipeline or ["center_of_mass", "translation", "rigid", "affine"]
    level_iters = level_iters or [10000, 1000, 100]
    sigmas = sigmas or [3, 1, 0.0]
    factors = factors or [4, 2, 1]

    static, static_affine, moving, moving_affine, starting_affine = \
        _handle_pipeline_inputs(moving, static,
                                moving_affine=moving_affine,
                                static_affine=static_affine,
                                starting_affine=starting_affine)

    # Define the Affine registration object we'll use with the chosen metric.
    # For now, there is only one metric (mutual information)
    use_metric = affine_metric_dict[metric](**metric_kwargs)

    affreg = AffineRegistration(metric=use_metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    # Convert pipeline to sanitized list of str
    pipeline = list(pipeline)
    for fi, func in enumerate(pipeline):
        if callable(func):
            for key, val in _METHOD_DICT.items():
                if func is val[0]:  # if they passed the callable equiv.
                    pipeline[fi] = func = key
                    break
        if not isinstance(func, str) or func not in _METHOD_DICT:
            raise ValueError(f'pipeline[{fi}] must be one of '
                             f'{list(_METHOD_DICT)}, got {repr(func)}')

    if pipeline == ["center_of_mass"] and ret_metric:
        raise ValueError("center of mass registration cannot return any "
                         "quality metric.")

    # Go through the selected transformation:
    for func in pipeline:
        if func == "center_of_mass":
            transform = transform_centers_of_mass(static, static_affine,
                                                  moving, moving_affine)
            starting_affine = transform.affine
        else:
            transform = _METHOD_DICT[func][1]()
            xform, xopt, fopt \
                = affreg.optimize(static, moving, transform, None,
                                  static_affine, moving_affine,
                                  starting_affine=starting_affine,
                                  ret_metric=True)
            starting_affine = xform.affine

    # After doing all that, resample once at the end:
    affine_map = AffineMap(starting_affine, static.shape, static_affine,
                           moving.shape, moving_affine)

    resampled = affine_map.transform(moving)

    # Return the optimization metric only if requested
    if ret_metric:
        return resampled, starting_affine, xopt, fopt
    return resampled, starting_affine
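# A hedged usage sketch for the function above. The NIfTI paths are
# hypothetical, load_nifti comes from dipy.io.image, and the pipeline,
# level_iters, sigmas and factors are just the documented defaults spelled
# out explicitly.
from dipy.io.image import load_nifti

moving_data, moving_aff = load_nifti("subject_b0.nii.gz")    # hypothetical
static_data, static_aff = load_nifti("template_t1.nii.gz")   # hypothetical

warped, reg_affine = affine_registration(
    moving_data, static_data,
    moving_affine=moving_aff,
    static_affine=static_aff,
    pipeline=["center_of_mass", "translation", "rigid", "affine"],
    level_iters=[10000, 1000, 100],
    sigmas=[3.0, 1.0, 0.0],
    factors=[4, 2, 1])

# With ret_metric=True the call would also return the optimized coefficients
# and the final metric value: warped, reg_affine, xopt, fopt = ...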
Ejemplo n.º 29
0
def img_reg(moving_img, target_img, reg='non-lin'):

    m_img = nib.load(moving_img)
    t_img = nib.load(target_img)

    m_img_data = m_img.get_data()
    t_img_data = t_img.get_data()

    m_img_affine = m_img.affine
    t_img_affine = t_img.affine

    identity = np.eye(4)
    affine_map = AffineMap(identity, t_img_data.shape, t_img_affine,
                           m_img_data.shape, m_img_affine)

    m_img_resampled = affine_map.transform(m_img_data)

    c_of_mass = transform_centers_of_mass(t_img_data, t_img_affine, m_img_data,
                                          m_img_affine)

    tf_m_img_c_mass = c_of_mass.transform(m_img_data)

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)

    level_iters = [10, 10, 5]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]

    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(t_img_data,
                                  m_img_data,
                                  transform,
                                  params0,
                                  t_img_affine,
                                  m_img_affine,
                                  starting_affine=starting_affine)

    tf_m_img_translat = translation.transform(m_img_data)

    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(t_img_data,
                            m_img_data,
                            transform,
                            params0,
                            t_img_affine,
                            m_img_affine,
                            starting_affine=starting_affine)

    tf_m_img_rigid = rigid.transform(m_img_data)

    transform = AffineTransform3D()
    affreg.level_iters = [10, 10, 10]
    affine = affreg.optimize(t_img_data,
                             m_img_data,
                             transform,
                             params0,
                             t_img_affine,
                             m_img_affine,
                             starting_affine=rigid.affine)

    if reg is None or reg == 'non-lin':

        metric = CCMetric(3)
        level_iters = [10, 10, 5]
        sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

        mapping = sdr.optimize(t_img_data, m_img_data, t_img_affine,
                               m_img_affine, affine.affine)

        tf_m_img = mapping.transform(m_img_data)

    elif reg == 'affine':

        tf_m_img = affine.transform(m_img_data)

    return tf_m_img
Ejemplo n.º 30
0
def affine_registration(moving,
                        static,
                        moving_affine=None,
                        static_affine=None,
                        pipeline=None,
                        starting_affine=None,
                        metric='MI',
                        level_iters=None,
                        sigmas=None,
                        factors=None,
                        **metric_kwargs):
    """
    Find the affine transformation between two 3D images.

    Parameters
    ----------
    moving : array, nifti image or str
        Containing the data for the moving object, or full path to a nifti file
        with the moving data.

    moving_affine : 4x4 array, optional
        An affine transformation associated with the moving object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    static : array, nifti image or str
        Containing the data for the static object, or full path to a nifti file
        with the static data.

    static_affine : 4x4 array, optional
        An affine transformation associated with the static object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    pipeline : sequence, optional
        Sequence of transforms to use in the gradual fitting of the full
        affine. Default: (executed from left to right):
        `[center_of_mass, translation, rigid, affine]`

    starting_affine: 4x4 array, optional
        Initial guess for the transformation between the spaces.
        Default: identity.

    metric : str, optional.
        Currently only supports 'MI' for MutualInformationMetric.

    nbins : int, optional
        MutualInformationMetric key-word argument: the number of bins to be
        used for computing the intensity histograms. The default is 32.

    sampling_proportion : None or float in interval (0, 1], optional
        MutualInformationMetric key-word argument: There are two types of
        sampling: dense and sparse. Dense sampling uses all voxels for
        estimating the (joint and marginal) intensity histograms, while
        sparse sampling uses a subset of them. If `sampling_proportion` is
        None, then dense sampling is used. If `sampling_proportion` is a
        floating point value in (0,1] then sparse sampling is used,
        where `sampling_proportion` specifies the proportion of voxels to
        be used. The default is None (dense sampling).

    level_iters : sequence, optional
        AffineRegistration key-word argument: the number of iterations at each
        scale of the scale space. `level_iters[0]` corresponds to the coarsest
        scale and `level_iters[-1]` to the finest. By default, a 3-level scale
        space with the iteration sequence [10000, 1000, 100] will be used.

    sigmas : sequence of floats, optional
        AffineRegistration key-word argument: custom smoothing parameter to
        build the scale space (one parameter for each scale). By default,
        the sequence of sigmas will be [3, 1, 0].

    factors : sequence of floats, optional
        AffineRegistration key-word argument: custom scale factors to build the
        scale space (one factor for each scale). By default, the sequence of
        factors will be [4, 2, 1].

    Returns
    -------
    transformed, affine : array with moving data resampled to the static space
        after computing the affine transformation, and the affine 4x4
        associated with the transformation.


    Notes
    -----
    Performs a gradual registration between the two inputs, using a pipeline
    that gradually approximates the final registration. If the final default
    step (`affine`) is omitted, the resulting affine may not have all 12
    degrees of freedom adjusted.
    """
    pipeline = pipeline or [center_of_mass, translation, rigid, affine]
    level_iters = level_iters or [10000, 1000, 100]
    sigmas = sigmas or [3, 1, 0.0]
    factors = factors or [4, 2, 1]

    static, static_affine, moving, moving_affine, starting_affine = \
        _handle_pipeline_inputs(moving, static,
                                moving_affine=moving_affine,
                                static_affine=static_affine,
                                starting_affine=starting_affine)

    # Define the Affine registration object we'll use with the chosen metric.
    # For now, there is only one metric (mutual information)
    use_metric = affine_metric_dict[metric](**metric_kwargs)

    affreg = AffineRegistration(metric=use_metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    # Go through the selected transformation:
    for func in pipeline:
        starting_affine = func(moving,
                               static,
                               static_affine=static_affine,
                               moving_affine=moving_affine,
                               starting_affine=starting_affine,
                               reg=affreg)

    # After doing all that, resample once at the end:
    affine_map = AffineMap(starting_affine, static.shape, static_affine,
                           moving.shape, moving_affine)

    resampled = affine_map.transform(moving)

    return resampled, starting_affine
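# A brief, hedged sketch of storing and re-applying the returned 4x4 affine.
# It assumes `moving`, `static` and their grid-to-world affines are available
# as in the signature above, that AffineMap is imported as for the function
# above, and the file name is illustrative only.
import numpy as np

warped, reg_affine = affine_registration(moving, static,
                                         moving_affine=moving_affine,
                                         static_affine=static_affine)
np.savetxt("moving_to_static_affine.txt", reg_affine)

# Later, rebuild the map from the saved matrix and resample the moving image
# again without re-running the optimization.
loaded_affine = np.loadtxt("moving_to_static_affine.txt")
reapply_map = AffineMap(loaded_affine, static.shape, static_affine,
                        moving.shape, moving_affine)
warped_again = reapply_map.transform(moving)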
Ejemplo n.º 31
0
def anatOverlay(dwi, t1):
    if t1['file'].split("acq-")[-1] != t1['file']:
        t1_acq = '_acq-' + t1['file'].split("acq-")[-1].split("_")[0]
    else:
        t1_acq = ''

    imgT1 = nib.load(t1['file'])
    img = nib.load(dwi['denoised'])

    b0_affine = img.affine
    b0 = dwi['b0']

    b0_mask = dwi['mask']

    b0 = b0 * b0_mask
    t1 = imgT1.get_data()
    t1_affine = imgT1.affine

    (t1_affine, perm, flip_sign) = helper.fixImageHeader(imgT1)

    t1 = np.transpose(t1, perm)

    if flip_sign[0] < 0:
        t1 = t1[::-1, :, :]

    if flip_sign[1] < 0:
        t1 = t1[:, ::-1, :]

    if flip_sign[2] < 0:
        t1 = t1[:, :, ::-1]

    affine_map = AffineMap(np.eye(4), t1.shape, t1_affine, b0.shape, b0_affine)

    resampled = affine_map.transform(np.array(b0))

    # Normalize the input images to [0,255]
    t1 = helper.normImg(t1)
    b0 = helper.normImg(resampled)

    overlay = np.zeros(shape=(t1.shape) + (3, ), dtype=np.uint8)
    b0_canny = np.zeros(shape=t1.shape, dtype=bool)

    ind = helper.getImgThirds(t1)

    for i in ind[0]:
        b0_canny[:, :, i] = feature.canny(b0[:, :, i], sigma=1.5)

    for i in ind[1]:
        b0_canny[:, i, :] = feature.canny(np.squeeze(b0[:, i, :]), sigma=1.5)

    for i in ind[2]:
        #b0_canny[i-1,:,:] = feature.canny(np.squeeze(b0[i-1,:,:]), sigma=1.5)
        b0_canny[i, :, :] = feature.canny(np.squeeze(b0[i, :, :]), sigma=1.5)
        #b0_canny[i+1,:,:] = feature.canny(np.squeeze(b0[i+1,:,:]), sigma=1.5)

    overlay[..., 0] = t1
    overlay[..., 1] = t1
    overlay[..., 2] = t1
    overlay[..., 0] = b0_canny * 255

    voxSize = imgT1.header['pixdim'][1:4]

    helper.plotFig(overlay, 'alignment DWI -> T1', voxSize)  #[perm])
    plot_name = 't1' + t1_acq + '_overlay.png'
    plt.savefig(os.path.join(dwi['fig_dir'], plot_name), bbox_inches='tight')
    plt.close()
Ejemplo n.º 32
0
                    help="default: 0.5")
args = parser.parse_args()

img = get_img(args.nifti_file)
voxel_order = "".join(aff2axcodes(img.affine))
gtab = get_gtab(args.bvals, args.bvecs)
mask = get_img(args.mask_nifti)
data = img.get_fdata()

# resample mask if necessary
if mask.shape != data.shape:
    from dipy.align.imaffine import AffineMap
    identity = np.eye(4)
    affine_map = AffineMap(identity, img.shape[:3], img.affine, mask.shape[:3],
                           mask.affine)
    mask = affine_map.transform(mask.get_fdata())
    #mask = np.round(mask)
else:
    mask = mask.get_fdata()

# load or compute and save FA file
if (args.fa_numpy is not None) and os.path.isfile(args.fa_numpy):
    FA = np.load(args.fa_numpy, allow_pickle=True)
else:
    # Fit
    tenmodel = dti.TensorModel(gtab, fit_method='WLS')
    print('Fitting Tensor')
    tenfit = tenmodel.fit(data, mask)
    print('Computing anisotropy measures (FA,MD,RGB)')
    FA = tenfit.fa
    FA[np.isnan(FA)] = 0
Ejemplo n.º 33
0
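# Hedged reading of the fragment below: _gradient_3d evaluates the spatial
# gradient of the moving image at the static grid points, the identity
# AffineMap resamples the moving image onto the static grid, and
# update_gradient_dense then accumulates the gradient of the Parzen joint
# histogram with respect to the affine parameters, which is finally saved
# to disk.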
ftype = moving.dtype.type
out = np.empty(tuple(out_shape) + (dim, ), dtype=ftype)
inside = np.empty(tuple(out_shape), dtype=np.int32)
_gradient_3d(moving, moving_world2grid, moving_spacing, static_grid2world, out,
             inside)

mgrad = np.asarray(out)

from dipy.align.imaffine import AffineMap
dim = len(static.shape)
starting_affine = np.eye(dim + 1)
affine_map = AffineMap(starting_affine, static.shape, static_grid2world,
                       moving.shape, moving_grid2world)

static_values = static
moving_values = affine_map.transform(moving)

from dipy.align.transforms import AffineTransform3D
transform = AffineTransform3D()
params = transform.get_identity_parameters()

from dipy.align.parzenhist import ParzenJointHistogram
nbins = 32
histogram = ParzenJointHistogram(nbins)

static2prealigned = static_grid2world
histogram.update_gradient_dense(params, transform, static_values,
                                moving_values, static2prealigned, mgrad)

np.save('sl_aff_par_jpdf_jgrad.npy', histogram.joint_grad)
Ejemplo n.º 34
0
brainweb_mask = brainweb_strip > 0

brainweb_name = info.get_brainweb("t1", "raw")
brainweb_nib = nib.load(brainweb_name)
brainweb = brainweb_nib.get_data().squeeze()
brainweb_affine = brainweb_nib.get_affine()
brainweb = brainweb.transpose([0, 2, 1])[::-1, :, :]
rt.plot_slices(brainweb)
brainweb_affine = ibsr1_affine.copy()
brainweb_affine[brainweb_affine != 0] = 1
brainweb_affine[0, 0] = -1


# Reslice Brainweb on IBSR1
ibsr_to_bw = AffineMap(None, ibsr1.shape, ibsr1_affine, brainweb.shape, brainweb_affine)
bw_on_ibsr1 = ibsr_to_bw.transform(brainweb)
rt.overlay_slices(ibsr1, bw_on_ibsr1)  # misaligned

c_of_mass = transform_centers_of_mass(ibsr1, ibsr1_affine, brainweb, brainweb_affine)
bw_on_ibsr1 = c_of_mass.transform(brainweb)
rt.overlay_slices(ibsr1, bw_on_ibsr1)  # roughly aligned

# Start affine alignment
aff_name = "ibsr1_to_brainweb.p"
if os.path.isfile(aff_name):
    ibsr_bw_affmap = pickle.load(open(aff_name, "rb"))
else:
    ibsr_bw_affmap = dipy_align(ibsr1, ibsr1_affine, brainweb, brainweb_affine)
    pickle.dump(ibsr_bw_affmap, open(aff_name, "wb"))
bw_on_ibsr1 = ibsr_bw_affmap.transform(brainweb)
rt.overlay_slices(ibsr1, bw_on_ibsr1, slice_type=0)  # aligned (sagittal view)
Ejemplo n.º 35
0
    #hdr = t2.header
    #print(hdr)
    static = t2.get_data()
    static_grid2world = t2.affine
    moving = Diff1.get_data()
    moving_grid2world = Diff1.affine
    moving2 = Diff2.get_data()
    moving2_grid2world = Diff2.affine
    moving3 = Ktrans.get_data()
    moving3_grid2world = Ktrans.affine
    identity = np.eye(4)

    affine_map = AffineMap(identity,
                           static.shape, static_grid2world,
                           moving.shape, moving_grid2world)
    resampled1 = affine_map.transform(moving)
    affine_map2 = AffineMap(identity,
                            static.shape, static_grid2world,
                            moving2.shape, moving2_grid2world)
    resampled2 = affine_map2.transform(moving2)
    affine_mapk = AffineMap(identity,
                            static.shape, static_grid2world,
                            moving3.shape, moving3_grid2world)
    resampledk = affine_mapk.transform(moving3)

    out = np.stack([static.transpose(2, 0, 1),
                    resampled1.transpose(2, 0, 1),
                    resampled2.transpose(2, 0, 1),
                    resampledk.transpose(2, 0, 1)], axis=-1)
    #print(out.shape)
    patient = smoothslices(out, 19)
    stack += [patient]

print(" ===== DONE Creating Stacks of MRI IMAGES =====\n")
Ejemplo n.º 36
0
    def run(self, static_image_files, moving_image_files, transform_map_file,
            transform_type='affine', out_dir='',
            out_file='transformed.nii.gz'):
        """
        Parameters
        ----------
        static_image_files : string
            Path of the static image file.

        moving_image_files : string
            Path of the moving image(s). It can be a single image or a
            folder containing multiple images.

        transform_map_file : string
            For the affine case, it should be a text(*.txt) file containing
            the affine matrix. For the diffeomorphic case,
            it should be a nifti file containing the mapping displacement
            field in each voxel with this shape (x, y, z, 3, 2)

        transform_type : string, optional
            Select the transformation type to apply between 'affine' or
            'diffeomorphic'. (default affine)

        out_dir : string, optional
            Directory to save the transformed files (default '').

        out_file : string, optional
            Name of the transformed file (default 'transformed.nii.gz').
            It is recommended to use the flag --mix-names to prevent the
            output files from being overwritten.

        """
        if transform_type.lower() not in ['affine', 'diffeomorphic']:
            raise ValueError("Invalid transformation type: Please"
                             " provide a valid transform like 'affine'"
                             " or 'diffeomorphic'")

        io = self.get_io_iterator()

        for static_image_file, moving_image_file, transform_file, \
                out_file in io:

            # Loading the image data from the input files into object.
            static_image, static_grid2world = load_nifti(static_image_file)
            moving_image, moving_grid2world = load_nifti(moving_image_file)

            # Doing a sanity check for validating the dimensions of the input
            # images.
            check_dimensions(static_image, moving_image)

            if transform_type.lower() == 'affine':
                # Loading the affine matrix.
                affine_matrix = np.loadtxt(transform_file)

                # Setting up the affine transformation object.
                mapping = AffineMap(
                    affine=affine_matrix,
                    domain_grid_shape=static_image.shape,
                    domain_grid2world=static_grid2world,
                    codomain_grid_shape=moving_image.shape,
                    codomain_grid2world=moving_grid2world)

            elif transform_type.lower() == 'diffeomorphic':
                # Loading the diffeomorphic map.
                disp = nib.load(transform_file)

                mapping = DiffeomorphicMap(
                    3, disp.shape[:3],
                    disp_grid2world=np.linalg.inv(disp.affine),
                    domain_shape=static_image.shape,
                    domain_grid2world=static_grid2world,
                    codomain_shape=moving_image.shape,
                    codomain_grid2world=moving_grid2world)

                disp_data = disp.get_data()
                mapping.forward = disp_data[..., 0]
                mapping.backward = disp_data[..., 1]
                mapping.is_inverse = True

            # Transforming the image.
            transformed = mapping.transform(moving_image)

            save_nifti(out_file, transformed, affine=static_grid2world)
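# A hedged, standalone sketch of how the transform_map_file inputs described
# in the docstring above could be produced. Variable names (reg_affine,
# mapping, static_grid2world) and file names are illustrative; `mapping` is
# assumed to be a DiffeomorphicMap returned by a SyN registration.
import numpy as np
import nibabel as nib

# Affine case: a plain-text file holding the 4x4 matrix.
np.savetxt("affine_map.txt", reg_affine)

# Diffeomorphic case: stack the forward and backward displacement fields into
# a (x, y, z, 3, 2) array, matching the shape the workflow expects, and save
# it as a NIfTI volume. Which grid-to-world affine to store in the header
# depends on how the mapping was built; the static grid-to-world is used here
# purely for illustration.
disp_field = np.stack([mapping.get_forward_field(),
                       mapping.get_backward_field()], axis=-1)
nib.save(nib.Nifti1Image(disp_field, static_grid2world),
         "displacement_map.nii.gz")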
Ejemplo n.º 37
0
files, folder = fetch_syn_data()
moving_data, moving_affine = load_nifti(pjoin(folder, 'b0.nii.gz'))
moving = moving_data
moving_grid2world = moving_affine
"""
We can see that the images are far from aligned by drawing one on top of
the other. The images don't even have the same number of voxels, so in order
to draw one on top of the other we need to resample the moving image on a grid
of the same dimensions as the static image. We can do this by "transforming"
the moving image using an identity transform.
"""

identity = np.eye(4)
affine_map = AffineMap(identity, static.shape, static_grid2world, moving.shape,
                       moving_grid2world)
resampled = affine_map.transform(moving)
regtools.overlay_slices(static, resampled, None, 0, "Static", "Moving",
                        "resampled_0.png")
regtools.overlay_slices(static, resampled, None, 1, "Static", "Moving",
                        "resampled_1.png")
regtools.overlay_slices(static, resampled, None, 2, "Static", "Moving",
                        "resampled_2.png")
"""
.. figure:: resampled_0.png
   :align: center
.. figure:: resampled_1.png
   :align: center
.. figure:: resampled_2.png
   :align: center

   Input images before alignment.
Ejemplo n.º 38
0
def quick_check():

    img1_fname = "/home/omar/data/DATA_NeoBrainS12/T1.nii.gz"
    img2_fname = "/home/omar/data/DATA_NeoBrainS12/set2_i1_t1.nii.gz"

    img1_nib = nib.load(img1_fname)
    img1 = img1_nib.get_data().squeeze()
    img1_affine = img1_nib.get_affine()

    img2_nib = nib.load(img2_fname)
    img2 = img2_nib.get_data().squeeze()
    img2_affine = img2_nib.get_affine()
    # nib.aff2axcodes(img1_affine)
    #aff = AffineMap(None, img1.shape, img1_affine, img2.shape, img2_affine)
    #aff = transform_centers_of_mass(img1, img1_affine, img2, img2_affine)
    aff = dipy_align(img1, img1_affine, img2, img2_affine, np.eye(4))

    img2_resampled = aff.transform(img2)
    rt.overlay_slices(img1, img2_resampled, slice_type=0)
    rt.overlay_slices(img1, img2_resampled, slice_type=1)
    rt.overlay_slices(img1, img2_resampled, slice_type=2)



    # Verify that original and RAS versions of neo1 describe the same object

    # Load original data
    neo1_fname = get_neobrain('train', 1, 'T1')
    neo1_old, neo1_old_affine, neo1_old_spacing, neo1_old_ori = load_from_raw(neo1_fname)

    # Load RAS version
    neo1_nib = nib.load(neo1_fname)
    neo1 = neo1_nib.get_data()
    neo1_affine = neo1_nib.get_affine()

    # Resample RAS on top of original
    aff = AffineMap(None, neo1_old.shape, neo1_old_affine, neo1.shape, neo1_affine)
    neo1_resampled = aff.transform(neo1)
    rt.overlay_slices(neo1_old, neo1_resampled, slice_type=0)
    rt.overlay_slices(neo1_old, neo1_resampled, slice_type=1)
    rt.overlay_slices(neo1_old, neo1_resampled, slice_type=2)


    # Attempt to resample a test volume on top of training
    neo2_fname = get_neobrain('test', 1, 'i1_t1')
    neo2_nib = nib.load(neo2_fname)
    neo2 = neo2_nib.get_data()
    neo2_affine = neo2_nib.get_affine()
    aff = transform_centers_of_mass(neo1, neo1_affine, neo2, neo2_affine)
    #aff = dipy_align(neo1, neo1_affine, neo2, neo2_affine)
    neo2_resampled = aff.transform(neo2)

    rt.overlay_slices(neo1, neo2_resampled, slice_type=0)
    rt.overlay_slices(neo1, neo2_resampled, slice_type=1)
    rt.overlay_slices(neo1, neo2_resampled, slice_type=2)



    # Load atlas
    atlas_fname = get_neobrain('atlas', 'neo-withSkull', None)
    atlas_nib = nib.load(atlas_fname)
    atlas_affine = atlas_nib.get_affine()
    atlas = atlas_nib.get_data()
    rt.plot_slices(atlas)

    # Resample atlas on top of neo1
    aff = AffineMap(None, neo1.shape, neo1_affine, atlas.shape, atlas_affine)
    atlas_resampled = aff.transform(atlas)
    rt.overlay_slices(neo1, atlas_resampled)