Example #1
    def estimate_affine2d(self, fixed, moving, tx_tr=None):
        assert len(moving.shape) == len(fixed.shape)
        trans = AffineTransform3D()
        if self.update_map:
            self.metric = MutualInformationMetric(self.nbins,
                                                  self.sampling_prop)
            self.affmap = AffineRegistration(
                metric=self.metric,
                level_iters=self.level_iters,
                sigmas=self.sigmas,
                factors=self.factors,
                method=self.method,
                ss_sigma_factor=self.ss_sigma_factor,
                options=self.options,
                verbosity=self.verbosity)
        if tx_tr is None:
            self.update_map = False
            tx_tr = self.estimate_rigid2d(fixed, moving)
            self.update_map = True
        if isinstance(tx_tr, AffineMap):
            tx_tr = tx_tr.affine
        return self.affmap.optimize(fixed,
                                    moving,
                                    trans,
                                    self.params0,
                                    starting_affine=tx_tr)
Example #2
def affine(moving, static, static_grid2world, moving_grid2world,
           reg, starting_affine, params0=None):
    transform = AffineTransform3D()
    affine = reg.optimize(static, moving, transform, params0,
                          static_grid2world, moving_grid2world,
                          starting_affine=starting_affine)

    return affine.transform(moving), affine.affine, affine
Example #3
def register_image(static,
                   static_grid2world,
                   moving,
                   moving_grid2world,
                   transformation_type='affine',
                   dwi=None):
    if transformation_type not in ['rigid', 'affine']:
        raise ValueError('Transformation type not available in Dipy')

    # Set all parameters for registration
    nbins = 32
    params0 = None
    sampling_prop = None
    level_iters = [50, 25, 5]
    sigmas = [8.0, 4.0, 2.0]
    factors = [8, 4, 2]
    metric = MutualInformationMetric(nbins, sampling_prop)
    reg_obj = AffineRegistration(metric=metric,
                                 level_iters=level_iters,
                                 sigmas=sigmas,
                                 factors=factors,
                                 verbosity=0)

    # First, align the centers of mass of both volumes
    c_of_mass = transform_centers_of_mass(static, static_grid2world, moving,
                                          moving_grid2world)
    # Then, rigid transformation (translation + rotation)
    transform = RigidTransform3D()
    rigid = reg_obj.optimize(static,
                             moving,
                             transform,
                             params0,
                             static_grid2world,
                             moving_grid2world,
                             starting_affine=c_of_mass.affine)

    if transformation_type == 'affine':
        # Finally, affine transformation (translation + rotation + scaling + shearing)
        transform = AffineTransform3D()
        affine = reg_obj.optimize(static,
                                  moving,
                                  transform,
                                  params0,
                                  static_grid2world,
                                  moving_grid2world,
                                  starting_affine=rigid.affine)

        mapper = affine
        transformation = affine.affine
    else:
        mapper = rigid
        transformation = rigid.affine

    if dwi is not None:
        trans_dwi = transform_dwi(mapper, static, dwi)
        return trans_dwi, transformation
    else:
        return mapper.transform(moving), transformation
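
# Hypothetical usage sketch (not part of the original snippet): assumes
# dipy.io.image.load_nifti is available and uses placeholder file paths.
static_data, static_aff = load_nifti('template_t1.nii.gz')
moving_data, moving_aff = load_nifti('subject_t1.nii.gz')
warped_moving, transformation = register_image(static_data, static_aff,
                                               moving_data, moving_aff,
                                               transformation_type='rigid')
# With transformation_type='rigid' only translation and rotation are
# estimated; passing dwi=... instead applies the same mapping to a 4D series
# via the transform_dwi helper.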
Example #4
def affine(moving,
           static,
           static_affine=None,
           moving_affine=None,
           starting_affine=None,
           reg=None):
    """
    Find the affine transformation between the moving and static images.

    Parameters
    ----------
    moving : array, nifti image or str
        Containing the data for the moving object, or full path to a nifti file
        with the moving data.

    moving_affine : 4x4 array, optional
        An affine transformation associated with the moving object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    static : array, nifti image or str
        Containing the data for the static object, or full path to a nifti file
        with the static data.

    static_affine : 4x4 array, optional
        An affine transformation associated with the static object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    starting_affine: 4x4 array, optional
        Initial guess for the transformation between the spaces.

    reg : AffineRegistration class instance.

    Returns
    -------
    affine : 4x4 array
        The affine associated with the transformation that maps the moving
        data into the static space.
    """

    static, static_affine, moving, moving_affine, starting_affine = \
        _handle_pipeline_inputs(moving, static,
                                moving_affine=moving_affine,
                                static_affine=static_affine,
                                starting_affine=starting_affine)
    transform = AffineTransform3D()
    xform = reg.optimize(static,
                         moving,
                         transform,
                         None,
                         static_affine,
                         moving_affine,
                         starting_affine=starting_affine)

    return xform.affine
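
# Hypothetical usage sketch (not part of the original snippet): file paths are
# placeholders and the caller must supply a configured AffineRegistration
# instance via `reg`, since this helper does not build one itself.
example_reg = AffineRegistration(metric=MutualInformationMetric(32, None),
                                 level_iters=[10000, 1000, 100],
                                 sigmas=[3.0, 1.0, 0.0],
                                 factors=[4, 2, 1])
moving_to_static = affine('subject_t1.nii.gz', 'template_t1.nii.gz',
                          reg=example_reg)
# moving_to_static is the 4x4 affine of the optimized AffineMap, which can be
# used to resample the moving image into the static space.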
Example #5
def register_affine(t_masked,
                    m_masked,
                    affreg=None,
                    final_iters=(10000, 1000, 100)):
    """ Run affine registration between images `t_masked`, `m_masked`

    Parameters
    ----------
    t_masked : image
        Template image object, with image data masked to set out-of-brain
        voxels to zero.
    m_masked : image
        Moving (individual) image object, with image data masked to set
        out-of-brain voxels to zero.
    affreg : None or AffineRegistration instance, optional
        AffineRegistration with which to register `m_masked` to `t_masked`.  If
        None, we make an instance with default parameters.
    final_iters : tuple, optional
        Length 3 tuple of level iterations to use on final affine pass of the
        registration.

    Returns
    -------
    affine : shape (4, 4) ndarray
        Final affine mapping from voxels in `t_masked` to voxels in `m_masked`.
    """
    if affreg is None:
        metric = MutualInformationMetric(nbins=32, sampling_proportion=None)
        affreg = AffineRegistration(metric=metric)
    t_data = t_masked.get_data().astype(float)
    m_data = m_masked.get_data().astype(float)
    t_aff = t_masked.affine
    m_aff = m_masked.affine
    translation = affreg.optimize(t_data, m_data, TranslationTransform3D(),
                                  None, t_aff, m_aff)
    rigid = affreg.optimize(t_data,
                            m_data,
                            RigidTransform3D(),
                            None,
                            t_aff,
                            m_aff,
                            starting_affine=translation.affine)
    # Maybe bump up iterations for last step
    if final_iters is not None:
        affreg.level_iters = list(final_iters)
    affine = affreg.optimize(t_data,
                             m_data,
                             AffineTransform3D(),
                             None,
                             t_aff,
                             m_aff,
                             starting_affine=rigid.affine)
    return affine.affine
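
# Hypothetical usage sketch (not part of the original snippet): assumes
# nibabel is imported as nib and uses placeholder file names for two
# brain-masked images.
template_img = nib.load('template_masked.nii.gz')
subject_img = nib.load('subject_masked.nii.gz')
template_to_subject = register_affine(template_img, subject_img)
# With affreg=None a default AffineRegistration is created internally; pass
# your own instance (and final_iters) to control the scale-space schedule.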
Example #6
    def affine(self, static, static_grid2world, moving, moving_grid2world,
               affreg, params0, progressive):
        """ Function for full affine registration.

        Parameters
        ----------
        static : 2D or 3D array
            the image to be used as reference during optimization.

        static_grid2world : array, shape (dim+1, dim+1), optional
            the voxel-to-space transformation associated with the static
            image. The default is None, implying the transform is the
            identity.

        moving : 2D or 3D array
            the image to be used as "moving" during optimization. It is
            necessary to pre-align the moving image to ensure its domain
            lies inside the domain of the deformation fields. This is assumed
            to be accomplished by "pre-aligning" the moving image towards the
            static using an affine transformation given by the
            'starting_affine' matrix.

        moving_grid2world : array, shape (dim+1, dim+1), optional
            the voxel-to-space transformation associated with the moving
            image. The default is None, implying the transform is the
            identity.

        affreg : An object of the image registration class.

        params0 : array, shape (n,)
            parameters from which to start the optimization. If None, the
            optimization will start at the identity transform. n is the
            number of parameters of the specified transformation.

        progressive : boolean
            Flag to enable or disable the progressive registration
            (default: True).

        """
        if progressive:
            _, affine, xopt, fopt = self.rigid(static, static_grid2world,
                                               moving, moving_grid2world,
                                               affreg, params0, progressive)
        else:
            _, affine = self.center_of_mass(static, static_grid2world,
                                            moving, moving_grid2world)

        transform = AffineTransform3D()
        return self.perform_transformation(static, static_grid2world,
                                           moving, moving_grid2world,
                                           affreg, params0, transform,
                                           affine)
Example #7
def affine_transform(static, moving, static_grid2world, moving_grid2world,
                     nbins, sampling_prop, metric, level_iters, sigmas,
                     factors, starting_affine):

    transform = AffineTransform3D()
    params0 = None
    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    affine = affreg.optimize(static,
                             moving,
                             transform,
                             params0,
                             static_grid2world,
                             moving_grid2world,
                             starting_affine=starting_affine)

    return affine
Example #8
def transform_affine(static, moving):
    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)

    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]

    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    transform = AffineTransform3D()
    params0 = None
    affine = affreg.optimize(static, moving, transform, params0, None, None,
                             None)
    transformed = affine.transform(moving)

    return transformed
Example #9
def img_reg(moving_img, target_img, reg='non-lin'):

    m_img = nib.load(moving_img)
    t_img = nib.load(target_img)

    m_img_data = m_img.get_data()
    t_img_data = t_img.get_data()

    m_img_affine = m_img.affine
    t_img_affine = t_img.affine

    identity = np.eye(4)
    affine_map = AffineMap(identity, t_img_data.shape, t_img_affine,
                           m_img_data.shape, m_img_affine)

    m_img_resampled = affine_map.transform(m_img_data)

    c_of_mass = transform_centers_of_mass(t_img_data, t_img_affine, m_img_data,
                                          m_img_affine)

    tf_m_img_c_mass = c_of_mass.transform(m_img_data)

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)

    level_iters = [10, 10, 5]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]

    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(t_img_data,
                                  m_img_data,
                                  transform,
                                  params0,
                                  t_img_affine,
                                  m_img_affine,
                                  starting_affine=starting_affine)

    tf_m_img_translat = translation.transform(m_img_data)

    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(t_img_data,
                            m_img_data,
                            transform,
                            params0,
                            t_img_affine,
                            m_img_affine,
                            starting_affine=starting_affine)

    tf_m_img_rigid = rigid.transform(m_img_data)

    transform = AffineTransform3D()
    affreg.level_iters = [10, 10, 10]
    affine = affreg.optimize(t_img_data,
                             m_img_data,
                             transform,
                             params0,
                             t_img_affine,
                             m_img_affine,
                             starting_affine=rigid.affine)

    if reg is None or reg == 'non-lin':

        metric = CCMetric(3)
        level_iters = [10, 10, 5]
        sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

        mapping = sdr.optimize(t_img_data, m_img_data, t_img_affine,
                               m_img_affine, affine.affine)

        tf_m_img = mapping.transform(m_img_data)

    elif reg == 'affine':

        tf_m_img = affine.transform(m_img_data)

    return tf_m_img

Example #10
def warp_syn_dipy(static_fname, moving_fname):
    import os
    import numpy as np
    import nibabel as nb
    from dipy.align.metrics import CCMetric
    from dipy.align.imaffine import (transform_centers_of_mass, AffineMap,
                                     MutualInformationMetric,
                                     AffineRegistration)
    from dipy.align.transforms import (TranslationTransform3D,
                                       RigidTransform3D, AffineTransform3D)
    from dipy.align.imwarp import (DiffeomorphicMap,
                                   SymmetricDiffeomorphicRegistration)

    from nipype.utils.filemanip import fname_presuffix

    static = nb.load(static_fname)
    moving = nb.load(moving_fname)

    c_of_mass = transform_centers_of_mass(static.get_data(), static.affine,
                                          moving.get_data(), moving.affine)
    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)
    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(static.get_data(),
                                  moving.get_data(),
                                  transform,
                                  params0,
                                  static.affine,
                                  moving.affine,
                                  starting_affine=starting_affine)

    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(static.get_data(),
                            moving.get_data(),
                            transform,
                            params0,
                            static.affine,
                            moving.affine,
                            starting_affine=starting_affine)
    transform = AffineTransform3D()
    params0 = None
    starting_affine = rigid.affine
    affine = affreg.optimize(static.get_data(),
                             moving.get_data(),
                             transform,
                             params0,
                             static.affine,
                             moving.affine,
                             starting_affine=starting_affine)

    metric = CCMetric(3, sigma_diff=3.)
    level_iters = [25, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
    starting_affine = affine.affine
    mapping = sdr.optimize(static.get_data(), moving.get_data(), static.affine,
                           moving.affine, starting_affine)

    warped_filename = os.path.abspath(
        fname_presuffix(moving_fname,
                        newpath='./',
                        suffix='_warped',
                        use_ext=True))
    warped = nb.Nifti1Image(mapping.transform(moving.get_data()),
                            static.affine)
    warped.to_filename(warped_filename)

    warp_filename = os.path.abspath(
        fname_presuffix(moving_fname,
                        newpath='./',
                        suffix='_warp.npz',
                        use_ext=False))
    np.savez(warp_filename,
             prealign=mapping.prealign,
             forward=mapping.forward,
             backward=mapping.backward)

    return warp_filename, warped_filename
Example #11
def affine_reg(static_path, moving_path):
    """

    :param static_path:
    :param moving_path:
    :return:
    """
    t0_time = time.time()

    print('-->Applying affine reg over', basename(moving_path), 'based on',
          basename(static_path))

    static_img = nib.load(static_path)
    static = static_img.get_data()
    static_grid2world = static_img.affine

    moving_img = nib.load(moving_path)
    moving = np.array(moving_img.get_data())
    moving_grid2world = moving_img.affine

    print('---> I. Translation of the moving image towards the static image')

    print(
        '---> Resampling the moving image on a grid of the same dimensions as the static image'
    )

    identity = np.eye(4)
    affine_map = AffineMap(identity, static.shape, static_grid2world,
                           moving.shape, moving_grid2world)
    resampled = affine_map.transform(moving)

    regtools.overlay_slices(static, resampled, None, 0, "Static", "Moving",
                            "resampled_0.png")
    regtools.overlay_slices(static, resampled, None, 1, "Static", "Moving",
                            "resampled_1.png")
    regtools.overlay_slices(static, resampled, None, 2, "Static", "Moving",
                            "resampled_2.png")

    print('---> Aligning the centers of mass of the two images')

    c_of_mass = transform_centers_of_mass(static, static_grid2world, moving,
                                          moving_grid2world)

    print(
        '---> We can now transform the moving image and draw it on top of the static image'
    )

    transformed = c_of_mass.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_com_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_com_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_com_2.png")

    print('---> II. Refine by looking for an affine transform')

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    """
    Now we go ahead and instantiate the registration class with the configuration
    we just prepared
    """
    print('---> Computing Affine Registration (non-convex optimization)')

    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(static,
                                  moving,
                                  transform,
                                  params0,
                                  static_grid2world,
                                  moving_grid2world,
                                  starting_affine=starting_affine)

    transformed = translation.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_trans_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_trans_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_trans_2.png")

    print('---> III. Refining with a rigid transform')

    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(static,
                            moving,
                            transform,
                            params0,
                            static_grid2world,
                            moving_grid2world,
                            starting_affine=starting_affine)

    transformed = rigid.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_rigid_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_rigid_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_rigid_2.png")

    print(
        '---> IV. Refining with a full affine transform (translation, rotation, scale and shear)'
    )

    transform = AffineTransform3D()
    params0 = None
    starting_affine = rigid.affine
    affine = affreg.optimize(static,
                             moving,
                             transform,
                             params0,
                             static_grid2world,
                             moving_grid2world,
                             starting_affine=starting_affine)

    print('---> Results in a slight shear and scale')

    transformed = affine.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_affine_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_affine_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_affine_2.png")

    name = os.path.splitext(basename(moving_path))[0] + '_affine_reg'
    # the transformed image lives on the static grid, so save it with the
    # static affine rather than the identity
    nib.save(nib.Nifti1Image(transformed, static_grid2world), name)
    t1_time = time.time()
    total_time = t1_time - t0_time
    print('Total time: ' + str(total_time))
    print('Affine registration successfully applied')
Example #12
def gm_network(mr_filename, gm_filename, at_filename, template_mr):

    new_gmfilename = os.path.abspath(gm_filename).replace(".nii", "_mni.nii")
    new_atfilename = os.path.abspath(at_filename).replace(".nii", "_mni.nii")

    networks = 0

    if not ((os.path.isfile(new_gmfilename)) or
            (os.path.islink(new_gmfilename))):

        print('file {} does not exist'.format(new_gmfilename))
        print('performing registration to MNI ... ', end='')
        start = time.process_time()

        # load the images up front: they are needed both for the registration
        # below and for resampling the grey matter and atlas maps afterwards
        # see https://dipy.org/documentation/1.2.0./examples_built/ ..
        #           .. affine_registration_3d/#example-affine-registration-3d
        static, static_affine = load_nifti(template_mr)
        moving, moving_affine = load_nifti(mr_filename)
        grey, grey_affine = load_nifti(gm_filename)
        atl, atl_affine = load_nifti(at_filename)

        # see if we can maybe find a previously saved transform
        tf_filename = os.path.abspath(gm_filename).split(
            '.nii')[0] + "_reg.npz"
        if not ((os.path.isfile(tf_filename)) or
                (os.path.islink(tf_filename))):

            # first initialise by putting centres of mass on top of each other
            c_of_mass = transform_centers_of_mass(static, static_affine,
                                                  moving, moving_affine)

            # initialise transform parameters (e.g. the mutual information criterion)
            # these parameters won't need to be changed between the different stages
            nbins = 64
            sampling_prop = None
            metric = MutualInformationMetric(nbins, sampling_prop)
            level_iters = [25, 15, 5]
            sigmas = [2, 1, 0]
            factors = [4, 2, 1]
            affreg = AffineRegistration(metric=metric,
                                        level_iters=level_iters,
                                        sigmas=sigmas,
                                        factors=factors)

            # give slightly more degrees of freedom, by allowing translation of centre of gravity
            print('\nTranslation only:')
            transform = TranslationTransform3D()
            params0 = None
            translation = affreg.optimize(static,
                                          moving,
                                          transform,
                                          params0,
                                          static_affine,
                                          moving_affine,
                                          starting_affine=c_of_mass.affine)

            # refine further by allowing all rigid transforms (rotations/translations around the centre of gravity)
            print('Rigid transform:')
            transform = RigidTransform3D()
            params0 = None
            rigid = affreg.optimize(static,
                                    moving,
                                    transform,
                                    params0,
                                    static_affine,
                                    moving_affine,
                                    starting_affine=translation.affine)

            full_affine = False
            # the GM networks method is based on keeping the cortical shape intact

            if (full_affine):

                # refine to a full affine transform by adding scaling and shearing
                print('Affine transform:')
                transform = AffineTransform3D()
                params0 = None
                affine = affreg.optimize(static,
                                         moving,
                                         transform,
                                         params0,
                                         static_affine,
                                         moving_affine,
                                         starting_affine=rigid.affine)
                final = affine

            else:

                final = rigid

            np.savez(tf_filename, final)

        else:

            with np.load(tf_filename, allow_pickle=True) as npzfile:
                # the AffineMap was pickled into a 0-d object array; unwrap it
                final = npzfile['arr_0'].item()

        # transform the grey matter data instead of the MRI itself
        resampled = final.transform(grey)
        save_nifti(new_gmfilename, resampled, static_affine)
        resampled = final.transform(atl)
        save_nifti(new_atfilename, resampled, static_affine)

        print('finished in {:.2f}s'.format(time.process_time() - start))

    if ((os.path.isfile(new_gmfilename)) or (os.path.islink(new_gmfilename))):

        # only cube size implemented so far
        cubesize = 3

        # load the grey matter map and the template to which it was registered
        gm_img = nib.load(new_gmfilename)
        template_data = np.asarray(nib.load(template_mr).dataobj)
        gm_data = np.asarray(gm_img.dataobj)

        # find the best cube grid position (with the most nonzero cubes)
        cube_nonzeros, cube_offsets, gm_incubes = cube_grid_position(
            gm_data, template_data, cubesize)
        gm_shape = gm_incubes.shape

        # write out the cube map, where each voxel in a cube is labelled with its cube index
        # be aware of the @ operator, this is a true matrix product A[n*m] x B[m*p] = C [n*p]
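        # the outer product below has shape (cubesize**3, cube_nonzeros):
        # column j is constant and equal to j, so every flat voxel offset that
        # belongs to cube j (cube_offsets is assumed to have the same shape)
        # receives the label j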
        cubes_data = np.zeros(template_data.shape).flatten()
        cubes_data[cube_offsets] = np.ones(
            cubesize**3)[:, np.newaxis] @ np.arange(cube_nonzeros).reshape(
                1, cube_nonzeros)
        cubes_data = cubes_data.reshape(template_data.shape)
        cubes_file = os.path.abspath(gm_filename).replace(
            ".nii", "_cubes.nii")
        cubes_map = nib.Nifti1Image(cubes_data, gm_img.affine)
        cubes_map.to_filename(cubes_file)

        # make a randomised version of the grey matter densities in the cubes
        # 1: exchange between and inside cubes (could be too many degrees of freedom!)
        gm_random = gm_incubes.flatten()
        gm_random = gm_random[np.random.permutation(
            len(gm_random)).reshape(gm_shape)]
        # 2: exchange cubes only ( this won't change the values in the correlation matrix, only positions )
        # gm_random = gm_incubes [ :, np.random.permutation ( gm_shape [1] ) ];
        # 3: exchange cubes and shuffle inside cubes
        # gm_random = gm_incubes [ np.random.permutation ( gm_shape [0] ), np.random.permutation ( gm_shape [1] )[ :, np.newaxis ] ];

        add_diag = True

        # name of the NIfTI file with networks
        networks_file = os.path.abspath(gm_filename).replace(
            ".nii", "_gmnet.nii")

        if not ((os.path.isfile(networks_file)) or
                (os.path.islink(networks_file))):

            # compute the cross correlation for observed and randomised cubes
            networks = cube_cross_correlation(gm_incubes, gm_random, cubesize,
                                              add_diag)

            # save the networks to a file
            networks_map = nib.Nifti1Image(networks, np.eye(4))
            networks_map.to_filename(networks_file)

        else:

            print("loading already existing file")
            networks = np.asarray(nib.load(networks_file).dataobj)

    return networks, networks_file
Example #13
def wm_syn(t1w_brain,
           ap_path,
           working_dir,
           fa_path=None,
           template_fa_path=None):
    """
    A function to perform SyN registration

    Parameters
    ----------
        t1w_brain  : str
            File path to the skull-stripped T1w brain Nifti1Image.
        ap_path : str
            File path to the AP moving image.
        working_dir : str
            Path to the working directory to perform SyN and save outputs.
        fa_path : str, optional
            File path to the FA moving image.
        template_fa_path : str, optional
            File path to the T1w-conformed template FA reference image.
    """
    import uuid
    from time import strftime
    from dipy.align.imaffine import (
        MutualInformationMetric,
        AffineRegistration,
        transform_origins,
    )
    from dipy.align.transforms import (
        TranslationTransform3D,
        RigidTransform3D,
        AffineTransform3D,
    )
    from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
    from dipy.align.metrics import CCMetric

    # from dipy.viz import regtools
    # from nilearn.image import resample_to_img

    ap_img = nib.load(ap_path)
    t1w_brain_img = nib.load(t1w_brain)
    static = np.asarray(t1w_brain_img.dataobj, dtype=np.float32)
    static_affine = t1w_brain_img.affine
    moving = np.asarray(ap_img.dataobj, dtype=np.float32)
    moving_affine = ap_img.affine

    affine_map = transform_origins(static, static_affine, moving,
                                   moving_affine)

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)

    level_iters = [10, 10, 5]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affine_reg = AffineRegistration(metric=metric,
                                    level_iters=level_iters,
                                    sigmas=sigmas,
                                    factors=factors)
    transform = TranslationTransform3D()

    params0 = None
    translation = affine_reg.optimize(static, moving, transform, params0,
                                      static_affine, moving_affine)
    transform = RigidTransform3D()

    rigid_map = affine_reg.optimize(
        static,
        moving,
        transform,
        params0,
        static_affine,
        moving_affine,
        starting_affine=translation.affine,
    )
    transform = AffineTransform3D()

    # We bump up the iterations to get a more exact fit:
    affine_reg.level_iters = [1000, 1000, 100]
    affine_opt = affine_reg.optimize(
        static,
        moving,
        transform,
        params0,
        static_affine,
        moving_affine,
        starting_affine=rigid_map.affine,
    )

    # We now perform the non-rigid deformation using the Symmetric
    # Diffeomorphic Registration(SyN) Algorithm:
    metric = CCMetric(3)
    level_iters = [10, 10, 5]

    # Refine fit
    if template_fa_path is not None:
        from nilearn.image import resample_to_img
        fa_img = nib.load(fa_path)
        template_img = nib.load(template_fa_path)
        template_img_res = resample_to_img(template_img, t1w_brain_img)
        static = np.asarray(template_img_res.dataobj, dtype=np.float32)
        static_affine = template_img_res.affine
        moving = np.asarray(fa_img.dataobj, dtype=np.float32)
        moving_affine = fa_img.affine
    else:
        static = np.asarray(t1w_brain_img.dataobj, dtype=np.float32)
        static_affine = t1w_brain_img.affine
        moving = np.asarray(ap_img.dataobj, dtype=np.float32)
        moving_affine = ap_img.affine

    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

    mapping = sdr.optimize(static, moving, static_affine, moving_affine,
                           affine_opt.affine)
    warped_moving = mapping.transform(moving)

    # Save warped FA image
    run_uuid = f"{strftime('%Y%m%d_%H%M%S')}_{uuid.uuid4()}"
    warped_fa = f"{working_dir}/warped_fa_{run_uuid}.nii.gz"
    nib.save(nib.Nifti1Image(warped_moving, affine=static_affine), warped_fa)

    # # We show the registration result with:
    # regtools.overlay_slices(static, warped_moving, None, 0,
    # "Static", "Moving",
    #                         "%s%s%s%s" % (working_dir,
    #                         "/transformed_sagittal_", run_uuid, ".png"))
    # regtools.overlay_slices(static, warped_moving, None,
    # 1, "Static", "Moving",
    #                         "%s%s%s%s" % (working_dir,
    #                         "/transformed_coronal_", run_uuid, ".png"))
    # regtools.overlay_slices(static, warped_moving,
    # None, 2, "Static", "Moving",
    #                         "%s%s%s%s" % (working_dir,
    #                         "/transformed_axial_", run_uuid, ".png"))

    return mapping, affine_map, warped_fa
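
# Hypothetical usage sketch (not part of the original snippet): the paths are
# placeholders. When template_fa_path is given, the final SyN stage registers
# the subject FA image to the template FA (resampled into T1w space) instead
# of the AP image to the T1w brain.
mapping, affine_map, warped_fa = wm_syn('/data/sub-01/t1w_brain.nii.gz',
                                        '/data/sub-01/ap.nii.gz',
                                        '/tmp/syn_work',
                                        fa_path='/data/sub-01/fa.nii.gz',
                                        template_fa_path='/data/templates/fa_template.nii.gz')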
Example #14
def affine_registration(reference, reference_grid2world, scan,
                        scan_grid2world):
    #get first b0 volumes for both scans
    reference_b0 = reference[:, :, :, 0]
    scan_b0 = scan[:, :, :, 0]

    #In this function we use multiple stages to register the 2 scans,
    #providing previous results as initialisation to the next stage.
    #We do this because registration is a non-convex problem, so it is
    #important to initialise as close to the optimal value as possible

    #Stage1: we obtain a very rough (and fast) registration by just aligning
    #the centers of mass of the two images
    center_of_mass = transform_centers_of_mass(reference_b0,
                                               reference_grid2world, scan_b0,
                                               scan_grid2world)

    #create the similarity metric (Mutual Information) to be used:
    nbins = 32
    sampling_prop = None  #use all voxels to perform registration
    metric = MutualInformationMetric(nbins, sampling_prop)

    #We use a multi-resolution strategy to accelerate convergence and avoid
    #getting stuck in local optima (below are the parameters)
    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]  #gaussian smoothing sigma at each resolution
    factors = [4, 2, 1]  #subsampling factor

    #optimisation algorithm used is L-BFGS-B
    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    #Stage2: Perform a basic translation transform
    transform = TranslationTransform3D()
    translation = affreg.optimize(reference_b0,
                                  scan_b0,
                                  transform,
                                  None,
                                  reference_grid2world,
                                  scan_grid2world,
                                  starting_affine=center_of_mass.affine)

    #Stage3 : optimize previous result with a rigid transform
    #(Includes translation, rotation)
    transform = RigidTransform3D()
    rigid = affreg.optimize(reference_b0,
                            scan_b0,
                            transform,
                            None,
                            reference_grid2world,
                            scan_grid2world,
                            starting_affine=translation.affine)

    #Stage4 : optimize previous result with a affine transform
    #(Includes translation, rotation, scale, shear)
    transform = AffineTransform3D()
    affine = affreg.optimize(reference_b0,
                             scan_b0,
                             transform,
                             None,
                             reference_grid2world,
                             scan_grid2world,
                             starting_affine=rigid.affine)

    if params.reg_type == "SDR":
        #Stage 5 : Symmetric Diffeomorphic Registration
        metric = CCMetric(3)
        level_iters = [400, 200, 100]
        sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
        mapping = sdr.optimize(reference_b0, scan_b0, reference_grid2world,
                               scan_grid2world, affine.affine)
    else:
        mapping = affine
    #Once this is completed we can apply the resulting transformation to each
    #volume of the scan

    for volume in range(0, scan.shape[3]):
        #note: mapping is an AffineMap (or a DiffeomorphicMap when SDR is used).
        #The transform method maps the input image from co-domain to domain space.
        #By default, the transformed image is sampled on a grid defined by the shape of the domain;
        #the sampling uses linear interpolation (refer to comp vision lab on homographies)
        scan[:, :, :, volume] = mapping.transform(scan[:, :, :, volume])

    return scan
Example #15
def affine_registration(moving,
                        static,
                        moving_affine=None,
                        static_affine=None,
                        pipeline=None,
                        starting_affine=None,
                        metric='MI',
                        level_iters=None,
                        sigmas=None,
                        factors=None,
                        ret_metric=False,
                        **metric_kwargs):
    """
    Find the affine transformation between two 3D images. Alternatively, find
    the combination of several linear transformations.

    Parameters
    ----------
    moving : array, nifti image or str
        Containing the data for the moving object, or full path to a nifti file
        with the moving data.

    static : array, nifti image or str
        Containing the data for the static object, or full path to a nifti file
        with the static data.

    moving_affine : 4x4 array, optional
        An affine transformation associated with the moving object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    static_affine : 4x4 array, optional
        An affine transformation associated with the static object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    pipeline : list of str, optional
        Sequence of transforms to use in the gradual fitting. Default: gradual
        fit of the full affine (executed from left to right):
        `["center_of_mass", "translation", "rigid", "affine"]`
        Alternatively, any other combination of the following registration
        methods might be used: center_of_mass, translation, rigid,
        rigid_isoscaling, rigid_scaling and affine.

    starting_affine: 4x4 array, optional
        Initial guess for the transformation between the spaces.
        Default: identity.

    metric : str, optional.
        Currently only supports 'MI' for MutualInformationMetric.

    level_iters : sequence, optional
        AffineRegistration key-word argument: the number of iterations at each
        scale of the scale space. `level_iters[0]` corresponds to the coarsest
        scale, `level_iters[-1]` to the finest. By default, a 3-level scale
        space with the iterations sequence [10000, 1000, 100] will be used.

    sigmas : sequence of floats, optional
        AffineRegistration key-word argument: custom smoothing parameter to
        build the scale space (one parameter for each scale). By default,
        the sequence of sigmas will be [3, 1, 0].

    factors : sequence of floats, optional
        AffineRegistration key-word argument: custom scale factors to build the
        scale space (one factor for each scale). By default, the sequence of
        factors will be [4, 2, 1].

    ret_metric : boolean, optional
        Set it to True to return the value of the optimized coefficients and
        the optimization quality metric.

    nbins : int, optional
        MutualInformationMetric key-word argument: the number of bins to be
        used for computing the intensity histograms. The default is 32.

    sampling_proportion : None or float in interval (0, 1], optional
        MutualInformationMetric key-word argument: There are two types of
        sampling: dense and sparse. Dense sampling uses all voxels for
        estimating the (joint and marginal) intensity histograms, while
        sparse sampling uses a subset of them. If `sampling_proportion` is
        None, then dense sampling is used. If `sampling_proportion` is a
        floating point value in (0,1] then sparse sampling is used,
        where `sampling_proportion` specifies the proportion of voxels to
        be used. The default is None (dense sampling).

    Returns
    -------
    transformed : array with moving data resampled to the static space
    after computing the affine transformation
    affine : the affine 4x4 associated with the transformation.
    xopt : the value of the optimized coefficients.
    fopt : the value of the optimization quality metric.

    Notes
    -----
    Performs a gradual registration between the two inputs, using a pipeline
    that gradually approximates the final registration. If the final default
    step (`affine`) is omitted, the resulting affine may not have all 12
    degrees of freedom adjusted.
    """
    pipeline = pipeline or ["center_of_mass", "translation", "rigid", "affine"]
    level_iters = level_iters or [10000, 1000, 100]
    sigmas = sigmas or [3, 1, 0.0]
    factors = factors or [4, 2, 1]

    static, static_affine, moving, moving_affine, starting_affine = \
        _handle_pipeline_inputs(moving, static,
                                moving_affine=moving_affine,
                                static_affine=static_affine,
                                starting_affine=starting_affine)

    # Define the Affine registration object we'll use with the chosen metric.
    # For now, there is only one metric (mutual information)
    use_metric = affine_metric_dict[metric](**metric_kwargs)

    affreg = AffineRegistration(metric=use_metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    if pipeline == ["center_of_mass"] and ret_metric:
        raise ValueError("center of mass registration cannot return any "
                         "quality metric.")

    # Go through the selected transformation:
    for func in pipeline:
        if func == "center_of_mass":
            transform = transform_centers_of_mass(static, static_affine,
                                                  moving, moving_affine)
            starting_affine = transform.affine
        else:
            if func == "translation":
                transform = TranslationTransform3D()
            elif func == "rigid":
                transform = RigidTransform3D()
            elif func == "rigid_isoscaling":
                transform = RigidIsoScalingTransform3D()
            elif func == "rigid_scaling":
                transform = RigidScalingTransform3D()
            elif func == "affine":
                transform = AffineTransform3D()
            else:
                raise ValueError("Not supported registration method")

            xform, xopt, fopt \
                = affreg.optimize(static, moving, transform, None,
                                  static_affine, moving_affine,
                                  starting_affine=starting_affine,
                                  ret_metric=True)
            starting_affine = xform.affine

    # After doing all that, resample once at the end:
    affine_map = AffineMap(starting_affine, static.shape, static_affine,
                           moving.shape, moving_affine)

    resampled = affine_map.transform(moving)

    # Return the optimization metric only if requested
    if ret_metric:
        return resampled, starting_affine, xopt, fopt
    return resampled, starting_affine
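
# Hypothetical usage sketch (not part of the original snippet): illustrates
# the pipeline and metric key-word arguments documented above; file paths are
# placeholders.
warped, reg_affine = affine_registration(
    'subject_b0.nii.gz',
    'template_t2.nii.gz',
    pipeline=["center_of_mass", "translation", "rigid"],
    nbins=32,
    sampling_proportion=0.25)
# Per the Notes section, stopping before the final "affine" step leaves
# scaling and shear unadjusted; reg_affine is still a 4x4 array, and a sparse
# MI sampling of 25% of the voxels speeds up each optimization pass.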
Example #16
def wm_syn(template_path, fa_path, working_dir):
    """A function to perform ANTS SyN registration using dipy functions

    Parameters
    ----------
    template_path  : str
        File path to the template reference FA image.
    fa_path : str
        File path to the FA moving image (image to be fitted to reference)
    working_dir : str
        Path to the working directory to perform SyN and save outputs.

    Returns
    -------
    DiffeomorphicMap
        An object that can be used to register images back and forth between static (template) and moving (FA) domains
    AffineMap
        An object used to transform the moving (FA) image towards the static image (template)
    """

    fa_img = nib.load(fa_path)
    template_img = nib.load(template_path)

    static = template_img.get_data()
    static_affine = template_img.affine
    moving = fa_img.get_data().astype(np.float32)
    moving_affine = fa_img.affine

    affine_map = transform_origins(static, static_affine, moving,
                                   moving_affine)

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)

    level_iters = [10, 10, 5]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affine_reg = AffineRegistration(metric=metric,
                                    level_iters=level_iters,
                                    sigmas=sigmas,
                                    factors=factors)
    transform = TranslationTransform3D()

    params0 = None
    translation = affine_reg.optimize(static, moving, transform, params0,
                                      static_affine, moving_affine)
    transform = RigidTransform3D()

    rigid_map = affine_reg.optimize(
        static,
        moving,
        transform,
        params0,
        static_affine,
        moving_affine,
        starting_affine=translation.affine,
    )
    transform = AffineTransform3D()

    # We bump up the iterations to get a more exact fit:
    affine_reg.level_iters = [1000, 1000, 100]
    affine_opt = affine_reg.optimize(
        static,
        moving,
        transform,
        params0,
        static_affine,
        moving_affine,
        starting_affine=rigid_map.affine,
    )

    # We now perform the non-rigid deformation using the Symmetric Diffeomorphic Registration(SyN) Algorithm:
    metric = CCMetric(3)
    level_iters = [10, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

    mapping = sdr.optimize(static, moving, static_affine, moving_affine,
                           affine_opt.affine)
    warped_moving = mapping.transform(moving)

    # We show the registration result with:
    regtools.overlay_slices(
        static,
        warped_moving,
        None,
        0,
        "Static",
        "Moving",
        f"{working_dir}/transformed_sagittal.png",
    )
    regtools.overlay_slices(
        static,
        warped_moving,
        None,
        1,
        "Static",
        "Moving",
        f"{working_dir}/transformed_coronal.png",
    )
    regtools.overlay_slices(
        static,
        warped_moving,
        None,
        2,
        "Static",
        "Moving",
        f"{working_dir}/transformed_axial.png",
    )

    return mapping, affine_map
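
# Hypothetical usage sketch (not part of the original snippet): shows how the
# returned objects can be used, as described in the Returns section; the
# paths are placeholders.
mapping, affine_map = wm_syn('/templates/fa_template.nii.gz',
                             '/data/sub-01/fa.nii.gz',
                             '/tmp/syn_work')
fa_data = nib.load('/data/sub-01/fa.nii.gz').get_data().astype(np.float32)
fa_in_template = mapping.transform(fa_data)  # moving (FA) -> static (template)
template_data = nib.load('/templates/fa_template.nii.gz').get_data()
template_in_fa = mapping.transform_inverse(template_data)  # static -> moving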
Example #17
def wm_syn(template_path, fa_path, working_dir):
    """
    A function to perform ANTS SyN registration

    Parameters
    ----------
        template_path  : str
            File path to the template reference image.
        fa_path : str
            File path to the FA moving image.
        working_dir : str
            Path to the working directory to perform SyN and save outputs.
    """
    import uuid
    from time import strftime
    from dipy.align.imaffine import MutualInformationMetric, AffineRegistration, transform_origins
    from dipy.align.transforms import TranslationTransform3D, RigidTransform3D, AffineTransform3D
    from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
    from dipy.align.metrics import CCMetric
    from dipy.viz import regtools

    fa_img = nib.load(fa_path)
    template_img = nib.load(template_path)

    static = np.asarray(template_img.dataobj)
    static_affine = template_img.affine
    moving = np.asarray(fa_img.dataobj).astype(np.float32)
    moving_affine = fa_img.affine

    affine_map = transform_origins(static, static_affine, moving,
                                   moving_affine)

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)

    level_iters = [10, 10, 5]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affine_reg = AffineRegistration(metric=metric,
                                    level_iters=level_iters,
                                    sigmas=sigmas,
                                    factors=factors)
    transform = TranslationTransform3D()

    params0 = None
    translation = affine_reg.optimize(static, moving, transform, params0,
                                      static_affine, moving_affine)
    transform = RigidTransform3D()

    rigid_map = affine_reg.optimize(static,
                                    moving,
                                    transform,
                                    params0,
                                    static_affine,
                                    moving_affine,
                                    starting_affine=translation.affine)
    transform = AffineTransform3D()

    # We bump up the iterations to get a more exact fit:
    affine_reg.level_iters = [1000, 1000, 100]
    affine_opt = affine_reg.optimize(static,
                                     moving,
                                     transform,
                                     params0,
                                     static_affine,
                                     moving_affine,
                                     starting_affine=rigid_map.affine)

    # We now perform the non-rigid deformation using the Symmetric Diffeomorphic Registration(SyN) Algorithm:
    metric = CCMetric(3)
    level_iters = [10, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

    mapping = sdr.optimize(static, moving, static_affine, moving_affine,
                           affine_opt.affine)
    warped_moving = mapping.transform(moving)

    # Save warped FA image
    run_uuid = '%s_%s' % (strftime('%Y%m%d_%H%M%S'), uuid.uuid4())
    warped_fa = '{}/warped_fa_{}.nii.gz'.format(working_dir, run_uuid)
    nib.save(nib.Nifti1Image(warped_moving, affine=static_affine), warped_fa)

    # We show the registration result with:
    regtools.overlay_slices(
        static, warped_moving, None, 0, "Static", "Moving",
        "%s%s%s%s" % (working_dir, "/transformed_sagittal_", run_uuid, ".png"))
    regtools.overlay_slices(
        static, warped_moving, None, 1, "Static", "Moving",
        "%s%s%s%s" % (working_dir, "/transformed_coronal_", run_uuid, ".png"))
    regtools.overlay_slices(
        static, warped_moving, None, 2, "Static", "Moving",
        "%s%s%s%s" % (working_dir, "/transformed_axial_", run_uuid, ".png"))

    return mapping, affine_map, warped_fa
def main():
    # reads the tractography data in trk format
    # extracts streamlines and the file header. Streamlines should be in the same coordinate system as the FA map (used later).
    # input example: '/home/Example_data/tracts.trk'
    tractography_file = input(
        "Please, specify the file with tracts that you would like to analyse. File should be in the trk format. "
    )

    streams, hdr = load_trk(tractography_file)  # for old DIPY version
    # sft = load_trk(tractography_file, tractography_file)
    # streams = sft.streamlines
    streams_array = np.asarray(streams)
    print('imported tractography data:' + tractography_file)

    # load T1fs_conform image that operates in the same coordinates as simnibs,
    # except that the center of the mesh is located at the image center
    # T1fs_conform image should be generated in advance during the head meshing procedure
    # input example: fname_T1='/home/Example_data/T1fs_conform.nii.gz'

    fname_T1 = input(
        "Please, specify the T1fs_conform image that has been generated during head meshing procedure. "
    )
    data_T1, affine_T1 = load_nifti(fname_T1)

    # load FA image in the same coordinates as tracts
    # input example:fname_FA='/home/Example_data/DTI_FA.nii'
    fname_FA = input("Please, specify the FA image. ")
    data_FA, affine_FA = load_nifti(fname_FA)

    print('loaded T1fs_conform.nii and FA images')

    # specify the head mesh file that is used later in simnibs to simulate induced electric field
    # input example:'/home/Example_data/SUBJECT_MESH.msh'
    global mesh_path
    mesh_path = input("Please, specify the head mesh file. ")

    last_slach = max([i for i, ltr in enumerate(mesh_path) if ltr == '/']) + 1
    global subject_name
    subject_name = mesh_path[last_slach:-4]

    # specify the directory where you would like to save the simulation results
    # input example: '/home/Example_data/Output'
    global out_dir
    out_dir = input(
        "Please specify the directory where you would like to save the simulation results. "
    )
    out_dir = out_dir + '/simulation_at_pos_'

    # Co-registration of T1fs_conform and FA images. Performed in 4 steps.
    # Step 1. Calculation of the center of mass transform. Used later as starting transform.
    c_of_mass = transform_centers_of_mass(data_T1, affine_T1, data_FA,
                                          affine_FA)
    print('calculated c_of_mass transformation')

    # Step 2. Calculation of a 3D translation transform. Used in the next step as starting transform.
    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(data_T1,
                                  data_FA,
                                  transform,
                                  params0,
                                  affine_T1,
                                  affine_FA,
                                  starting_affine=starting_affine)
    print('calculated 3D translation transform')

    # Step 3. Calculation of a Rigid 3D transform. Used in the next step as starting transform
    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(data_T1,
                            data_FA,
                            transform,
                            params0,
                            affine_T1,
                            affine_FA,
                            starting_affine=starting_affine)
    print('calculated Rigid 3D transform')

    # Step 4. Calculation of an affine transform. Used for co-registration of T1 and FA images.
    transform = AffineTransform3D()
    params0 = None
    starting_affine = rigid.affine
    affine = affreg.optimize(data_T1,
                             data_FA,
                             transform,
                             params0,
                             affine_T1,
                             affine_FA,
                             starting_affine=starting_affine)

    print('calculated Affine 3D transform')

    identity = np.eye(4)

    inv_affine_FA = np.linalg.inv(affine_FA)
    inv_affine_T1 = np.linalg.inv(affine_T1)
    inv_affine = np.linalg.inv(affine.affine)

    # transform streamlines from world coordinates to FA voxel space
    new_streams_FA = streamline.transform_streamlines(streams, inv_affine_FA)
    new_streams_FA_array = np.asarray(new_streams_FA)

    T1_to_FA = np.dot(inv_affine_FA, np.dot(affine.affine, affine_T1))
    FA_to_T1 = np.linalg.inv(T1_to_FA)
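    # T1_to_FA maps T1 voxel indices to FA voxel indices through world space:
    # T1 voxel -> T1 world (affine_T1) -> FA world (affine.affine, estimated
    # above with T1 as static and FA as moving) -> FA voxel (inv_affine_FA).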

    # transform streamlines from FA voxel space to T1 voxel space
    new_streams_T1 = streamline.transform_streamlines(new_streams_FA, FA_to_T1)
    global new_streams_T1_array
    new_streams_T1_array = np.asarray(new_streams_T1)

    # calculate the derivatives along the streamlines to get their local orientation at each point
    global streams_array_derivative
    streams_array_derivative = copy.deepcopy(new_streams_T1_array)

    print('calculating streamline derivatives')
    for stream in range(len(new_streams_T1_array)):
        my_stream = new_streams_T1_array[stream]
        for t in range(len(my_stream[:, 0])):
            streams_array_derivative[stream][t, 0] = my_deriv(t, my_stream[:, 0])
            streams_array_derivative[stream][t, 1] = my_deriv(t, my_stream[:, 1])
            streams_array_derivative[stream][t, 2] = my_deriv(t, my_stream[:, 2])
            deriv_norm = np.linalg.norm(streams_array_derivative[stream][t, :])
            streams_array_derivative[stream][t, :] = \
                streams_array_derivative[stream][t, :] / deriv_norm

    # create a torus representing the TMS coil in the interactive window

    torus = vtk.vtkParametricTorus()
    torus.SetRingRadius(5)
    torus.SetCrossSectionRadius(2)

    torusSource = vtk.vtkParametricFunctionSource()
    torusSource.SetParametricFunction(torus)
    torusSource.SetScalarModeToPhase()

    torusMapper = vtk.vtkPolyDataMapper()
    torusMapper.SetInputConnection(torusSource.GetOutputPort())
    torusMapper.SetScalarRange(0, 360)

    torusActor = vtk.vtkActor()
    torusActor.SetMapper(torusMapper)

    torus_pos_x = 100
    torus_pos_y = 129
    torus_pos_z = 211
    torusActor.SetPosition(torus_pos_x, torus_pos_y, torus_pos_z)

    list_streams_T1 = list(new_streams_T1)
    # append one dummy streamline of length 1 at [0, 0, 0] to work around issues with actor.line during visualization
    list_streams_T1.append(np.array([0, 0, 0]))

    global bundle_native
    bundle_native = list_streams_T1

    # generate a list of colors used later to visualize the stimulation effects
    effect_max = 0.100
    effect_min = -0.100
    global colors
    colors = [
        np.random.rand(*current_streamline.shape)
        for current_streamline in bundle_native
    ]
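    # the random values only allocate a per-point color array of the correct
    # shape for each streamline; they are overwritten with colormap values below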

    for my_streamline in range(len(bundle_native) - 1):
        my_stream = copy.deepcopy(bundle_native[my_streamline])
        for point in range(len(my_stream)):
            colors[my_streamline][point] = vtkplotter.colors.colorMap(
                (effect_min + effect_max) / 2,
                name='jet',
                vmin=effect_min,
                vmax=effect_max)

    colors[my_streamline + 1] = vtkplotter.colors.colorMap(effect_min,
                                                           name='jet',
                                                           vmin=effect_min,
                                                           vmax=effect_max)

    # Visualization of fibers over T1

    # i_coord = 0
    # j_coord = 0
    # k_coord = 0
    # global number_of_stimulations
    number_of_stimulations = 0

    actor_line_list = []

    scene = window.Scene()
    scene.clear()
    scene.background((0.5, 0.5, 0.5))

    world_coords = False
    shape = data_T1.shape

    lut = actor.colormap_lookup_table(scale_range=(effect_min, effect_max),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.))

    # # the lines below are for a non-interactive demonstration run only;
    # # they should remain commented unless you set "interactive" to False
    # lut, colors = change_TMS_effects(torus_pos_x, torus_pos_y, torus_pos_z)
    # bar =  actor.scalar_bar(lut)
    # bar.SetTitle("TMS effect")
    # bar.SetHeight(0.3)
    # bar.SetWidth(0.10)
    # bar.SetPosition(0.85, 0.3)
    # scene.add(bar)

    actor_line_list.append(
        actor.line(bundle_native,
                   colors,
                   linewidth=5,
                   fake_tube=True,
                   lookup_colormap=lut))

    if not world_coords:
        image_actor_z = actor.slicer(data_T1, identity)
    else:
        image_actor_z = actor.slicer(data_T1, identity)

    slicer_opacity = 0.6
    image_actor_z.opacity(slicer_opacity)

    image_actor_x = image_actor_z.copy()
    x_midpoint = int(np.round(shape[0] / 2))
    image_actor_x.display_extent(x_midpoint, x_midpoint, 0, shape[1] - 1, 0,
                                 shape[2] - 1)

    image_actor_y = image_actor_z.copy()
    y_midpoint = int(np.round(shape[1] / 2))
    image_actor_y.display_extent(0, shape[0] - 1, y_midpoint, y_midpoint, 0,
                                 shape[2] - 1)
    """
    Connect the actors with the scene.
    """

    scene.add(actor_line_list[0])
    scene.add(image_actor_z)
    scene.add(image_actor_x)
    scene.add(image_actor_y)

    show_m = window.ShowManager(scene, size=(1200, 900))
    show_m.initialize()
    """
    Create sliders to move the slices and change their opacity.
    """

    line_slider_z = ui.LineSlider2D(min_value=0,
                                    max_value=shape[2] - 1,
                                    initial_value=shape[2] / 2,
                                    text_template="{value:.0f}",
                                    length=140)

    line_slider_x = ui.LineSlider2D(min_value=0,
                                    max_value=shape[0] - 1,
                                    initial_value=shape[0] / 2,
                                    text_template="{value:.0f}",
                                    length=140)

    line_slider_y = ui.LineSlider2D(min_value=0,
                                    max_value=shape[1] - 1,
                                    initial_value=shape[1] / 2,
                                    text_template="{value:.0f}",
                                    length=140)

    opacity_slider = ui.LineSlider2D(min_value=0.0,
                                     max_value=1.0,
                                     initial_value=slicer_opacity,
                                     length=140)
    """
    Сallbacks for the sliders.
    """
    def change_slice_z(slider):
        z = int(np.round(slider.value))
        image_actor_z.display_extent(0, shape[0] - 1, 0, shape[1] - 1, z, z)

    def change_slice_x(slider):
        x = int(np.round(slider.value))
        image_actor_x.display_extent(x, x, 0, shape[1] - 1, 0, shape[2] - 1)

    def change_slice_y(slider):
        y = int(np.round(slider.value))
        image_actor_y.display_extent(0, shape[0] - 1, y, y, 0, shape[2] - 1)

    def change_opacity(slider):
        slicer_opacity = slider.value
        image_actor_z.opacity(slicer_opacity)
        image_actor_x.opacity(slicer_opacity)
        image_actor_y.opacity(slicer_opacity)

    line_slider_z.on_change = change_slice_z
    line_slider_x.on_change = change_slice_x
    line_slider_y.on_change = change_slice_y
    opacity_slider.on_change = change_opacity
    """
    Сreate text labels to identify the sliders.
    """

    def build_label(text):
        label = ui.TextBlock2D()
        label.message = text
        label.font_size = 18
        label.font_family = 'Arial'
        label.justification = 'left'
        label.bold = False
        label.italic = False
        label.shadow = False
        label.background = (0, 0, 0)
        label.color = (1, 1, 1)
        return label

    line_slider_label_z = build_label(text="Z Slice")
    line_slider_label_x = build_label(text="X Slice")
    line_slider_label_y = build_label(text="Y Slice")
    opacity_slider_label = build_label(text="Opacity")
    """
    Create a ``panel`` to contain the sliders and labels.
    """

    panel = ui.Panel2D(size=(300, 200),
                       color=(1, 1, 1),
                       opacity=0.1,
                       align="right")
    panel.center = (1030, 120)

    panel.add_element(line_slider_label_x, (0.1, 0.75))
    panel.add_element(line_slider_x, (0.38, 0.75))
    panel.add_element(line_slider_label_y, (0.1, 0.55))
    panel.add_element(line_slider_y, (0.38, 0.55))
    panel.add_element(line_slider_label_z, (0.1, 0.35))
    panel.add_element(line_slider_z, (0.38, 0.35))
    panel.add_element(opacity_slider_label, (0.1, 0.15))
    panel.add_element(opacity_slider, (0.38, 0.15))

    scene.add(panel)
    """
    Create a ``panel`` to show the value of a picked voxel.
    """

    label_position = ui.TextBlock2D(text='Position:')
    label_value = ui.TextBlock2D(text='Value:')

    result_position = ui.TextBlock2D(text='')
    result_value = ui.TextBlock2D(text='')

    text2 = ui.TextBlock2D(text='Calculate')

    panel_picking = ui.Panel2D(size=(250, 125),
                               color=(1, 1, 1),
                               opacity=0.1,
                               align="left")
    panel_picking.center = (200, 120)

    panel_picking.add_element(label_position, (0.1, 0.75))
    panel_picking.add_element(label_value, (0.1, 0.45))

    panel_picking.add_element(result_position, (0.45, 0.75))
    panel_picking.add_element(result_value, (0.45, 0.45))

    panel_picking.add_element(text2, (0.1, 0.15))

    icon_files = []
    icon_files.append(('left', read_viz_icons(fname='circle-left.png')))
    button_example = ui.Button2D(icon_fnames=icon_files, size=(100, 30))
    panel_picking.add_element(button_example, (0.5, 0.1))

    def change_text_callback(i_ren, obj, button):
        text2.message = str(i_coord) + ' ' + str(j_coord) + ' ' + str(k_coord)
        torusActor.SetPosition(i_coord, j_coord, k_coord)
        print(i_coord, j_coord, k_coord)
        lut, colors = change_TMS_effects(i_coord, j_coord, k_coord)
        scene.rm(actor_line_list[0])
        actor_line_list.append(
            actor.line(bundle_native,
                       colors,
                       linewidth=5,
                       fake_tube=True,
                       lookup_colormap=lut))
        scene.add(actor_line_list[1])

        nonlocal number_of_stimulations
        global bar
        if number_of_stimulations > 0:
            scene.rm(bar)
        else:
            number_of_stimulations = number_of_stimulations + 1

        bar = actor.scalar_bar(lut)
        bar.SetTitle("TMS effect")

        bar.SetHeight(0.3)
        bar.SetWidth(0.10)  # the width is set first
        bar.SetPosition(0.85, 0.3)
        scene.add(bar)

        actor_line_list.pop(0)
        i_ren.force_render()

    button_example.on_left_mouse_button_clicked = change_text_callback

    scene.add(panel_picking)
    scene.add(torusActor)

    def left_click_callback(obj, ev):
        """Get the value of the clicked voxel and show it in the panel."""
        event_pos = show_m.iren.GetEventPosition()

        obj.picker.Pick(event_pos[0], event_pos[1], 0, scene)

        global i_coord, j_coord, k_coord
        i_coord, j_coord, k_coord = obj.picker.GetPointIJK()
        print(i_coord, j_coord, k_coord)
        result_position.message = '({}, {}, {})'.format(
            str(i_coord), str(j_coord), str(k_coord))
        result_value.message = '%.8f' % data_T1[i_coord, j_coord, k_coord]
        torusActor.SetPosition(i_coord, j_coord, k_coord)

    image_actor_z.AddObserver('LeftButtonPressEvent', left_click_callback, 1.0)

    global size
    size = scene.GetSize()

    def win_callback(obj, event):
        global size
        if size != obj.GetSize():
            size_old = size
            size = obj.GetSize()
            size_change = [size[0] - size_old[0], 0]
            panel.re_align(size_change)

    show_m.initialize()
    """
    Set the following variable to ``True`` to interact with the datasets in 3D.
    """
    interactive = True

    scene.zoom(2.0)
    scene.reset_clipping_range()
    scene.set_camera(position=(-642.07, 495.40, 148.49),
                     focal_point=(127.50, 127.50, 127.50),
                     view_up=(0.02, -0.01, 1.00))

    if interactive:
        show_m.add_window_callback(win_callback)
        show_m.render()
        show_m.start()
    else:
        window.record(scene,
                      out_path=out_dir + '/bundles_and_effects.png',
                      size=(1200, 900),
                      reset_camera=True)
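

# conventional script entry point (assumed; the call itself is not shown in the original listing)
if __name__ == '__main__':
    main()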
Example #19
0
def dots_segmentation(tensor_image,
                      mask,
                      atlas_dir,
                      wm_atlas=1,
                      max_iter=25,
                      convergence_threshold=0.005,
                      s_I=1 / 42,
                      c_O=0.5,
                      max_angle=67.5,
                      save_data=False,
                      overwrite=False,
                      output_dir=None,
                      file_name=None):
    """DOTS segmentation

    Segment major white matter tracts in diffusion tensor images using the
    Diffusion Oriented Tract Segmentation (DOTS) algorithm.
    
    Parameters
    ----------
    tensor_image: niimg
        Input image containing the diffusion tensor coefficients in the
        following order: volumes 0-5: D11, D22, D33, D12, D13, D23
    mask: niimg
        Binary brain mask image which limits computation to the defined volume.
    atlas_dir: str
        Path to directory where the DOTS atlas information is stored. The atlas
        information should be stored in a subdirectory called 'DOTS_atlas' as
        generated by nighres.data.download_DOTS_atlas().
    wm_atlas: int, optional
        Define which white matter atlas to use. Option 1 for 23 tracts [2]_ 
        and option 2 for 39 tracts [1]_. (default is 1)
    max_iter: int, optional
        Maximum number of iterations of the iterated conditional modes
        algorithm. (default is 25)
    convergence_threshold: float, optional
        Threshold for when the iterated conditional modes algorithm is
        considered to have converged. Defined as the fraction of labels that
        change during one step of the algorithm. (default is 0.005)
    s_I: float, optional
        Parameter controlling how isotropic label energies propagate to their
        neighborhood. (default is 1/42)
    c_O: float, optional
        Weight parameter for unclassified white matter atlas prior. (default
        is 1/2)
    max_angle: float, optional
        Maximum angle (in degrees) between principal tensor directions before 
        connectivity coefficient c becomes negative. Possible values between 0
        and 90. (default is 67.5)
    save_data: bool, optional
        Save output data to file. (default is False)
    overwrite: bool, optional
        Overwrite existing results. (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist.
    file_name: str, optional
        Desired base name for output files without file extension, suffixes 
        will be added.

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (type of output files in brackets)

        * segmentation (array_like): Hard segmentation of white matter.
        * posterior (array_like): Posterior probabilities of tracts.
        
    Notes
    ----------
    Algorithm details can be found in the references below.

    References
    ----------
    .. [1] Bazin, Pierre-Louis, et al. "Direct segmentation of the major white 
       matter tracts in diffusion tensor images." Neuroimage (2011)
       doi: https://doi.org/10.1016/j.neuroimage.2011.06.020
    .. [2] Bazin, Pierre-Louis, et al. "Efficient MRF segmentation of DTI white 
       matter tracts using an overlapping fiber model." Proceedings of the 
       International Workshop on Diffusion Modelling and Fiber Cup (2009)
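
    Examples
    ----------
    A minimal call sketch; the file names below are placeholders, and
    ``atlas_dir`` must contain the 'DOTS_atlas' folder downloaded with
    nighres.data.download_DOTS_atlas():

    >>> res = dots_segmentation('dti_tensor.nii.gz', 'brain_mask.nii.gz',
    ...                         '/path/to/atlas_parent', wm_atlas=1)
    >>> seg, post = res['segmentation'], res['posterior']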
    """

    print('\nDOTS white matter tract segmentation')

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, tensor_image)

        seg_file = os.path.join(
            output_dir,
            _fname_4saving(module=__name__,
                           file_name=file_name,
                           rootfile=tensor_image,
                           suffix='dots-seg'))

        proba_file = os.path.join(
            output_dir,
            _fname_4saving(module=__name__,
                           file_name=file_name,
                           rootfile=tensor_image,
                           suffix='dots-proba'))

        if overwrite is False \
            and os.path.isfile(seg_file) and os.path.isfile(proba_file) :
            print("skip computation (use existing results)")
            output = {'segmentation': seg_file, 'posterior': proba_file}
            return output

    # For external tools: dipy
    try:
        from dipy.align.transforms import AffineTransform3D
        from dipy.align.imaffine import MutualInformationMetric, AffineRegistration
    except ImportError:
        print('Error: Dipy could not be imported, it is required' +
              ' in order to run DOTS segmentation. \n (aborting)')
        return None

    # Ignore runtime warnings that arise from trying to divide by 0/nan
    # and all nan slices
    np.seterr(divide='ignore', invalid='ignore')

    # Define the scalar constant c_I
    c_I = 1 / 2

    # Define constant c_C that is used in direction coefficient calculation
    c_C = 90 / max_angle

    # Create an array containing the directions between neighbors
    v_xy = np.zeros((3, 3, 3, 3))
    for i in range(3):
        for j in range(3):
            for k in range(3):
                if (i, j, k) == (1, 1, 1):
                    v_xy[i, j, k, :] = np.nan
                else:
                    x = np.array([1, 0, 0])
                    y = np.array([0, 1, 0])
                    z = np.array([0, 0, 1])
                    c = np.array([1, 1, 1])
                    v_xy[i, j, k, :] = i * x + y * j + z * k - c
                    v_xy[i,j,k,:] = v_xy[i,j,k,:] / \
                                    np.linalg.norm(v_xy[i,j,k,:])
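    # v_xy[i, j, k] now holds the unit vector pointing towards the neighbor at
    # offset (i - 1, j - 1, k - 1); the central entry (1, 1, 1) is NaN.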

    # Load tensor image
    tensor_volume = load_volume(tensor_image).get_fdata()

    # Load brain mask
    brain_mask = load_volume(mask).get_fdata().astype(bool)

    # Get dimensions of diffusion data
    xs, ys, zs, _ = tensor_volume.shape
    DWI_affine = load_volume(tensor_image).affine

    # Calculate diffusion tensor eigenvalues and eigenvectors
    tenfit = np.zeros((xs, ys, zs, 3, 3))
    tenfit[:, :, :, 0, 0] = tensor_volume[:, :, :, 0]
    tenfit[:, :, :, 1, 1] = tensor_volume[:, :, :, 1]
    tenfit[:, :, :, 2, 2] = tensor_volume[:, :, :, 2]
    tenfit[:, :, :, 0, 1] = tensor_volume[:, :, :, 3]
    tenfit[:, :, :, 1, 0] = tensor_volume[:, :, :, 3]
    tenfit[:, :, :, 0, 2] = tensor_volume[:, :, :, 4]
    tenfit[:, :, :, 2, 0] = tensor_volume[:, :, :, 4]
    tenfit[:, :, :, 1, 2] = tensor_volume[:, :, :, 5]
    tenfit[:, :, :, 2, 1] = tensor_volume[:, :, :, 5]
    tenfit[np.isnan(tenfit)] = 0
    evals, evecs = np.linalg.eig(tenfit)
    evals, evecs = np.real(evals), np.real(evecs)
    for i in range(xs):
        for j in range(ys):
            for k in range(zs):
                idx = np.argsort(evals[i, j, k, :])[::-1]
                evecs[i, j, k, :, :] = evecs[i, j, k, :, idx].T
                evals[i, j, k, :] = evals[i, j, k, idx]
    evals[~brain_mask] = 0
    evecs[~brain_mask] = 0

    # Calculate FA
    R = tenfit / np.trace(tenfit, axis1=3, axis2=4)[:, :, :, np.newaxis,
                                                    np.newaxis]
    FA = np.sqrt(0.5 * (3 - 1 / (np.trace(np.matmul(R, R), axis1=3, axis2=4))))
    FA[np.isnan(FA)] = 0
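    # This is algebraically equivalent to the standard definition
    # FA = sqrt(3/2 * sum((l_i - l_mean)^2) / sum(l_i^2)), rewritten in terms
    # of the normalized tensor R = D / trace(D).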

    if wm_atlas == 1:

        # Use smaller atlas
        # Indices are
        # 0 for isotropic regions
        # 1 for unclassified white matter
        # 2-22 for individual tracts
        # 23-72 for overlapping tracts
        N_t = 23
        N_o = 50
        atlas_path = os.path.join(atlas_dir, 'DOTS_atlas')
        fiber_p = nb.load(os.path.join(atlas_path,
                                       'fiber_p.nii.gz')).get_fdata()
        max_p = np.nanmax(fiber_p[:, :, :, 2::], axis=3)
        fiber_dir = nb.load(os.path.join(atlas_path,
                                         'fiber_dir.nii.gz')).get_fdata()
        atlas_affine = nb.load(os.path.join(atlas_path,
                                            'fiber_p.nii.gz')).affine
        del_idx = [
            9, 10, 13, 14, 15, 16, 21, 26, 27, 28, 29, 30, 31, 32, 33, 36, 37,
            38
        ]
        fiber_p = np.delete(fiber_p, del_idx, axis=3)
        fiber_dir = np.delete(fiber_dir, del_idx, axis=4)
        tract_pair_sets = tract_pair_sets_1

    elif wm_atlas == 2:

        # Use full atlas
        # Indices are
        # 0 for isotropic regions
        # 1 for unclassified white matter
        # 2-40 for individual tracts
        # 41-225 for overlapping tracts
        N_t = 41
        N_o = 185
        atlas_path = os.path.join(atlas_dir, 'DOTS_atlas')
        fiber_p = nb.load(os.path.join(atlas_path,
                                       'fiber_p.nii.gz')).get_fdata()
        max_p = np.nanmax(fiber_p[:, :, :, 2::], axis=3)
        fiber_dir = nb.load(os.path.join(atlas_path,
                                         'fiber_dir.nii.gz')).get_fdata()
        atlas_affine = nb.load(os.path.join(atlas_path,
                                            'fiber_p.nii.gz')).affine
        tract_pair_sets = tract_pair_sets_2

    print('Diffusion and atlas data loaded ')

    # Register atlas priors to DWI data with DiPy
    print('Registering atlas priors to DWI data')
    metric = MutualInformationMetric(nbins=32, sampling_proportion=None)
    affreg = AffineRegistration(metric=metric,
                                level_iters=[10000, 1000, 100],
                                sigmas=[3.0, 1.0, 0.0],
                                factors=[4, 2, 1])
    transformation = affreg.optimize(FA,
                                     max_p,
                                     AffineTransform3D(),
                                     params0=None,
                                     static_grid2world=DWI_affine,
                                     moving_grid2world=atlas_affine,
                                     starting_affine='mass')
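    # starting_affine='mass' tells DIPY to initialize the optimization with a
    # center-of-mass alignment of the two images.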
    reg_fiber_p = np.zeros((xs, ys, zs, fiber_p.shape[-1]))
    for i in range(fiber_p.shape[-1]):
        reg_fiber_p[:, :, :, i] = transformation.transform(fiber_p[:, :, :, i])
    fiber_p = reg_fiber_p
    reg_fiber_dir = np.zeros((xs, ys, zs, 3, fiber_dir.shape[-1]))
    for i in range(fiber_dir.shape[-1]):
        for j in range(3):
            reg_fiber_dir[:, :, :, j,
                          i] = transformation.transform(fiber_dir[:, :, :, j,
                                                                  i])
    fiber_dir = reg_fiber_dir
    fiber_p[~brain_mask, 0] = 1
    fiber_p[~brain_mask, 1:] = 0
    fiber_dir[~brain_mask] = 0
    print('Finished registration of atlas priors to DWI data')

    # Calculate diffusion type indices
    print('Calculating d_T, d_O, d_I')
    d_T = (evals[:, :, :, 0] - evals[:, :, :, 1]) / evals[:, :, :, 0]
    d_O = (evals[:, :, :, 0] - evals[:, :, :, 2]) / evals[:, :, :, 0]
    d_I = evals[:, :, :, 2] / evals[:, :, :, 0]
    print('Finished calculating d_T, d_O, d_I')

    # Calculate xplus and xminus
    x_m_s_T = np.zeros((xs, ys, zs, 3))
    x_p_s_T = np.zeros((xs, ys, zs, 3))
    x_m_s_O = np.zeros((xs, ys, zs, 3))
    x_p_s_O = np.zeros((xs, ys, zs, 3))
    s_T_x_m = np.zeros((xs, ys, zs))
    s_T_x_p = np.zeros((xs, ys, zs))
    s_O_x_m = np.zeros((xs, ys, zs))
    s_O_x_p = np.zeros((xs, ys, zs))
    print('Calculating x^+, x^-, s_T, s_O')
    for i in range(1, xs - 1):
        print(str(np.round((i / xs) * 100, 0)) + ' %', end="\r")
        for j in range(1, ys - 1):
            for k in range(1, zs - 1):
                if brain_mask[i, j, k]:
                    x_m_s_T[i, j, k, :], s_T_x_m[i, j, k] = _calc_x_minus_s_T(
                        i, j, k, evecs, v_xy)
                    x_p_s_T[i, j, k, :], s_T_x_p[i, j, k] = _calc_x_plus_s_T(
                        i, j, k, evecs, v_xy)
                    x_m_s_O[i, j, k, :], s_O_x_m[i, j, k] = _calc_x_minus_s_O(
                        i, j, k, evals, evecs, v_xy)
                    x_p_s_O[i, j, k, :], s_O_x_p[i, j, k] = _calc_x_plus_s_O(
                        i, j, k, evals, evecs, v_xy)
    x_p_s_T = x_p_s_T.astype(int)
    x_m_s_T = x_m_s_T.astype(int)
    x_p_s_O = x_p_s_O.astype(int)
    x_m_s_O = x_m_s_O.astype(int)
    print('Finished calculating x^+, x^-, s_T, s_O')

    # Calculate shape prior arrays
    print('Calculating u_l, u_lm')
    u_l = fiber_p**2 / np.nansum(fiber_p, axis=3)[:, :, :, np.newaxis]
    u_lm = np.zeros((xs, ys, zs, len(tract_pair_sets)))
    for idx in range(len(tract_pair_sets)):
        l, m = tract_pair_sets[idx]
        u_lm[:,:,:,idx] = fiber_p[:,:,:,l]*fiber_p[:,:,:,m]*(fiber_p[:,:,:,l]
                          + fiber_p[:,:,:,m]) / \
                          np.nansum(fiber_p, axis=3)
    u_l[:, :, :, 1] *= c_O  # Scale by weight parameter
    print('Finished calculating u_l, u_lm')

    # Calculate direction coefficients
    c_l = np.zeros((xs, ys, zs, N_t)) * np.nan
    c_lm = np.zeros((xs, ys, zs, len(tract_pair_sets))) * np.nan
    print('Calculating c_l, c_lm')
    for i in range(xs):
        print(str(np.round((i / xs) * 100, 0)) + ' %', end="\r")
        for j in range(ys):
            for k in range(zs):
                for l in range(1, N_t):
                    if fiber_p[i, j, k, l] != 0:
                        c_l[i, j, k, l] = _calc_c_l(i, j, k, l, None, evecs,
                                                    fiber_dir, c_C)
                for idx in range(len(tract_pair_sets)):
                    l, m = tract_pair_sets[idx]
                    if fiber_p[i, j, k, l] != 0 and fiber_p[i, j, k, m] != 0:
                        c_lm[i, j, k, idx] = _calc_c_l(i, j, k, l, m, evecs,
                                                       fiber_dir, c_C)
    print('Finished calculating c_l, c_lm')

    # Mask arrays
    d_T[~brain_mask] = np.nan
    d_O[~brain_mask] = np.nan
    d_I[~brain_mask] = 1
    fiber_p[~brain_mask, 0] = 1
    fiber_p[~brain_mask, 1:] = np.nan
    fiber_dir[~brain_mask] = np.nan
    c_l[~brain_mask] = np.nan
    c_lm[~brain_mask] = np.nan
    u_l[~brain_mask] = np.nan
    u_l[~brain_mask, 0] = 1
    u_lm[~brain_mask] = np.nan
    s_T_x_p[~brain_mask] = np.nan
    s_T_x_m[~brain_mask] = np.nan
    s_O_x_p[~brain_mask] = np.nan
    s_O_x_m[~brain_mask] = np.nan

    # Only ROIs where p != 0 are of interest
    u_l[u_l == 0] = np.nan
    u_lm[u_lm == 0] = np.nan

    # Calculate energy based on unary term only
    MRF_V1 = _calc_V1(d_T, d_O, d_I, u_l, u_lm, c_l, c_lm, c_I, fiber_p,
                      tract_pair_sets, N_t, N_o, brain_mask)

    # Maximize U
    print('Maximizing U')
    curr_U = np.copy(MRF_V1)
    iteration = 0
    change_in_labels = np.inf
    while iteration < max_iter and change_in_labels > convergence_threshold:
        at = time.time()
        prev_U = np.copy(curr_U)
        prev_segmentation = _calc_segmentation(prev_U)
        iteration += 1
        print('Iteration ' + str(iteration))

        curr_U = _calc_U(prev_U, d_T, d_O, d_I, u_l, u_lm, c_l, c_lm, c_I,
                         fiber_p, tract_pair_sets, s_I, s_T_x_p, s_T_x_m,
                         s_O_x_m, s_O_x_p, brain_mask, N_t, N_o, x_m_s_T,
                         x_p_s_T, x_m_s_O, x_p_s_O)

        curr_segmentation = _calc_segmentation(curr_U)
        change_in_labels = (np.nansum(prev_segmentation != curr_segmentation) /
                            np.nansum(brain_mask))
        bt = time.time()
        print('Iteration ' + str(iteration) + ' took ' + str(bt - at) +
              ' seconds')
        print('Total U = ' + str(np.nansum(curr_U)))
        print('Fraction of changed labels = ' + str(change_in_labels))
    print('Finished maximizing U')

    # Calculate posterior probabilities
    print('Calculating posterior probabilities')
    fiber_posterior = np.zeros(fiber_p.shape)
    curr_U[curr_U == 0] = np.nan
    for l in range(N_t):
        print(str(np.round((l / N_t) * 100, 0)) + ' %', end="\r")
        fiber_posterior[:, :, :, l] = calc_posterior_probability(l, curr_U, 1)
    fiber_posterior[fiber_posterior == 0] = np.nan
    fiber_posterior[np.isinf(fiber_posterior)] = np.nan
    curr_U[np.isnan(curr_U)] = 0
    print('Finished calculating posterior probabilities')

    # Save results
    if save_data:
        save_volume(seg_file, nb.Nifti1Image(curr_segmentation, DWI_affine))
        save_volume(proba_file, nb.Nifti1Image(fiber_posterior, DWI_affine))

        return {'segmentation': seg_file, 'posterior': proba_file}

    else:
        # Return results
        return {
            'segmentation': curr_segmentation,
            'posterior': fiber_posterior
        }
Example #20
0
def register_save(inputpathdir, target_path, subject, outputpath, figspath,
                  params, registration_types, applydirs, verbose):
    anat_path = get_anat(inputpathdir, subject)
    #myanat = load_nifti(anat_path)
    myanat = nib.load(anat_path)
    anat_data = np.squeeze(myanat.get_data()[..., 0])
    anat_affine = myanat.affine
    anat_hdr = myanat.header
    vox_size = myanat.header.get_zooms()[0]
    #mynifti = load_nifti("/Volumes/Data/Badea/Lab/19abb14/N57437_nii4D.nii")
    #anat_data = np.squeeze(myanat[0])[..., 0]
    #anat_affine = myanat[1]
    #hdr = myanat.header

    mytarget = nib.load(target_path)
    target_data = np.squeeze(mytarget.get_data()[..., 0])
    target_affine = mytarget.affine

    identity = np.eye(4)

    affine_map = AffineMap(identity, target_data.shape, target_affine,
                           anat_data.shape, anat_affine)
    resampled = affine_map.transform(anat_data)
    """
    regtools.overlay_slices(target_data, resampled, None, 0,
                            "target_data", "anat_data", figspath + "resampled_0.png")
    regtools.overlay_slices(target_data, resampled, None, 1,
                            "target_data", "anat_data", figspath + "resampled_1.png")
    regtools.overlay_slices(target_data, resampled, None, 2,
                            "target_data", "anat_data", figspath + "resampled_2.png")
    """
    c_of_mass = transform_centers_of_mass(target_data, target_affine,
                                          anat_data, anat_affine)
    apply_niftis = []
    apply_trks = []
    if inputpathdir in applydirs:
        applyfiles = [anat_path]
    else:
        applyfiles = []
    for applydir in applydirs:
        apply_niftis.extend(get_niftis(applydir, subject))
        apply_trks.extend(get_trks(applydir, subject))

    if "center_mass" in registration_types:

        if apply_trks:
            metric = CCMetric(3)
            level_iters = [10, 10, 5]
            sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
            mapping = sdr.optimize(target_data, anat_data, target_affine,
                                   anat_affine, c_of_mass.affine)

        for apply_nifti in apply_niftis:
            fname = os.path.basename(apply_nifti).split(".")[0]
            fpath = outputpath + fname + "_centermass.nii"
            applynii = nib.load(apply_nifti)
            apply_data = applynii.get_data()
            apply_affine = applynii.affine
            apply_hdr = myanat.header

            if len(np.shape(apply_data)) == 4:
                transformed_all = c_of_mass.transform(apply_data, apply4D=True)
                transformed = transformed_all[:, :, :, 0]
            else:
                transformed_all = c_of_mass.transform(apply_data)
                transformed = transformed_all
            save_nifti(fpath, transformed_all, apply_affine, hdr=apply_hdr)
            if figspath is not None:
                regtools.overlay_slices(target_data, transformed, None, 0,
                                        "target_data", "Transformed",
                                        figspath + fname + "_centermass_1.png")
                regtools.overlay_slices(target_data, transformed, None, 1,
                                        "target_data", "Transformed",
                                        figspath + fname + "_centermass_2.png")
                regtools.overlay_slices(target_data, transformed, None, 2,
                                        "target_data", "Transformed",
                                        figspath + fname + "_centermass_3.png")
            if verbose:
                print("Saved the file at " + fpath)
        #mapping = sdr.optimize(target_data, anat_data, target_affine, anat_affine,
        #                       c_of_mass.affine)
        #warped_moving = mapping.transform(anat_data)
        for apply_trk in apply_trks:

            fname = os.path.basename(apply_trk).split(".")[0]
            fpath = outputpath + fname + "_centermass.trk"

            sft = load_tractogram(apply_trk, 'same')
            target_isocenter = np.diag(
                np.array([-vox_size, vox_size, vox_size, 1]))
            origin_affine = affine_map.affine.copy()
            origin_affine[0][3] = -origin_affine[0][3]
            origin_affine[1][3] = -origin_affine[1][3]
            origin_affine[2][3] = origin_affine[2][3] / vox_size

            origin_affine[1][3] = origin_affine[1][3] / vox_size**2

            # Apply the deformation and correct for the extents
            mni_streamlines = deform_streamlines(
                sft.streamlines,
                deform_field=mapping.get_forward_field(),
                stream_to_current_grid=target_isocenter,
                current_grid_to_world=origin_affine,
                stream_to_ref_grid=target_isocenter,
                ref_grid_to_world=np.eye(4))

            if has_fury:

                show_template_bundles(mni_streamlines,
                                      anat_data,
                                      show=False,
                                      fname=figspath + fname +
                                      '_streamlines_centermass.png')

            sft = StatefulTractogram(mni_streamlines, myanat, Space.RASMM)

            save_tractogram(sft, fpath, bbox_valid_check=False)
            if verbose:
                print("Saved the file at " + fpath)

    metric = MutualInformationMetric(params.nbins, params.sampling_prop)

    if "AffineRegistration" in registration_types:
        affreg = AffineRegistration(metric=metric,
                                    level_iters=params.level_iters,
                                    sigmas=params.sigmas,
                                    factors=params.factors)

        transform = TranslationTransform3D()
        params0 = None
        starting_affine = c_of_mass.affine
        translation = affreg.optimize(target_data,
                                      anat_data,
                                      transform,
                                      params0,
                                      target_affine,
                                      anat_affine,
                                      starting_affine=starting_affine)

        if apply_trks:
            metric = CCMetric(3)
            level_iters = [10, 10, 5]
            sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
            mapping = sdr.optimize(target_data, anat_data, target_affine,
                                   anat_affine, translation.affine)

        for apply_nifti in apply_niftis:
            fname = os.path.basename(apply_nifti).split(".")[0]
            fpath = outputpath + fname + "_affinereg.nii"

            applynii = nib.load(apply_nifti)
            apply_data = applynii.get_data()
            apply_affine = applynii.affine
            apply_hdr = myanat.header

            if len(np.shape(apply_data)) == 4:
                transformed_all = translation.transform(apply_data,
                                                        apply4D=True)
                transformed = transformed_all[:, :, :, 0]
            else:
                transformed_all = translation.transform(apply_data)
                transformed = transformed_all
            save_nifti(fpath, transformed_all, anat_affine, hdr=anat_hdr)
            if figspath is not None:
                regtools.overlay_slices(target_data, transformed, None, 0,
                                        "target_data", "Transformed",
                                        figspath + fname + "_affinereg_1.png")
                regtools.overlay_slices(target_data, transformed, None, 1,
                                        "target_data", "Transformed",
                                        figspath + fname + "_affinereg_2.png")
                regtools.overlay_slices(target_data, transformed, None, 2,
                                        "target_data", "Transformed",
                                        figspath + fname + "_affinereg_3.png")
            if verbose:
                print("Saved the file at " + fpath)

        for apply_trk in apply_trks:

            fname = os.path.basename(apply_trk).split(".")[0]
            fpath = outputpath + fname + "_affinereg.trk"

            sft = load_tractogram(apply_trk, 'same')
            target_isocenter = np.diag(
                np.array([-vox_size, vox_size, vox_size, 1]))
            origin_affine = affine_map.affine.copy()
            origin_affine[0][3] = -origin_affine[0][3]
            origin_affine[1][3] = -origin_affine[1][3]
            origin_affine[2][3] = origin_affine[2][3] / vox_size

            origin_affine[1][3] = origin_affine[1][3] / vox_size**2

            # Apply the deformation and correct for the extents
            mni_streamlines = deform_streamlines(
                sft.streamlines,
                deform_field=mapping.get_forward_field(),
                stream_to_current_grid=target_isocenter,
                current_grid_to_world=origin_affine,
                stream_to_ref_grid=target_isocenter,
                ref_grid_to_world=np.eye(4))

            if has_fury:

                show_template_bundles(mni_streamlines,
                                      anat_data,
                                      show=False,
                                      fname=figspath + fname +
                                      '_streamlines_affinereg.png')

            sft = StatefulTractogram(mni_streamlines, myanat, Space.RASMM)

            save_tractogram(sft, fpath, bbox_valid_check=False)
            if verbose:
                print("Saved the file at " + fpath)

    if "RigidTransform3D" in registration_types:
        transform = RigidTransform3D()
        params0 = None
        if 'translation' not in locals():
            affreg = AffineRegistration(metric=metric,
                                        level_iters=params.level_iters,
                                        sigmas=params.sigmas,
                                        factors=params.factors)
            translation = affreg.optimize(target_data,
                                          anat_data,
                                          transform,
                                          params0,
                                          target_affine,
                                          anat_affine,
                                          starting_affine=c_of_mass.affine)
        starting_affine = translation.affine
        rigid = affreg.optimize(target_data,
                                anat_data,
                                transform,
                                params0,
                                target_affine,
                                anat_affine,
                                starting_affine=starting_affine)

        transformed = rigid.transform(anat_data)

        if apply_trks:
            metric = CCMetric(3)
            level_iters = [10, 10, 5]
            sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
            mapping = sdr.optimize(target_data, anat_data, target_affine,
                                   anat_affine, rigid.affine)

        for apply_nifti in apply_niftis:
            fname = os.path.basename(apply_nifti).split(".")[0]
            fpath = outputpath + fname + "_rigidtransf3d.nii"

            applynii = nib.load(apply_nifti)
            apply_data = applynii.get_data()
            apply_affine = applynii.affine
            apply_hdr = myanat.header

            if len(np.shape(apply_data)) == 4:
                transformed_all = rigid.transform(apply_data, apply4D=True)
                transformed = transformed_all[:, :, :, 0]
            else:
                transformed_all = rigid.transform(apply_data)
                transformed = transformed_all
            save_nifti(fpath, transformed_all, anat_affine, hdr=anat_hdr)
            if figspath is not None:
                regtools.overlay_slices(
                    target_data, transformed, None, 0, "target_data",
                    "Transformed", figspath + fname + "_rigidtransf3d_1.png")
                regtools.overlay_slices(
                    target_data, transformed, None, 1, "target_data",
                    "Transformed", figspath + fname + "_rigidtransf3d_2.png")
                regtools.overlay_slices(
                    target_data, transformed, None, 2, "target_data",
                    "Transformed", figspath + fname + "_rigidtransf3d_3.png")
            if verbose:
                print("Saved the file at " + fpath)

        for apply_trk in apply_trks:

            fname = os.path.basename(apply_trk).split(".")[0]
            fpath = outputpath + fname + "_rigidtransf3d.trk"

            sft = load_tractogram(apply_trk, 'same')
            target_isocenter = np.diag(
                np.array([-vox_size, vox_size, vox_size, 1]))
            origin_affine = affine_map.affine.copy()
            origin_affine[0][3] = -origin_affine[0][3]
            origin_affine[1][3] = -origin_affine[1][3]
            origin_affine[2][3] = origin_affine[2][3] / vox_size

            origin_affine[1][3] = origin_affine[1][3] / vox_size**2

            # Apply the deformation and correct for the extents
            mni_streamlines = deform_streamlines(
                sft.streamlines,
                deform_field=mapping.get_forward_field(),
                stream_to_current_grid=target_isocenter,
                current_grid_to_world=origin_affine,
                stream_to_ref_grid=target_isocenter,
                ref_grid_to_world=np.eye(4))

            if has_fury:

                show_template_bundles(mni_streamlines,
                                      anat_data,
                                      show=False,
                                      fname=figspath + fname +
                                      '_rigidtransf3d.png')

            sft = StatefulTractogram(mni_streamlines, myanat, Space.RASMM)

            save_tractogram(sft, fpath, bbox_valid_check=False)
            if verbose:
                print("Saved the file at " + fpath)

    if "AffineTransform3D" in registration_types:
        transform = AffineTransform3D()
        params0 = None
        starting_affine = rigid.affine
        affine = affreg.optimize(target_data,
                                 anat_data,
                                 transform,
                                 params0,
                                 target_affine,
                                 anat_affine,
                                 starting_affine=starting_affine)

        transformed = affine.transform(anat_data)

        if apply_trks:
            metric = CCMetric(3)
            level_iters = [10, 10, 5]
            sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
            mapping = sdr.optimize(target_data, anat_data, target_affine,
                                   anat_affine, affine.affine)

        for apply_nifti in apply_niftis:
            fname = os.path.basename(apply_nifti).split(".")[0]
            fpath = outputpath + fname + "_affinetransf3d.nii"

            applynii = nib.load(apply_nifti)
            apply_data = applynii.get_data()
            apply_affine = applynii.affine
            apply_hdr = myanat.header

            if len(np.shape(apply_data)) == 4:
                transformed_all = affine.transform(apply_data, apply4D=True)
                transformed = transformed_all[:, :, :, 0]
            else:
                transformed_all = affine.transform(apply_data)
                transformed = transformed_all
            save_nifti(fpath, transformed_all, anat_affine, hdr=anat_hdr)
            if figspath is not None:
                regtools.overlay_slices(
                    target_data, transformed, None, 0, "target_data",
                    "Transformed", figspath + fname + "_affinetransf3d_1.png")
                regtools.overlay_slices(
                    target_data, transformed, None, 1, "target_data",
                    "Transformed", figspath + fname + "_affinetransf3d_2.png")
                regtools.overlay_slices(
                    target_data, transformed, None, 2, "target_data",
                    "Transformed", figspath + fname + "_affinetransf3d_3.png")
            if verbose:
                print("Saved the file at " + fpath)

        for apply_trk in apply_trks:

            fname = os.path.basename(apply_trk).split(".")[0]
            fpath = outputpath + fname + "_affinetransf3d.trk"

            sft = load_tractogram(apply_trk, 'same')
            target_isocenter = np.diag(
                np.array([-vox_size, vox_size, vox_size, 1]))
            origin_affine = affine_map.affine.copy()
            origin_affine[0][3] = -origin_affine[0][3]
            origin_affine[1][3] = -origin_affine[1][3]
            origin_affine[2][3] = origin_affine[2][3] / vox_size

            origin_affine[1][3] = origin_affine[1][3] / vox_size**2

            # Apply the deformation and correct for the extents
            mni_streamlines = deform_streamlines(
                sft.streamlines,
                deform_field=mapping.get_forward_field(),
                stream_to_current_grid=target_isocenter,
                current_grid_to_world=origin_affine,
                stream_to_ref_grid=target_isocenter,
                ref_grid_to_world=np.eye(4))

            if has_fury:

                show_template_bundles(mni_streamlines,
                                      anat_data,
                                      show=False,
                                      fname=figspath + fname +
                                      '_streamlines_affinetransf3d.png')

            sft = StatefulTractogram(mni_streamlines, myanat, Space.RASMM)

            save_tractogram(sft, fpath, bbox_valid_check=False)
            if verbose:
                print("Saved the file at " + fpath)
Example #21
0
    def generate_warp_field(self, static, moving, static_axis_units,
                            moving_axis_units):
        from numpy import eye
        from dipy.align.imaffine import AffineRegistration
        from dipy.align.transforms import (
            TranslationTransform3D,
            RigidTransform3D,
            AffineTransform3D,
        )
        from dipy.align.imwarp import SymmetricDiffeomorphicRegistration as SDR

        static_g2w = eye(1 + static.ndim)
        moving_g2w = static_g2w.copy()
        params0 = None

        static_g2w[range(static.ndim), range(static.ndim)] = static_axis_units
        moving_g2w[range(moving.ndim), range(moving.ndim)] = moving_axis_units

        self.affreg = AffineRegistration(
            metric=self.metric_lin,
            level_iters=self.level_iters_lin,
            sigmas=self.sigmas,
            factors=self.factors,
            verbosity=self.verbosity,
            ss_sigma_factor=self.ss_sigma_factor,
        )

        self.sdreg = SDR(
            metric=self.metric_syn,
            level_iters=self.level_iters_syn,
            ss_sigma_factor=self.ss_sigma_factor,
        )

        self.translation_tx = self.affreg.optimize(
            static,
            moving,
            TranslationTransform3D(),
            params0,
            static_g2w,
            moving_g2w,
            starting_affine="mass",
        )

        self.rigid_tx = self.affreg.optimize(
            static,
            moving,
            RigidTransform3D(),
            params0,
            static_g2w,
            moving_g2w,
            starting_affine=self.translation_tx.affine,
        )

        self.affine_tx = self.affreg.optimize(
            static,
            moving,
            AffineTransform3D(),
            params0,
            static_g2w,
            moving_g2w,
            starting_affine=self.rigid_tx.affine,
        )

        self.sdr_tx = self.sdreg.optimize(static, moving, static_g2w,
                                          moving_g2w, self.affine_tx.affine)
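
        # A typical follow-up (not part of this snippet; sketched here as an
        # assumption) would resample `moving` with the estimated transforms:
        #   linear_only = self.affine_tx.transform(moving)  # affine resampling onto the static grid
        #   fully_warped = self.sdr_tx.transform(moving)    # full non-linear SyN warp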
Example #22
0
def ROI_registration(datapath, template, t1, b0, roi):

    t1_path = datapath + '/' + t1
    b0_path = datapath + '/' + b0
    roi_path = datapath + '/' + roi
    template_path = datapath + '/' + template

    template_img, template_affine = load_nifti(template_path)
    t1_img, t1_affine = load_nifti(t1_path)
    b0_img, b0_affine = load_nifti(b0_path)
    roi_img, roi_affine = load_nifti(roi_path)

    # diff2struct affine registration

    moving = b0_img
    moving_grid2world = b0_affine
    static = t1_img
    static_grid2world = t1_affine
    affine_path = datapath + '/' + 'diff2struct_affine.mat'

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    sigmas = [3.0, 1.0, 0.0]
    level_iters = [10000, 1000, 100]
    factors = [4, 2, 1]
    affreg_diff2struct = AffineRegistration(metric=metric,
                                            level_iters=level_iters,
                                            sigmas=sigmas,
                                            factors=factors)

    transform = AffineTransform3D()
    params0 = None

    affine_diff2struct = affreg_diff2struct.optimize(static,
                                                     moving,
                                                     transform,
                                                     params0,
                                                     static_grid2world,
                                                     moving_grid2world,
                                                     starting_affine=None)

    saveAffineMat(affine_diff2struct, affine_path)

    # struct2standard affine registration

    moving = t1_img
    moving_grid2world = t1_affine
    static = template_img
    static_grid2world = template_affine

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    sigmas = [3.0, 1.0, 0.0]
    level_iters = [10000, 1000, 100]
    factors = [4, 2, 1]
    affreg_struct2standard = AffineRegistration(metric=metric,
                                                level_iters=level_iters,
                                                sigmas=sigmas,
                                                factors=factors)

    transform = AffineTransform3D()
    params0 = None
    affine_struct2standard = affreg_struct2standard.optimize(
        static,
        moving,
        transform,
        params0,
        static_grid2world,
        moving_grid2world,
        starting_affine=None)

    # struct2standard SyN registration
    pre_align = affine_struct2standard.get_affine()
    metric = CCMetric(3)
    level_iters = [10, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

    mapping = sdr.optimize(static, moving, static_grid2world,
                           moving_grid2world, pre_align)

    warped = mapping.transform_inverse(template_img)
    warped = affine_diff2struct.transform_inverse(warped)
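    # mapping.transform_inverse() warped the template from standard space into
    # T1 space, and affine_diff2struct.transform_inverse() then resampled it
    # from T1 space into diffusion (b0) space.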
    template_diff_path = datapath + '/' + 'MNI152_diff.nii.gz'
    save_nifti(template_diff_path, warped, b0_affine)

    warped_roi = mapping.transform_inverse(roi_img)
    warped_roi = affine_diff2struct.transform_inverse(warped_roi)
    roi_diff_path = datapath + '/' + roi + '_diff.nii.gz'
    save_nifti(roi_diff_path, warped_roi, b0_affine)

    print("  Done!  ")
Example #23
0
                              t1_m_grid2world,
                              starting_affine=c_of_mass.affine)

# rigid body transform (translation + rotation)
rigid = affreg.optimize(t1_s,
                        t1_m,
                        RigidTransform3D(),
                        None,
                        t1_s_grid2world,
                        t1_m_grid2world,
                        starting_affine=translation.affine)

# affine transform (translation + rotation + scaling)
affine = affreg.optimize(t1_s,
                         t1_m,
                         AffineTransform3D(),
                         None,
                         t1_s_grid2world,
                         t1_m_grid2world,
                         starting_affine=rigid.affine)

# apply affine transformation
t1_m_affine = affine.transform(t1_m)

###############################################################################
# Compute Symmetric Diffeomorphic Registration

# set up Symmetric Diffeomorphic Registration (metric, iterations per level)
sdr = SymmetricDiffeomorphicRegistration(CCMetric(3), niter_sdr)

# compute mapping
Example #24
0
.. figure:: transformed_rigid_0.png
   :align: center
.. figure:: transformed_rigid_1.png
   :align: center
.. figure:: transformed_rigid_2.png
   :align: center

   Registration result with a rigid transform, using Mutual Information.
"""
"""
Finally, lets refine with a full affine transform (translation, rotation, scale
and shear), it is safer to fit more degrees of freedom now, since we must be
very close to the optimal transform
"""

transform = AffineTransform3D()
params0 = None
starting_affine = rigid.affine
affine = affreg.optimize(static,
                         moving,
                         transform,
                         params0,
                         static_grid2world,
                         moving_grid2world,
                         starting_affine=starting_affine)
"""
This results in a slight shear and scale
"""

transformed = affine.transform(moving)
regtools.overlay_slices(static, transformed, None, 0, "Static", "Transformed",
                        "transformed_affine_0.png")
Example #25
0
def registration_proxy(in_file, static, out_file):
    """
    http://nipy.org/dipy/examples_built/affine_registration_3d.html
    in_file --> moving
    
    static and moving = path 
    
    """
    import time
    import numpy as np
    import nibabel as nb

    import matplotlib.pyplot as plt
    from dipy.viz import regtools

    from dipy.align.imaffine import (transform_centers_of_mass, AffineMap,
                                     MutualInformationMetric,
                                     AffineRegistration)
    from dipy.align.transforms import (TranslationTransform3D,
                                       RigidTransform3D, AffineTransform3D)

    t0_time = time.time()

    print('---> I. Translation of the moving image towards the static image')

    # TODO: handle both a file path and an already-loaded NIfTI image

    static_img = nb.load(static)
    static = static_img.get_fdata()
    static_grid2world = static_img.affine

    moving_img = nb.load(in_file)
    moving = moving_img.get_fdata()[..., 0]
    moving_grid2world = moving_img.affine

    # resample so both images lie on grids with the same number of voxels

    print(
        '---> Resampling the moving image on a grid of the same dimensions as the static image'
    )

    identity = np.eye(4)
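    # An AffineMap built from the identity matrix does not move the image; it
    # only resamples the moving volume onto the static image's grid so the two
    # can be overlaid for comparison.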
    affine_map = AffineMap(identity, static.shape, static_grid2world,
                           moving.shape, moving_grid2world)
    resampled = affine_map.transform(moving)
    regtools.overlay_slices(static, resampled, None, 0, "Static", "Moving",
                            "resampled_0.png")

    regtools.overlay_slices(static, resampled, None, 1, "Static", "Moving",
                            "resampled_1.png")

    regtools.overlay_slices(static, resampled, None, 2, "Static", "Moving",
                            "resampled_2.png")
    plt.show()

    #centers of mass registration

    print('---> Aligning the centers of mass of the two images')

    c_of_mass = transform_centers_of_mass(static, static_grid2world, moving,
                                          moving_grid2world)
    transformed = c_of_mass.transform(moving)

    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_com_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_com_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_com_2.png")
    plt.show()

    print('---> II. Refine by looking for an affine transform')

    # affine registration setup
    # registration parameters (mutual information metric, multi-resolution pyramid)
    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)

    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
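    # The three lists above define a Gaussian pyramid, coarsest level first: the
    # coarsest level is downsampled by a factor of 4 and smoothed with sigma 3.0
    # (up to 10000 iterations); the finest level is the full-resolution image
    # with no smoothing (up to 100 iterations).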

    print('---> Computing Affine Registration (non-convex optimization)')

    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(static,
                                  moving,
                                  transform,
                                  params0,
                                  static_grid2world,
                                  moving_grid2world,
                                  starting_affine=starting_affine)

    transformed = translation.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_trans_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_trans_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_trans_2.png")
    plt.show()

    print('---> III. Refining with a rigid transform')

    #rigid transform

    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(static,
                            moving,
                            transform,
                            params0,
                            static_grid2world,
                            moving_grid2world,
                            starting_affine=starting_affine)

    transformed = rigid.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_rigid_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_rigid_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_rigid_2.png")
    plt.show()

    print(
        '---> IV. Refining with a full affine transform (translation, rotation, scale and shear)'
    )

    #full affine transform

    transform = AffineTransform3D()
    params0 = None
    starting_affine = rigid.affine
    affine = affreg.optimize(static,
                             moving,
                             transform,
                             params0,
                             static_grid2world,
                             moving_grid2world,
                             starting_affine=starting_affine)

    transformed = affine.transform(moving)
    regtools.overlay_slices(static, transformed, None, 0, "Static",
                            "Transformed", "transformed_affine_0.png")
    regtools.overlay_slices(static, transformed, None, 1, "Static",
                            "Transformed", "transformed_affine_1.png")
    regtools.overlay_slices(static, transformed, None, 2, "Static",
                            "Transformed", "transformed_affine_2.png")
    plt.show()

    # Save the new data in a new NIfTI image
    nb.Nifti1Image(transformed, static_img.affine).to_filename(out_file)

    #name = os.path.splitext(basename(moving_path))[0] + '_affine_reg'
    #nib.save(nib.Nifti1Image(transformed, np.eye(4)), name)
    t1_time = time.time()
    total_time = t1_time - t0_time
    print('Total time:' + str(total_time))
    print('Registered file written to: %s' % out_file)
    print('Affine registration applied successfully')
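
# For reference, a minimal usage sketch of registration_proxy as defined above;
# the file names are hypothetical placeholders, not paths from the original example.
if __name__ == '__main__':
    registration_proxy(in_file='sub01_dwi.nii.gz',       # moving image (4D; volume 0 is registered)
                       static='sub01_T1w.nii.gz',        # static reference image
                       out_file='sub01_dwi_reg.nii.gz')  # where the registered image is written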