Example #1
    def estimate_translation3d(self, fixed, moving):
        assert len(moving.shape) == len(fixed.shape)
        # Seed the 3D translation with a 2D estimate computed on the axis-0
        # mean projections, embedded into a 4x4 starting affine.
        tx_tr = self.estimate_translation2d(fixed.mean(axis=0),
                                            moving.mean(axis=0))
        tx_tr = tx_tr.affine
        tmp = np.eye(4)
        tmp[1:, 1:] = tx_tr
        trans = TranslationTransform3D()
        if self.update_map:
            self.metric = MutualInformationMetric(self.nbins,
                                                  self.sampling_prop)
            self.affmap = AffineRegistration(
                metric=self.metric,
                level_iters=self.level_iters,
                sigmas=self.sigmas,
                factors=self.factors,
                method=self.method,
                ss_sigma_factor=self.ss_sigma_factor,
                options=self.options,
                verbosity=self.verbosity)
        return self.affmap.optimize(fixed,
                                    moving,
                                    trans,
                                    self.params0,
                                    starting_affine=tmp)
Example #2
    def estimate_rigidxy(self, fixed, moving, tx_tr=None):
        assert len(moving.shape) == len(fixed.shape)
        trans = TranslationTransform3D()
        if self.update_map:
            self.metric = MutualInformationMetric(self.nbins,
                                                  self.sampling_prop)
            self.affmap = AffineRegistration(
                metric=self.metric,
                level_iters=self.level_iters,
                sigmas=self.sigmas,
                factors=self.factors,
                method=self.method,
                ss_sigma_factor=self.ss_sigma_factor,
                options=self.options,
                verbosity=self.verbosity)
        if tx_tr is None:
            tmp = self.estimate_rigid2d(fixed.mean(axis=0),
                                        moving.mean(axis=0))
            tmp = tmp.affine
            tx_tr = np.eye(4)
            tx_tr[1:, 1:] = tmp
        if isinstance(tx_tr, AffineMap):
            tx_tr = tx_tr.affine

        # Apply the in-plane (2D) starting transform to the fixed volume, then
        # estimate a 3D translation and keep only its first-axis offset.
        trans2d = AffineMap(tx_tr,
                            domain_grid_shape=fixed.shape,
                            codomain_grid_shape=moving.shape)
        moving_ = trans2d.transform(fixed)
        transz = self.affmap.optimize(moving_, moving, trans, self.params0)
        print(transz.affine)
        tx_tr[0, 3] = transz.affine[0, 3]
        return AffineMap(tx_tr,
                         domain_grid_shape=fixed.shape,
                         codomain_grid_shape=moving.shape)
Example #3
    def __init__(self):
        self.nbins = 32
        self.sampling_prop = None  #.25
        self.level_iters = [1000, 500, 250, 125]
        self.factors = [8, 4, 2, 1]
        self.sigmas = [3.0, 2.0, 1.0, 0.0]
        self.ss_sigma_factor = None
        self.verbosity = 0
        self.tx_mat = None
        self.params0 = None

        self.options = {
            'maxcor': 10,
            'ftol': 1e-7,
            'gtol': 1e-5,
            'eps': 1e-8,
            'maxiter': 1000,
            'disp': True
        }
        # ftol: The iteration stops when (f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol.
        # gtol: The iteration will stop when max{|proj g_i | i = 1, ..., n} <= gtol where pg_i is the i-th component of the projected gradient.
        # eps: Step size used for numerical approximation of the jacobian.
        # disp: Set to True to print convergence messages.

        self.method = 'L-BFGS-B'
        self.metric = MutualInformationMetric(self.nbins, self.sampling_prop)
        self.affmap = AffineRegistration(metric=self.metric,
                                         level_iters=self.level_iters,
                                         sigmas=self.sigmas,
                                         factors=self.factors,
                                         method=self.method,
                                         ss_sigma_factor=self.ss_sigma_factor,
                                         options=self.options,
                                         verbosity=self.verbosity)
        self.update_map = True
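The option comments above describe SciPy's L-BFGS-B settings; below is a short standalone sketch (not part of the original class) showing the same options dict driving scipy.optimize.minimize directly, with a toy quadratic objective made up for illustration:

import numpy as np
from scipy.optimize import minimize

options = {'maxcor': 10, 'ftol': 1e-7, 'gtol': 1e-5,
           'eps': 1e-8, 'maxiter': 1000, 'disp': True}

# toy objective: squared distance to a target point
target = np.array([1.0, -2.0, 0.5])
result = minimize(lambda x: np.sum((x - target) ** 2),
                  x0=np.zeros(3), method='L-BFGS-B', options=options)
print(result.x)  # converges to the target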
Example #4
def translation_transform(static, moving, static_grid2world, moving_grid2world,
                          nbins, sampling_prop, metric, level_iters, sigmas,
                          factors, starting_affine):

    # NOTE: the tuning arguments received by this function are overridden by
    # the fixed values below.
    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)

    level_iters = [10000, 1000, 100]

    sigmas = [3.0, 1.0, 0.0]

    factors = [4, 2, 1]

    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    transform = TranslationTransform3D()
    params0 = None

    translation = affreg.optimize(static,
                                  moving,
                                  transform,
                                  params0,
                                  static_grid2world,
                                  moving_grid2world,
                                  starting_affine=starting_affine)

    return translation
Example #5
def fine_alignment(static, moving, starting_affine=None):
    """Use mutual information to align two images.

    Parameters
    ----------
    static : array
        The reference image.
    moving : array
        The moving image.
    starting_affine : array
        A proposed initial transformation

    Returns
    -------
    img_warp : array
        The moving image warped towards the static image
    affine : array
        The affine transformation for this warping

    """
    metric = MutualInformationMetric()
    reggy = AffineRegistration(metric=metric)
    transform = AffineTransform2D()
    affine = reggy.optimize(static,
                            moving,
                            transform,
                            None,
                            starting_affine=starting_affine)
    img_warp = affine.transform(moving)
    return img_warp, affine.affine
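A minimal usage sketch for fine_alignment (not from the original source); the two Gaussian-blob images are synthetic and only illustrate the call:

import numpy as np

# toy 2D images: a Gaussian blob and a translated copy
y, x = np.mgrid[0:128, 0:128]
static = np.exp(-((x - 64) ** 2 + (y - 64) ** 2) / (2 * 15.0 ** 2))
moving = np.exp(-((x - 70) ** 2 + (y - 58) ** 2) / (2 * 15.0 ** 2))

img_warp, affine = fine_alignment(static, moving)
print(affine)  # 3x3 matrix; the translation sits in the last column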
Example #6
    def estimate_affine2d(self, fixed, moving, tx_tr=None):
        assert len(moving.shape) == len(fixed.shape)
        trans = AffineTransform3D()
        if self.update_map:
            self.metric = MutualInformationMetric(self.nbins,
                                                  self.sampling_prop)
            self.affmap = AffineRegistration(
                metric=self.metric,
                level_iters=self.level_iters,
                sigmas=self.sigmas,
                factors=self.factors,
                method=self.method,
                ss_sigma_factor=self.ss_sigma_factor,
                options=self.options,
                verbosity=self.verbosity)
        if tx_tr is None:
            self.update_map = False
            tx_tr = self.estimate_rigid2d(fixed, moving)
            self.update_map = True
        if isinstance(tx_tr, AffineMap):
            tx_tr = tx_tr.affine
        return self.affmap.optimize(fixed,
                                    moving,
                                    trans,
                                    self.params0,
                                    starting_affine=tx_tr)
Example #7
def register_image(static,
                   static_grid2world,
                   moving,
                   moving_grid2world,
                   transformation_type='affine',
                   dwi=None):
    if transformation_type not in ['rigid', 'affine']:
        raise ValueError('Transformation type not available in Dipy')

    # Set all parameters for registration
    nbins = 32
    params0 = None
    sampling_prop = None
    level_iters = [50, 25, 5]
    sigmas = [8.0, 4.0, 2.0]
    factors = [8, 4, 2]
    metric = MutualInformationMetric(nbins, sampling_prop)
    reg_obj = AffineRegistration(metric=metric,
                                 level_iters=level_iters,
                                 sigmas=sigmas,
                                 factors=factors,
                                 verbosity=0)

    # First, align the centers of mass of both volumes
    c_of_mass = transform_centers_of_mass(static, static_grid2world, moving,
                                          moving_grid2world)
    # Then, rigid transformation (translation + rotation)
    transform = RigidTransform3D()
    rigid = reg_obj.optimize(static,
                             moving,
                             transform,
                             params0,
                             static_grid2world,
                             moving_grid2world,
                             starting_affine=c_of_mass.affine)

    if transformation_type == 'affine':
        # Finally, affine transformation (translation + rotation + scaling + shearing)
        transform = AffineTransform3D()
        affine = reg_obj.optimize(static,
                                  moving,
                                  transform,
                                  params0,
                                  static_grid2world,
                                  moving_grid2world,
                                  starting_affine=rigid.affine)

        mapper = affine
        transformation = affine.affine
    else:
        mapper = rigid
        transformation = rigid.affine

    if dwi is not None:
        trans_dwi = transform_dwi(mapper, static, dwi)
        return trans_dwi, transformation
    else:
        return mapper.transform(moving), transformation
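A hedged usage sketch for register_image, assuming two volumes loaded with dipy's load_nifti; the file names are placeholders:

from dipy.io.image import load_nifti

static, static_grid2world = load_nifti('template.nii.gz')   # placeholder path
moving, moving_grid2world = load_nifti('subject.nii.gz')    # placeholder path

warped, affine_4x4 = register_image(static, static_grid2world,
                                    moving, moving_grid2world,
                                    transformation_type='rigid')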
Example #8
def rigid_registration(fixed, moving, metric=MutualInformationMetric(), **kwargs):
    affreg = AffineRegistration(metric=metric, **kwargs)
    rigid = affreg.optimize(fixed.get_data(), moving.get_data(),
                            RigidTransform3D(), None,
                            fixed.affine, moving.affine)
    transformed = rigid.transform(moving.get_data())
    # convert transformed to full nibabel image, just data now
    # TODO: get affine for transformed from rigid.affine?
    transformed = new_img_like(moving, transformed)
    return transformed
Example #9
def dipy_align(static, static_grid2world, moving, moving_grid2world, prealign=None):
    r""" Full rigid registration with Dipy's imaffine module
    
    Here we implement an extra optimization heuristic: move the geometric
    centers of the images to the origin. Imaffine does not do this by default
    because we want to give the user as much control of the optimization
    process as possible.

    """
    # Bring the center of the moving image to the origin
    c_moving = tuple(0.5 * np.array(moving.shape, dtype=np.float64))
    c_moving = moving_grid2world.dot(c_moving + (1,))
    correction_moving = np.eye(4, dtype=np.float64)
    correction_moving[:3, 3] = -1 * c_moving[:3]
    centered_moving_aff = correction_moving.dot(moving_grid2world)

    # Bring the center of the static image to the origin
    c_static = tuple(0.5 * np.array(static.shape, dtype=np.float64))
    c_static = static_grid2world.dot(c_static + (1,))
    correction_static = np.eye(4, dtype=np.float64)
    correction_static[:3, 3] = -1 * c_static[:3]
    centered_static_aff = correction_static.dot(static_grid2world)

    dim = len(static.shape)
    metric = MutualInformationMetric(nbins=32, sampling_proportion=0.3)
    level_iters = [10000, 1000, 100]
    affr = AffineRegistration(metric=metric, level_iters=level_iters)
    affr.verbosity = VerbosityLevels.DEBUG
    # metric.verbosity = VerbosityLevels.DEBUG

    # Registration schedule: center-of-mass then translation, then rigid and then affine
    if prealign is None:
        prealign = "mass"
    transforms = ["TRANSLATION", "RIGID", "AFFINE"]

    sol = np.eye(dim + 1)
    for transform_name in transforms:
        transform = regtransforms[(transform_name, dim)]
        print("Optimizing: %s" % (transform_name,))
        x0 = None
        sol = affr.optimize(
            static, moving, transform, x0, centered_static_aff, centered_moving_aff, starting_affine=prealign
        )
        prealign = sol.affine.copy()

    # Now bring the geometric centers back to their original location
    fixed = np.linalg.inv(correction_moving).dot(sol.affine.dot(correction_static))
    sol.set_affine(fixed)
    sol.domain_grid2world = static_grid2world
    sol.codomain_grid2world = moving_grid2world

    return sol
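A hedged usage sketch for dipy_align, assuming two NIfTI volumes loaded with nibabel and that the imports the function relies on (regtransforms, VerbosityLevels, AffineRegistration, MutualInformationMetric) are available in its module; the file names are placeholders:

import nibabel as nib

static_img = nib.load('static_T1w.nii.gz')   # placeholder path
moving_img = nib.load('moving_T1w.nii.gz')   # placeholder path

affine_map = dipy_align(static_img.get_fdata(), static_img.affine,
                        moving_img.get_fdata(), moving_img.affine)
resampled = affine_map.transform(moving_img.get_fdata())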
Example #10
def estimate_translation(
        fixed,
        moving,
        metric_sampling=1.0,
        factors=(4, 2, 1),
        level_iters=(1000, 1000, 1000),
        sigmas=(8, 4, 1),
):
    """
    Estimate translation between 2D or 3D images using dipy.align.

    Parameters
    ----------
    fixed : numpy array, 2D or 3D
        The reference image.

    moving : numpy array, 2D or 3D
        The image to be transformed.

    metric_sampling : float, within the interval (0,  1]
        Fraction of the metric sampling to use for optimization

    factors : iterable
        The image pyramid factors to use

    level_iters : iterable
        Number of iterations per pyramid level

    sigmas : iterable
        Standard deviation of gaussian blurring for each pyramid level

    """
    from dipy.align.transforms import TranslationTransform2D, TranslationTransform3D
    from dipy.align.imaffine import AffineRegistration, MutualInformationMetric

    metric = MutualInformationMetric(32, metric_sampling)
    affreg = AffineRegistration(
        metric=metric,
        level_iters=level_iters,
        sigmas=sigmas,
        factors=factors,
        verbosity=0,
    )

    if fixed.ndim == 2:
        transform = TranslationTransform2D()
    elif fixed.ndim == 3:
        transform = TranslationTransform3D()
    else:
        raise ValueError("fixed must be a 2D or 3D array")

    tx = affreg.optimize(fixed, moving, transform, params0=None)

    return tx
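A minimal usage sketch (not from the original source); the shifted Gaussian blob is synthetic and only illustrates the call:

import numpy as np

# toy 3D volume: a Gaussian blob and a copy shifted by two voxels
z, y, x = np.mgrid[0:48, 0:48, 0:48]
fixed = np.exp(-((x - 24) ** 2 + (y - 24) ** 2 + (z - 24) ** 2) / (2 * 6.0 ** 2))
moving = np.roll(fixed, shift=2, axis=0)

tx = estimate_translation(fixed, moving)
print(tx.affine)               # 4x4 matrix; translation in the last column
aligned = tx.transform(moving)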
Example #11
def register(metric_name, static, moving, static_grid2space, moving_grid2space):
    if metric_name == "LCC":
        from dipy.align.imaffine import LocalCCMetric

        radius = 4
        metric = LocalCCMetric(radius)
    elif metric_name == "MI":
        nbins = 32
        sampling_prop = None
        metric = MattesMIMetric(nbins, sampling_prop)
    else:
        raise ValueError("Unknown metric " + metric_name)

    align_centers = True
    # schedule = ['TRANSLATION', 'RIGID', 'AFFINE']
    schedule = ["TRANSLATION", "RIGID"]
    if True:
        level_iters = [100, 100, 100]
        sigmas = [3.0, 1.0, 0.0]
        factors = [4, 2, 1]
    else:
        level_iters = [100]
        sigmas = [0.0]
        factors = [1]

    affreg = AffineRegistration(metric=metric, level_iters=level_iters, sigmas=sigmas, factors=factors)

    out = np.eye(4)
    if align_centers:
        print("Aligning centers of mass")
        c_static = ndimage.measurements.center_of_mass(np.array(static))
        c_static = static_grid2space.dot(c_static + (1,))
        original_static = static_grid2space.copy()
        static_grid2space = static_grid2space.copy()
        static_grid2space[:3, 3] -= c_static[:3]
        out = align_centers_of_mass(static, static_grid2space, moving, moving_grid2space)

    for step in schedule:
        print("Optimizing: %s" % (step,))
        transform = regtransforms[(step, 3)]
        params0 = None
        out = affreg.optimize(
            static, moving, transform, params0, static_grid2space, moving_grid2space, starting_affine=out
        )
    if align_centers:
        print("Updating center-of-mass reference")
        T = np.eye(4)
        T[:3, 3] = -1 * c_static[:3]
        # out is an AffineMap at this point, so compose its 4x4 matrix with the
        # center-of-mass correction
        out = out.affine.dot(T)
    return out
def align_xmap_np(static,
                  moving,
                  static_grid2world=np.eye(4),
                  moving_grid2world=np.eye(4)):
    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)

    level_iters = [10000, 1000, 100]

    sigmas = [3.0, 1.0, 0.0]

    factors = [4, 2, 1]

    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors,
                                verbosity=0)
    transform = RigidTransform3D()
    params0 = None

    # starting_affine = transform_centers_of_mass(static,
    #                                             static_grid2world,
    #                                             moving,
    #                                             moving_grid2world,
    #                                             ).affine
    # print("\tCOM affine transform is: {}".format(starting_affine))

    starting_affine = np.eye(4)

    rigid = affreg.optimize(
        static,
        moving,
        transform,
        params0,
        static_grid2world,
        moving_grid2world,
        starting_affine=starting_affine,

        # verbosity=0,
    )

    # Transform
    transformed = rigid.transform(moving)
    # print("\tThe transform is: {}".format(rigid.affine))

    return transformed
Example #13
def affine_registration(moving,
                        static,
                        moving_affine=None,
                        static_affine=None,
                        nbins=32,
                        sampling_prop=None,
                        metric='MI',
                        pipeline=[c_of_mass, translation, rigid, affine],
                        level_iters=[10000, 1000, 100],
                        sigmas=[5.0, 2.5, 0.0],
                        factors=[4, 2, 1],
                        params0=None):
    """
    Find the affine transformation between two 3D images.

    Parameters
    ----------

    """
    # Define the Affine registration object we'll use with the chosen metric:
    use_metric = affine_metric_dict[metric](nbins, sampling_prop)
    affreg = AffineRegistration(metric=use_metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    # Bootstrap this thing with the identity:
    starting_affine = np.eye(4)
    # Go through the selected transformation:
    for func in pipeline:
        transformed, starting_affine = func(moving, static, static_affine,
                                            moving_affine, affreg,
                                            starting_affine, params0)
    return transformed, starting_affine
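The pipeline entries (c_of_mass, translation, rigid, affine) are step functions assumed to be defined elsewhere in this module. The sketch below is a hypothetical version of the translation step, written only to match the way the loop above calls each entry and unpacks its return value:

from dipy.align.transforms import TranslationTransform3D

def translation(moving, static, static_affine, moving_affine,
                affreg, starting_affine, params0):
    # one pipeline stage: optimize a 3D translation, seeded by the previous stage
    transform = TranslationTransform3D()
    affine_map = affreg.optimize(static, moving, transform, params0,
                                 static_affine, moving_affine,
                                 starting_affine=starting_affine)
    return affine_map.transform(moving), affine_map.affine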
Example #14
    def estimate_translation2d(self, fixed, moving):
        assert len(moving.shape) == len(fixed.shape)
        trans = TranslationTransform2D()
        if self.update_map:
            self.metric = MutualInformationMetric(self.nbins,
                                                  self.sampling_prop)
            self.affmap = AffineRegistration(
                metric=self.metric,
                level_iters=self.level_iters,
                sigmas=self.sigmas,
                factors=self.factors,
                method=self.method,
                ss_sigma_factor=self.ss_sigma_factor,
                options=self.options,
                verbosity=self.verbosity)
        return self.affmap.optimize(fixed, moving, trans, self.params0)
Example #15
def mutualInfo_dipy(img1, img2):
    img1_grid2world = np.identity(3)
    img2_grid2world = np.identity(3)

    # compute center of mass
    c_of_mass = transform_centers_of_mass(img1, img1_grid2world, img2,
                                          img2_grid2world)

    x_shift = c_of_mass.affine[1, -1]
    y_shift = c_of_mass.affine[0, -1]

    # prepare affine registration
    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    # translation
    translation = affreg.optimize(img1,
                                  img2,
                                  TranslationTransform2D(),
                                  None,
                                  img1_grid2world,
                                  img2_grid2world,
                                  starting_affine=c_of_mass.affine)

    # rotation
    # ~ rigid = affreg.optimize(im1, im2, RigidTransform2D(), None,
    # ~ im1_grid2world, im2_grid2world,
    # ~ starting_affine=translation.affine)
    # ~ transformed = rigid.transform(im2)

    # ~ # resize, shear
    # ~ affine = affreg.optimize(im1, im2, AffineTransform2D(), None,
    # ~ im1_grid2world, im2_grid2world,
    # ~ starting_affine=rigid.affine)

    x_shift = translation.affine[1, -1]
    y_shift = translation.affine[0, -1]

    return np.asarray([-x_shift, -y_shift])
Example #16
def affine_transform(static, moving, static_grid2world, moving_grid2world,
                     nbins, sampling_prop, metric, level_iters, sigmas,
                     factors, starting_affine):

    transform = AffineTransform3D()
    params0 = None
    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    affine = affreg.optimize(static,
                             moving,
                             transform,
                             params0,
                             static_grid2world,
                             moving_grid2world,
                             starting_affine=starting_affine)

    return affine
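A hedged usage sketch for affine_transform; note that the nbins/sampling_prop arguments are not used by the function body, so the metric is built by the caller. The toy volumes are synthetic and only illustrate the call:

import numpy as np
from dipy.align.imaffine import MutualInformationMetric

# toy volumes: a Gaussian blob and a slightly shifted copy
z, y, x = np.mgrid[0:48, 0:48, 0:48]
static = np.exp(-((x - 24) ** 2 + (y - 24) ** 2 + (z - 24) ** 2) / (2 * 6.0 ** 2))
moving = np.roll(static, shift=3, axis=2)

metric = MutualInformationMetric(32, None)
affine_map = affine_transform(static, moving,
                              np.eye(4), np.eye(4),
                              nbins=32, sampling_prop=None,  # unused by the body
                              metric=metric,
                              level_iters=[1000, 100, 10],
                              sigmas=[3.0, 1.0, 0.0],
                              factors=[4, 2, 1],
                              starting_affine=np.eye(4))
resampled = affine_map.transform(moving)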
Example #17
def transform_affine(static, moving):
    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)

    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]

    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    transform = AffineTransform3D()
    params0 = None
    affine = affreg.optimize(static, moving, transform, params0, None, None,
                             None)
    transformed = affine.transform(moving)

    return transformed
Example #18
def register_affine(t_masked,
                    m_masked,
                    affreg=None,
                    final_iters=(10000, 1000, 100)):
    """ Run affine registration between images `t_masked`, `m_masked`

    Parameters
    ----------
    t_masked : image
        Template image object, with image data masked to set out-of-brain
        voxels to zero.
    m_masked : image
        Moving (individual) image object, with image data masked to set
        out-of-brain voxels to zero.
    affreg : None or AffineRegistration instance, optional
        AffineRegistration with which to register `m_masked` to `t_masked`.  If
        None, we make an instance with default parameters.
    final_iters : tuple, optional
        Length 3 tuple of level iterations to use on final affine pass of the
        registration.

    Returns
    -------
    affine : shape (4, 4) ndarray
        Final affine mapping from voxels in `t_masked` to voxels in `m_masked`.
    """
    if affreg is None:
        metric = MutualInformationMetric(nbins=32, sampling_proportion=None)
        affreg = AffineRegistration(metric=metric)
    t_data = t_masked.get_data().astype(float)
    m_data = m_masked.get_data().astype(float)
    t_aff = t_masked.affine
    m_aff = m_masked.affine
    translation = affreg.optimize(t_data, m_data, TranslationTransform3D(),
                                  None, t_aff, m_aff)
    rigid = affreg.optimize(t_data,
                            m_data,
                            RigidTransform3D(),
                            None,
                            t_aff,
                            m_aff,
                            starting_affine=translation.affine)
    # Maybe bump up iterations for last step
    if final_iters is not None:
        affreg.level_iters = list(final_iters)
    affine = affreg.optimize(t_data,
                             m_data,
                             AffineTransform3D(),
                             None,
                             t_aff,
                             m_aff,
                             starting_affine=rigid.affine)
    return affine.affine
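A hedged usage sketch for register_affine, assuming two masked images loaded with a nibabel version that still provides get_data(); the file names are placeholders:

import nibabel as nib

template_img = nib.load('template_brain_masked.nii.gz')  # placeholder path
subject_img = nib.load('subject_brain_masked.nii.gz')    # placeholder path

# default AffineRegistration, with more iterations on the final affine pass
affine_4x4 = register_affine(template_img, subject_img,
                             final_iters=(10000, 1000, 100))
print(affine_4x4.shape)  # (4, 4)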
Example #19
def affine_registration(moving, static,
                        moving_grid2world=None,
                        static_grid2world=None,
                        nbins=32,
                        sampling_prop=None,
                        metric='MI',
                        pipeline=[c_of_mass, translation, rigid, affine],
                        level_iters=[10000, 1000, 100],
                        sigmas=[3.0, 1.0, 0.0],
                        factors=[4, 2, 1],
                        params0=None):
    """
    Find the affine transformation between two 3D images
    """

    if len(moving.shape) == 4:
        data = moving
        moving = moving[:, :, :, 0]

    # Define the Affine registration object we'll use with the chosen metric:
    use_metric = affine_metric_dict[metric](nbins, sampling_prop)
    affreg = AffineRegistration(metric=use_metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)
    
    # Bootstrap this thing with the identity:
    starting_affine = np.eye(4)
    # Go through the selected transformation:

    for func in pipeline:
        transformed, starting_affine, transform = func(moving, static,
                                    static_grid2world,
                                    moving_grid2world,
                                    affreg, starting_affine,
                                    params0)
    # If the input was 4D, apply the final transform to every volume
    try:
        transformed = np.zeros(static.shape[:3] + (data.shape[-1],))
        for volume in range(data.shape[-1]):
            transformed[..., volume] = transform.transform(data[..., volume])
    except NameError:
        # the input was 3D, so there is no 4D stack to resample
        print("No 4D data to transform")


    return transformed, starting_affine
def setup_dipy_register(nbins=50,
                        metric='mutualinfo',
                        sampling_prop=None,
                        level_iters=[10000, 1000, 100],
                        sigmas=[3.0, 1.0, 0.0],
                        factors=[4, 2, 1]):

    static_grid2world = np.eye(4)
    moving_grid2world = np.eye(4)

    if metric == 'mutualinfo':
        metric = MutualInformationMetric(nbins, sampling_prop)

    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    return (static_grid2world, moving_grid2world), affreg
Example #21
def get_affine_registration(level_iters) -> AffineRegistration:
    """
    """
    # The number of bins used determines how sensitive the measurement of entropy is to variance in the voxel intensity
    # A small number of bins decreases sensitivity
    n_bins = 128

    # No sampling prop is used
    sampling_prop = None
    metric = MutualInformationMetric(n_bins, sampling_prop)

    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]

    aff_reg = AffineRegistration(
        metric=metric, level_iters=level_iters, sigmas=sigmas, factors=factors
    )

    return aff_reg
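A short usage sketch (not from the original source); the iteration counts are the same coarse-to-fine schedule used by the other examples here:

aff_reg = get_affine_registration(level_iters=[10000, 1000, 100])

# aff_reg can then be used like the other examples, e.g.
# aff_reg.optimize(static, moving, TranslationTransform3D(), None,
#                  static_grid2world, moving_grid2world)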
Example #22
def setup_affine(metric = None, level_iters = None , sigmas = None, \
                 factors = None, method = 'L-BFGS-B'):
    """
    Sets up the Gaussian Pyramid used in multi-resolution image registration
    and initializes an instance of the AffineRegistration class in dipy.

    Parameters
    ----------    
    metric : None or object, optional 
        If None, Mutual Information Metric will be used with default settings.
        Can set up with specific nbins and sampling proportion with 
        setup_mutualinformation function.
    level_iters : sequence of integers, optional 
        number of iterations at each level of the pyramid. If None, the iterations
        will be [10000, 1000, 100]. 
    sigmas : sequence of floats, optional 
        smoothing parameter for each level of the pyramid. Default sequence is
        [3, 1, 0], which means the image at the coarsest (see factors) level
        is smoothed the most and the image at the finest level is not smoothed.
    factors: sequence of floats, optional 
        sub-sampling factors in Gaussian pyramid. Default is [4, 2, 1] which 
        means the image at the coarsest level is a quarter the resolution and
        the image at the finest level is the original resolution. 
    method : string, optional
        optimization method used in registration. Default is L-BFGS-B but
        any gradient-based method in dipy.core.Optimize such as CG, BFGS, 
        Newton-CG, dogleg, or trust-ncg are available. 
 
    Returns
    -------
    affreg : AffineRegistration
        The configured registration object.
    """
    affreg = AffineRegistration(metric=metric, level_iters=level_iters, \
                                sigmas=sigmas, factors=factors, \
                                method = method)
    return affreg
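A hedged usage sketch for setup_affine; setup_mutualinformation is only mentioned in the docstring above, so here the metric is built directly with dipy's MutualInformationMetric:

from dipy.align.imaffine import MutualInformationMetric

metric = MutualInformationMetric(nbins=32, sampling_proportion=None)
affreg = setup_affine(metric=metric,
                      level_iters=[10000, 1000, 100],
                      sigmas=[3.0, 1.0, 0.0],
                      factors=[4, 2, 1])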
Example #23
def registration(ref, moving, ref_mask=None, moving_mask=None):
    ref_mask_data, mov_mask_data = None, None
    ref_data = ref.get_fdata()
    if ref_mask:
        ref_mask_data = ref_mask.get_fdata() > 0.5
    mov_data = moving.get_fdata()
    if moving_mask:
        mov_mask_data = moving_mask.get_fdata() > 0.5

    metric = MutualInformationMetric(nbins=32, sampling_proportion=None)
    transform = RigidTransform3D()
    affreg = AffineRegistration(
        metric=metric, level_iters=[10000, 1000, 0], factors=[6, 4, 2], sigmas=[4, 2, 0]
    )
    rigid = affreg.optimize(
        ref_data,
        mov_data,
        transform,
        None,
        ref.affine,
        moving.affine,
        starting_affine="mass",
        static_mask=ref_mask_data,
        moving_mask=mov_mask_data,
    )

    affreg = AffineRegistration(
        metric=metric, level_iters=[10000, 1000, 0], factors=[4, 2, 2], sigmas=[4, 2, 0]
    )
    transform = RigidScalingTransform3D()
    # transform = AffineTransform3D()
    return affreg.optimize(
        ref_data,
        mov_data,
        transform,
        None,
        ref.affine,
        moving.affine,
        starting_affine=rigid.affine,
        static_mask=ref_mask_data,
        moving_mask=mov_mask_data,
    )
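A hedged usage sketch for registration, assuming two nibabel images; the file names are placeholders and the optional masks are omitted:

import nibabel as nib

ref_img = nib.load('fixed_T1w.nii.gz')      # placeholder path
moving_img = nib.load('moving_T1w.nii.gz')  # placeholder path

mapping = registration(ref_img, moving_img)
resampled = mapping.transform(moving_img.get_fdata())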
Example #24
"""

nbins = 32
sampling_prop = None
metric = MutualInformationMetric(nbins, sampling_prop)
"""
As well as the optimization strategy:

"""

level_iters = [10, 10, 5]
sigmas = [3.0, 1.0, 0.0]
factors = [4, 2, 1]
affine_reg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)
transform = TranslationTransform3D()

params0 = None
translation = affine_reg.optimize(static, moving, transform, params0,
                                  static_affine, moving_affine)
transformed = translation.transform(moving)
transform = RigidTransform3D()

rigid_map = affine_reg.optimize(static,
                                moving,
                                transform,
                                params0,
                                static_affine,
                                moving_affine,
                                starting_affine=translation.affine)
Example #25
which means that, if the original image shape was (nx, ny, nz) voxels, then the
shape of the coarsest image will be about (nx//4, ny//4, nz//4), the shape in
the middle resolution will be about (nx//2, ny//2, nz//2) and the image at the
finest scale has the same size as the original image. This set of factors is
the default.
"""

factors = [4, 2, 1]

"""
Now we go ahead and instantiate the registration class with the configuration
we just prepared
"""

affreg = AffineRegistration(metric=metric,
                            level_iters=level_iters,
                            sigmas=sigmas,
                            factors=factors)

"""
Using AffineRegistration we can register our images in as many stages as we
want, providing previous results as initialization for the next (the same logic
as in ANTS). The reason why it is useful is that registration is a non-convex
optimization problem (it may have more than one local optima), which means that
it is very important to initialize as close to the solution as possible. For
example, lets start with our (previously computed) rough transformation
aligning the centers of mass of our images, and then refine it in three stages.
First look for an optimal translation. The dictionary regtransforms contains
all available transforms, we obtain one of them by providing its name and the
dimension (either 2 or 3) of the image we are working with (since we are
aligning volumes, the dimension is 3)
"""
Example #26
def warp_syn_dipy(static_fname, moving_fname):
    import os
    import numpy as np
    import nibabel as nb
    from dipy.align.metrics import CCMetric
    from dipy.align.imaffine import (transform_centers_of_mass, AffineMap,
                                     MutualInformationMetric,
                                     AffineRegistration)
    from dipy.align.transforms import (TranslationTransform3D,
                                       RigidTransform3D, AffineTransform3D)
    from dipy.align.imwarp import (DiffeomorphicMap,
                                   SymmetricDiffeomorphicRegistration)

    from nipype.utils.filemanip import fname_presuffix

    static = nb.load(static_fname)
    moving = nb.load(moving_fname)

    c_of_mass = transform_centers_of_mass(static.get_data(), static.affine,
                                          moving.get_data(), moving.affine)
    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)
    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(static.get_data(),
                                  moving.get_data(),
                                  transform,
                                  params0,
                                  static.affine,
                                  moving.affine,
                                  starting_affine=starting_affine)

    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(static.get_data(),
                            moving.get_data(),
                            transform,
                            params0,
                            static.affine,
                            moving.affine,
                            starting_affine=starting_affine)
    transform = AffineTransform3D()
    params0 = None
    starting_affine = rigid.affine
    affine = affreg.optimize(static.get_data(),
                             moving.get_data(),
                             transform,
                             params0,
                             static.affine,
                             moving.affine,
                             starting_affine=starting_affine)

    metric = CCMetric(3, sigma_diff=3.)
    level_iters = [25, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
    starting_affine = affine.affine
    mapping = sdr.optimize(static.get_data(), moving.get_data(), static.affine,
                           moving.affine, starting_affine)

    warped_filename = os.path.abspath(
        fname_presuffix(moving_fname,
                        newpath='./',
                        suffix='_warped',
                        use_ext=True))
    warped = nb.Nifti1Image(mapping.transform(moving.get_data()),
                            static.affine)
    warped.to_filename(warped_filename)

    warp_filename = os.path.abspath(
        fname_presuffix(moving_fname,
                        newpath='./',
                        suffix='_warp.npz',
                        use_ext=False))
    np.savez(warp_filename,
             prealign=mapping.prealign,
             forward=mapping.forward,
             backward=mapping.backward)

    return warp_filename, warped_filename
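A one-line usage sketch (not from the original source); both file names are placeholders for NIfTI volumes on disk:

warp_file, warped_file = warp_syn_dipy('template_T1w.nii.gz', 'subject_T1w.nii.gz')
print(warp_file, warped_file)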
Example #27
"""
"""
Let's make some registration settings.
"""

nbins = 32
sampling_prop = None
metric = MutualInformationMetric(nbins, sampling_prop)

# small number of iterations for this example
level_iters = [100, 10]
sigmas = [1.0, 0.0]
factors = [2, 1]

affreg = AffineRegistration(metric=metric,
                            level_iters=level_iters,
                            sigmas=sigmas,
                            factors=factors)
"""
Now let's register these volumes together without any masking. For the purposes
of this example, we will not provide an initial transformation based on centre
of mass, but this would work fine with masks.

Note that use of masks is not currently implemented for sparse sampling.

"""

transform = TranslationTransform3D()
transl = affreg.optimize(static,
                         moving,
                         transform,
                         None,
Example #28
def gm_network(mr_filename, gm_filename, at_filename, template_mr):

    new_gmfilename = os.path.abspath(gm_filename).replace(".nii", "_mni.nii")
    new_atfilename = os.path.abspath(at_filename).replace(".nii", "_mni.nii")

    networks = 0

    if not ((os.path.isfile(new_gmfilename)) or
            (os.path.islink(new_gmfilename))):

        print('file {} does not exist'.format(new_gmfilename))
        print('performing registration to MNI ... ', end='')
        start = time.process_time()

        # see if we can maybe find a transform
        tf_filename = os.path.abspath(gm_filename).split(
            '.nii')[0] + "_reg.npz"
        if not ((os.path.isfile(tf_filename)) or
                (os.path.islink(tf_filename))):

            # see https://dipy.org/documentation/1.2.0./examples_built/ ..
            #           .. affine_registration_3d/#example-affine-registration-3d
            static, static_affine = load_nifti(template_mr)
            moving, moving_affine = load_nifti(mr_filename)
            grey, grey_affine = load_nifti(gm_filename)
            atl, atl_affine = load_nifti(at_filename)

            # first initialise by putting centres of mass on top of each other
            c_of_mass = transform_centers_of_mass(static, static_affine,
                                                  moving, moving_affine)

            # initialise transform parameters (e.g. the mutual information criterion)
            # these parameters won't need to be changed between the different stages
            nbins = 64
            sampling_prop = None
            metric = MutualInformationMetric(nbins, sampling_prop)
            level_iters = [25, 15, 5]
            sigmas = [2, 1, 0]
            factors = [4, 2, 1]
            affreg = AffineRegistration(metric=metric,
                                        level_iters=level_iters,
                                        sigmas=sigmas,
                                        factors=factors)

            # give slightly more degrees of freedom, by allowing translation of centre of gravity
            print('\nTranslation only:')
            transform = TranslationTransform3D()
            params0 = None
            translation = affreg.optimize(static,
                                          moving,
                                          transform,
                                          params0,
                                          static_affine,
                                          moving_affine,
                                          starting_affine=c_of_mass.affine)

            # refine further by allowing all rigid transforms (rotations/translations around the centre of gravity)
            print('Rigid transform:')
            transform = RigidTransform3D()
            params0 = None
            rigid = affreg.optimize(static,
                                    moving,
                                    transform,
                                    params0,
                                    static_affine,
                                    moving_affine,
                                    starting_affine=translation.affine)

            full_affine = False
            # the GM networks method is based on keeping the cortical shape intact

            if (full_affine):

                # refine to a full affine transform by adding scaling and shearing
                print('Affine transform:')
                transform = AffineTransform3D()
                params0 = None
                affine = affreg.optimize(static,
                                         moving,
                                         transform,
                                         params0,
                                         static_affine,
                                         moving_affine,
                                         starting_affine=rigid.affine)
                final = affine

            else:

                final = rigid

            np.savez(tf_filename, final)

        else:

            with np.load(tf_filename, allow_pickle=True) as npzfile:
                final = npzfile['arr_0']

        # transform the grey matter data instead of the MRI itself
        resampled = final.transform(grey)
        save_nifti(new_gmfilename, resampled, static_affine)
        resampled = final.transform(atl)
        save_nifti(new_atfilename, resampled, static_affine)

        print('finished in {:.2f}s'.format(time.process_time() - start))

    if ((os.path.isfile(new_gmfilename)) or (os.path.islink(new_gmfilename))):

        # only cube size implemented so far
        cubesize = 3

        # load the grey matter map and the template to which it was registered
        gm_img = nib.load(new_gmfilename)
        template_data = np.asarray(nib.load(template_mr).dataobj)
        gm_data = np.asarray(gm_img.dataobj)

        # find the best cube grid position (with the most nonzero cubes)
        cube_nonzeros, cube_offsets, gm_incubes = cube_grid_position(
            gm_data, template_data, cubesize)
        gm_shape = gm_incubes.shape

        # write out the cube map, where each voxel in a cube is labelled with its cube index
        # be aware of the @ operator, this is a true matrix product A[n*m] x B[m*p] = C [n*p]
        cubes_data = np.zeros(template_data.shape).flatten()
        cubes_data[cube_offsets] = np.ones(
            cubesize**3)[:, np.newaxis] @ np.arange(cube_nonzeros).reshape(
                1, cube_nonzeros)
        cubes_data = cubes_data.reshape(template_data.shape)
        cubes_file = os.path.abspath(gm_filename).replace(
            ".nii", "_cubes.nii")
        cubes_map = nib.Nifti1Image(cubes_data, gm_img.affine)
        cubes_map.to_filename(cubes_file)

        # make a randomised version of the grey matter densities in the cubes
        # 1: exchange between and inside cubes (could be too many degrees of freedom!)
        gm_random = gm_incubes.flatten()
        gm_random = gm_random[np.random.permutation(
            len(gm_random)).reshape(gm_shape)]
        # 2: exchange cubes only ( this won't change the values in the correlation matrix, only positions )
        # gm_random = gm_incubes [ :, np.random.permutation ( gm_shape [1] ) ];
        # 3: exchange cubes and shuffle inside cubes
        # gm_random = gm_incubes [ np.random.permutation ( gm_shape [0] ), np.random.permutation ( gm_shape [1] )[ :, np.newaxis ] ];

        add_diag = True

        # name of the NIfTI file with networks
        networks_file = os.path.abspath(gm_filename).replace(
            ".nii", "_gmnet.nii")

        if not ((os.path.isfile(networks_file)) or
                (os.path.islink(networks_file))):

            # compute the cross correlation for observed and randomised cubes
            networks = cube_cross_correlation(gm_incubes, gm_random, cubesize,
                                              add_diag)

            # save the networks to a file
            networks_map = nib.Nifti1Image(networks, np.eye(4))
            networks_map.to_filename(networks_file)

        else:

            print("loading already existing file")
            networks = np.asarray(nib.load(networks_file).dataobj)

    return networks, networks_file
Example #29
def main():
    # reads the tractography data in trk format
    # extracts streamlines and the file header. Streamlines should be in the same coordinate system as the FA map (used later).
    # input example: '/home/Example_data/tracts.trk'
    tractography_file = input(
        "Please, specify the file with tracts that you would like to analyse. File should be in the trk format. "
    )

    streams, hdr = load_trk(tractography_file)  # for old DIPY version
    # sft = load_trk(tractography_file, tractography_file)
    # streams = sft.streamlines
    streams_array = np.asarray(streams)
    print('imported tractography data:' + tractography_file)

    # load T1fs_conform image that operates in the same coordinates as simnibs except for the fact the center of mesh
    # is located at the image center
    # T1fs_conform image should be generated in advance during the head meshing procedure
    # input example: fname_T1='/home/Example_data/T1fs_conform.nii.gz'

    fname_T1 = input(
        "Please, specify the T1fs_conform image that has been generated during head meshing procedure. "
    )
    data_T1, affine_T1 = load_nifti(fname_T1)

    # load FA image in the same coordinates as tracts
    # input example:fname_FA='/home/Example_data/DTI_FA.nii'
    fname_FA = input("Please, specify the FA image. ")
    data_FA, affine_FA = load_nifti(fname_FA)

    print('loaded T1fs_conform.nii and FA images')

    # specify the head mesh file that is used later in simnibs to simulate induced electric field
    # input example:'/home/Example_data/SUBJECT_MESH.msh'
    global mesh_path
    mesh_path = input("Please, specify the head mesh file. ")

    last_slash = max([i for i, ltr in enumerate(mesh_path) if ltr == '/']) + 1
    global subject_name
    subject_name = mesh_path[last_slash:-4]

    # specify the directory where you would like to save your simulation results
    # input example:'/home/Example_data/Output'
    global out_dir
    out_dir = input(
        "Please, specify the directory where you would like to save your simulation results. "
    )
    out_dir = out_dir + '/simulation_at_pos_'

    # Co-registration of T1fs_conform and FA images. Performed in 4 steps.
    # Step 1. Calculation of the center of mass transform. Used later as starting transform.
    c_of_mass = transform_centers_of_mass(data_T1, affine_T1, data_FA,
                                          affine_FA)
    print('calculated c_of_mass transformation')

    # Step 2. Calculation of a 3D translation transform. Used in the next step as starting transform.
    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(data_T1,
                                  data_FA,
                                  transform,
                                  params0,
                                  affine_T1,
                                  affine_FA,
                                  starting_affine=starting_affine)
    print('calculated 3D translation transform')

    # Step 3. Calculation of a Rigid 3D transform. Used in the next step as starting transform
    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(data_T1,
                            data_FA,
                            transform,
                            params0,
                            affine_T1,
                            affine_FA,
                            starting_affine=starting_affine)
    print('calculated Rigid 3D transform')

    # Step 4. Calculation of an affine transform. Used for co-registration of T1 and FA images.
    transform = AffineTransform3D()
    params0 = None
    starting_affine = rigid.affine
    affine = affreg.optimize(data_T1,
                             data_FA,
                             transform,
                             params0,
                             affine_T1,
                             affine_FA,
                             starting_affine=starting_affine)

    print('calculated Affine 3D transform')

    identity = np.eye(4)

    inv_affine_FA = np.linalg.inv(affine_FA)
    inv_affine_T1 = np.linalg.inv(affine_T1)
    inv_affine = np.linalg.inv(affine.affine)

    # transforming streamlines to FA space
    new_streams_FA = streamline.transform_streamlines(streams, inv_affine_FA)
    new_streams_FA_array = np.asarray(new_streams_FA)

    T1_to_FA = np.dot(inv_affine_FA, np.dot(affine.affine, affine_T1))
    FA_to_T1 = np.linalg.inv(T1_to_FA)

    # transforming streamlines from FA to T1 space
    new_streams_T1 = streamline.transform_streamlines(new_streams_FA, FA_to_T1)
    global new_streams_T1_array
    new_streams_T1_array = np.asarray(new_streams_T1)

    # calculating derivatives along the streamlines to get their local orientation
    global streams_array_derivative
    streams_array_derivative = copy.deepcopy(new_streams_T1_array)

    print('calculating derivatives along the streamlines')
    for stream in range(len(new_streams_T1_array)):
        my_stream = new_streams_T1_array[stream]
        for t in range(len(my_stream[:, 0])):
            streams_array_derivative[stream][t, 0] = my_deriv(t, my_stream[:, 0])
            streams_array_derivative[stream][t, 1] = my_deriv(t, my_stream[:, 1])
            streams_array_derivative[stream][t, 2] = my_deriv(t, my_stream[:, 2])
            deriv_norm = np.linalg.norm(streams_array_derivative[stream][t, :])
            streams_array_derivative[stream][t, :] /= deriv_norm

    # to create a torus representing a coil in an interactive window

    torus = vtk.vtkParametricTorus()
    torus.SetRingRadius(5)
    torus.SetCrossSectionRadius(2)

    torusSource = vtk.vtkParametricFunctionSource()
    torusSource.SetParametricFunction(torus)
    torusSource.SetScalarModeToPhase()

    torusMapper = vtk.vtkPolyDataMapper()
    torusMapper.SetInputConnection(torusSource.GetOutputPort())
    torusMapper.SetScalarRange(0, 360)

    torusActor = vtk.vtkActor()
    torusActor.SetMapper(torusMapper)

    torus_pos_x = 100
    torus_pos_y = 129
    torus_pos_z = 211
    torusActor.SetPosition(torus_pos_x, torus_pos_y, torus_pos_z)

    list_streams_T1 = list(new_streams_T1)
    # adding one fictive bundle of length 1 with coordinates [0,0,0] to avoid some bugs with actor.line during visualization
    list_streams_T1.append(np.array([0, 0, 0]))

    global bundle_native
    bundle_native = list_streams_T1

    # generating a list of colors to visualize the stimulation effects later
    effect_max = 0.100
    effect_min = -0.100
    global colors
    colors = [
        np.random.rand(*current_streamline.shape)
        for current_streamline in bundle_native
    ]

    for my_streamline in range(len(bundle_native) - 1):
        my_stream = copy.deepcopy(bundle_native[my_streamline])
        for point in range(len(my_stream)):
            colors[my_streamline][point] = vtkplotter.colors.colorMap(
                (effect_min + effect_max) / 2,
                name='jet',
                vmin=effect_min,
                vmax=effect_max)

    colors[my_streamline + 1] = vtkplotter.colors.colorMap(effect_min,
                                                           name='jet',
                                                           vmin=effect_min,
                                                           vmax=effect_max)

    # Vizualization of fibers over T1

    # i_coord = 0
    # j_coord = 0
    # k_coord = 0
    # global number_of_stimulations
    number_of_stimulations = 0

    actor_line_list = []

    scene = window.Scene()
    scene.clear()
    scene.background((0.5, 0.5, 0.5))

    world_coords = False
    shape = data_T1.shape

    lut = actor.colormap_lookup_table(scale_range=(effect_min, effect_max),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.))

    # # the lines below are for a non-interactive demonstration run only.
    # # they should remain commented unless you set "interactive" to False
    # lut, colors = change_TMS_effects(torus_pos_x, torus_pos_y, torus_pos_z)
    # bar =  actor.scalar_bar(lut)
    # bar.SetTitle("TMS effect")
    # bar.SetHeight(0.3)
    # bar.SetWidth(0.10)
    # bar.SetPosition(0.85, 0.3)
    # scene.add(bar)

    actor_line_list.append(
        actor.line(bundle_native,
                   colors,
                   linewidth=5,
                   fake_tube=True,
                   lookup_colormap=lut))

    if not world_coords:
        image_actor_z = actor.slicer(data_T1, identity)
    else:
        image_actor_z = actor.slicer(data_T1, identity)

    slicer_opacity = 0.6
    image_actor_z.opacity(slicer_opacity)

    image_actor_x = image_actor_z.copy()
    x_midpoint = int(np.round(shape[0] / 2))
    image_actor_x.display_extent(x_midpoint, x_midpoint, 0, shape[1] - 1, 0,
                                 shape[2] - 1)

    image_actor_y = image_actor_z.copy()
    y_midpoint = int(np.round(shape[1] / 2))
    image_actor_y.display_extent(0, shape[0] - 1, y_midpoint, y_midpoint, 0,
                                 shape[2] - 1)
    """
    Connect the actors with the scene.
    """

    scene.add(actor_line_list[0])
    scene.add(image_actor_z)
    scene.add(image_actor_x)
    scene.add(image_actor_y)

    show_m = window.ShowManager(scene, size=(1200, 900))
    show_m.initialize()
    """
    Create sliders to move the slices and change their opacity.
    """

    line_slider_z = ui.LineSlider2D(min_value=0,
                                    max_value=shape[2] - 1,
                                    initial_value=shape[2] / 2,
                                    text_template="{value:.0f}",
                                    length=140)

    line_slider_x = ui.LineSlider2D(min_value=0,
                                    max_value=shape[0] - 1,
                                    initial_value=shape[0] / 2,
                                    text_template="{value:.0f}",
                                    length=140)

    line_slider_y = ui.LineSlider2D(min_value=0,
                                    max_value=shape[1] - 1,
                                    initial_value=shape[1] / 2,
                                    text_template="{value:.0f}",
                                    length=140)

    opacity_slider = ui.LineSlider2D(min_value=0.0,
                                     max_value=1.0,
                                     initial_value=slicer_opacity,
                                     length=140)
    """
    Callbacks for the sliders.
    """
    def change_slice_z(slider):
        z = int(np.round(slider.value))
        image_actor_z.display_extent(0, shape[0] - 1, 0, shape[1] - 1, z, z)

    def change_slice_x(slider):
        x = int(np.round(slider.value))
        image_actor_x.display_extent(x, x, 0, shape[1] - 1, 0, shape[2] - 1)

    def change_slice_y(slider):
        y = int(np.round(slider.value))
        image_actor_y.display_extent(0, shape[0] - 1, y, y, 0, shape[2] - 1)

    def change_opacity(slider):
        slicer_opacity = slider.value
        image_actor_z.opacity(slicer_opacity)
        image_actor_x.opacity(slicer_opacity)
        image_actor_y.opacity(slicer_opacity)

    line_slider_z.on_change = change_slice_z
    line_slider_x.on_change = change_slice_x
    line_slider_y.on_change = change_slice_y
    opacity_slider.on_change = change_opacity
    """
    Create text labels to identify the sliders.
    """

    def build_label(text):
        label = ui.TextBlock2D()
        label.message = text
        label.font_size = 18
        label.font_family = 'Arial'
        label.justification = 'left'
        label.bold = False
        label.italic = False
        label.shadow = False
        label.background = (0, 0, 0)
        label.color = (1, 1, 1)
        return label

    line_slider_label_z = build_label(text="Z Slice")
    line_slider_label_x = build_label(text="X Slice")
    line_slider_label_y = build_label(text="Y Slice")
    opacity_slider_label = build_label(text="Opacity")
    """
    Create a ``panel`` to contain the sliders and labels.
    """

    panel = ui.Panel2D(size=(300, 200),
                       color=(1, 1, 1),
                       opacity=0.1,
                       align="right")
    panel.center = (1030, 120)

    panel.add_element(line_slider_label_x, (0.1, 0.75))
    panel.add_element(line_slider_x, (0.38, 0.75))
    panel.add_element(line_slider_label_y, (0.1, 0.55))
    panel.add_element(line_slider_y, (0.38, 0.55))
    panel.add_element(line_slider_label_z, (0.1, 0.35))
    panel.add_element(line_slider_z, (0.38, 0.35))
    panel.add_element(opacity_slider_label, (0.1, 0.15))
    panel.add_element(opacity_slider, (0.38, 0.15))

    scene.add(panel)
    """
    Create a ``panel`` to show the value of a picked voxel.
    """

    label_position = ui.TextBlock2D(text='Position:')
    label_value = ui.TextBlock2D(text='Value:')

    result_position = ui.TextBlock2D(text='')
    result_value = ui.TextBlock2D(text='')

    text2 = ui.TextBlock2D(text='Calculate')

    panel_picking = ui.Panel2D(size=(250, 125),
                               color=(1, 1, 1),
                               opacity=0.1,
                               align="left")
    panel_picking.center = (200, 120)

    panel_picking.add_element(label_position, (0.1, 0.75))
    panel_picking.add_element(label_value, (0.1, 0.45))

    panel_picking.add_element(result_position, (0.45, 0.75))
    panel_picking.add_element(result_value, (0.45, 0.45))

    panel_picking.add_element(text2, (0.1, 0.15))

    icon_files = []
    icon_files.append(('left', read_viz_icons(fname='circle-left.png')))
    button_example = ui.Button2D(icon_fnames=icon_files, size=(100, 30))
    panel_picking.add_element(button_example, (0.5, 0.1))

    def change_text_callback(i_ren, obj, button):
        text2.message = str(i_coord) + ' ' + str(j_coord) + ' ' + str(k_coord)
        torusActor.SetPosition(i_coord, j_coord, k_coord)
        print(i_coord, j_coord, k_coord)
        lut, colors = change_TMS_effects(i_coord, j_coord, k_coord)
        scene.rm(actor_line_list[0])
        actor_line_list.append(
            actor.line(bundle_native,
                       colors,
                       linewidth=5,
                       fake_tube=True,
                       lookup_colormap=lut))
        scene.add(actor_line_list[1])

        nonlocal number_of_stimulations
        global bar
        if number_of_stimulations > 0:
            scene.rm(bar)
        else:
            number_of_stimulations = number_of_stimulations + 1

        bar = actor.scalar_bar(lut)
        bar.SetTitle("TMS effect")

        bar.SetHeight(0.3)
        bar.SetWidth(0.10)
        bar.SetPosition(0.85, 0.3)
        scene.add(bar)

        actor_line_list.pop(0)
        i_ren.force_render()

    button_example.on_left_mouse_button_clicked = change_text_callback

    scene.add(panel_picking)
    scene.add(torusActor)

    def left_click_callback(obj, ev):
        """Get the value of the clicked voxel and show it in the panel."""
        event_pos = show_m.iren.GetEventPosition()

        obj.picker.Pick(event_pos[0], event_pos[1], 0, scene)

        global i_coord, j_coord, k_coord
        i_coord, j_coord, k_coord = obj.picker.GetPointIJK()
        print(i_coord, j_coord, k_coord)
        result_position.message = '({}, {}, {})'.format(
            str(i_coord), str(j_coord), str(k_coord))
        result_value.message = '%.8f' % data_T1[i_coord, j_coord, k_coord]
        torusActor.SetPosition(i_coord, j_coord, k_coord)

    image_actor_z.AddObserver('LeftButtonPressEvent', left_click_callback, 1.0)

    global size
    size = scene.GetSize()

    def win_callback(obj, event):
        global size
        if size != obj.GetSize():
            size_old = size
            size = obj.GetSize()
            size_change = [size[0] - size_old[0], 0]
            panel.re_align(size_change)

    show_m.initialize()
    """
    Set the following variable to ``True`` to interact with the datasets in 3D.
    """
    interactive = True

    scene.zoom(2.0)
    scene.reset_clipping_range()
    scene.set_camera(position=(-642.07, 495.40, 148.49),
                     focal_point=(127.50, 127.50, 127.50),
                     view_up=(0.02, -0.01, 1.00))

    if interactive:
        show_m.add_window_callback(win_callback)
        show_m.render()
        show_m.start()
    else:
        window.record(scene,
                      out_path=out_dir + '/bundles_and_effects.png',
                      size=(1200, 900),
                      reset_camera=True)
Ejemplo n.º 30
0
def affine_registration(moving,
                        static,
                        moving_affine=None,
                        static_affine=None,
                        pipeline=None,
                        starting_affine=None,
                        metric='MI',
                        level_iters=None,
                        sigmas=None,
                        factors=None,
                        **metric_kwargs):
    """
    Find the affine transformation between two 3D images.

    Parameters
    ----------
    moving : array, nifti image or str
        Containing the data for the moving object, or full path to a nifti file
        with the moving data.

    moving_affine : 4x4 array, optional
        An affine transformation associated with the moving object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    static : array, nifti image or str
        Containing the data for the static object, or full path to a nifti file
        with the static data.

    static_affine : 4x4 array, optional
        An affine transformation associated with the static object. Required if
        data is provided as an array. If provided together with nifti/path,
        will over-ride the affine that is in the nifti.

    pipeline : sequence, optional
        Sequence of transforms to use in the gradual fitting of the full
        affine. Default (executed from left to right):
        `[center_of_mass, translation, rigid, affine]`

    starting_affine: 4x4 array, optional
        Initial guess for the transformation between the spaces.
        Default: identity.

    metric : str, optional.
        Currently only supports 'MI' for MutualInformationMetric.

    nbins : int, optional
        MutualInformationMetric key-word argument: the number of bins to be
        used for computing the intensity histograms. The default is 32.

    sampling_proportion : None or float in interval (0, 1], optional
        MutualInformationMetric key-word argument: There are two types of
        sampling: dense and sparse. Dense sampling uses all voxels for
        estimating the (joint and marginal) intensity histograms, while
        sparse sampling uses a subset of them. If `sampling_proportion` is
        None, then dense sampling is used. If `sampling_proportion` is a
        floating point value in (0,1] then sparse sampling is used,
        where `sampling_proportion` specifies the proportion of voxels to
        be used. The default is None (dense sampling).

    level_iters : sequence, optional
        AffineRegistration key-word argument: the number of iterations at each
        scale of the scale space. `level_iters[0]` corresponds to the coarsest
        scale and `level_iters[-1]` to the finest. By default, a 3-level scale
        space with iterations sequence equal to [10000, 1000, 100] will be
        used.

    sigmas : sequence of floats, optional
        AffineRegistration key-word argument: custom smoothing parameter to
        build the scale space (one parameter for each scale). By default,
        the sequence of sigmas will be [3, 1, 0].

    factors : sequence of floats, optional
        AffineRegistration key-word argument: custom scale factors to build the
        scale space (one factor for each scale). By default, the sequence of
        factors will be [4, 2, 1].

    Returns
    -------
    transformed : array
        The moving data resampled to the static space after computing the
        affine transformation.
    affine : 4x4 array
        The affine transformation associated with the resampling.


    Notes
    -----
    Performs a gradual registration between the two inputs, using a pipeline
    of transforms that successively approximates the final registration. If
    the final default step (`affine`) is omitted, the resulting affine may not
    have all 12 degrees of freedom adjusted.
    """
    pipeline = pipeline or [center_of_mass, translation, rigid, affine]
    level_iters = level_iters or [10000, 1000, 100]
    sigmas = sigmas or [3, 1, 0.0]
    factors = factors or [4, 2, 1]

    static, static_affine, moving, moving_affine, starting_affine = \
        _handle_pipeline_inputs(moving, static,
                                moving_affine=moving_affine,
                                static_affine=static_affine,
                                starting_affine=starting_affine)

    # Define the Affine registration object we'll use with the chosen metric.
    # For now, there is only one metric (mutual information)
    use_metric = affine_metric_dict[metric](**metric_kwargs)

    affreg = AffineRegistration(metric=use_metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)

    # Go through the selected transformation:
    for func in pipeline:
        starting_affine = func(moving,
                               static,
                               static_affine=static_affine,
                               moving_affine=moving_affine,
                               starting_affine=starting_affine,
                               reg=affreg)

    # After doing all that, resample once at the end:
    affine_map = AffineMap(starting_affine, static.shape, static_affine,
                           moving.shape, moving_affine)

    resampled = affine_map.transform(moving)

    return resampled, starting_affine
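
A minimal usage sketch for the function above, assuming the same module scope
(so the `center_of_mass`, `translation`, `rigid` and `affine` pipeline helpers
used in the function body are available) and hypothetical file names:

from dipy.io.image import load_nifti

# Hypothetical input files; replace with real paths.
static_data, static_affine = load_nifti('static.nii.gz')
moving_data, moving_affine = load_nifti('moving.nii.gz')

# Run the gradual pipeline with the defaults described in the docstring.
warped, reg_affine = affine_registration(
    moving_data, static_data,
    moving_affine=moving_affine,
    static_affine=static_affine,
    pipeline=[center_of_mass, translation, rigid, affine],
    metric='MI',
    nbins=32,
    level_iters=[10000, 1000, 100],
    sigmas=[3.0, 1.0, 0.0],
    factors=[4, 2, 1])

# `warped` holds the moving data resampled into the static space;
# `reg_affine` is the 4x4 transform that produced it.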
Ejemplo n.º 31
0
    def run(self,
            static_img_files,
            moving_img_files,
            transform='affine',
            nbins=32,
            sampling_prop=None,
            metric='mi',
            level_iters=[10000, 1000, 100],
            sigmas=[3.0, 1.0, 0.0],
            factors=[4, 2, 1],
            progressive=True,
            save_metric=False,
            out_dir='',
            out_moved='moved.nii.gz',
            out_affine='affine.txt',
            out_quality='quality_metric.txt'):
        """
        Parameters
        ----------
        static_img_files : string
            Path to the static image file.

        moving_img_files : string
            Path to the moving image file.

        transform : string, optional
            com: center of mass, trans: translation, rigid: rigid body,
             affine: full affine including translation, rotation, shearing and
             scaling (default 'affine').

        nbins : int, optional
            Number of bins to discretize the joint and marginal PDF
             (default '32').

        sampling_prop : int, optional
            Number ([0-100]) of voxels for calculating the PDF.
             'None' implies all voxels (default 'None').

        metric : string, optional
            Similarity metric for gathering mutual information
             (default 'mi' , Mutual Information metric).

        level_iters : variable int, optional
            The number of iterations at each scale of the scale space.
             `level_iters[0]` corresponds to the coarsest scale and
             `level_iters[-1]` to the finest. By default, a 3-level scale
              space with iterations sequence equal to [10000, 1000, 100]
              will be used.

        sigmas : variable floats, optional
            Custom smoothing parameter to build the scale space (one parameter
             for each scale). By default, the sequence of sigmas will be
             [3, 1, 0].

        factors : variable floats, optional
            Custom scale factors to build the scale space (one factor for each
             scale). By default, the sequence of factors will be [4, 2, 1].

        progressive : boolean, optional
            Enable/Disable the progressive registration (default 'True').

        save_metric : boolean, optional
            If true, the quality assessment metrics are saved in
            'quality_metric.txt' (default 'False').

        out_dir : string, optional
            Directory to save the transformed image and the affine matrix
             (default '').

        out_moved : string, optional
            Name for the saved transformed image
             (default 'moved.nii.gz').

        out_affine : string, optional
            Name for the saved affine matrix
             (default 'affine.txt').

        out_quality : string, optional
            Name of the file containing the saved quality
             metric (default 'quality_metric.txt').
        """

        io_it = self.get_io_iterator()
        transform = transform.lower()

        for static_img, mov_img, moved_file, affine_matrix_file, \
                qual_val_file in io_it:

            # Load the data from the input files and store into objects.
            static, static_grid2world = load_nifti(static_img)
            moving, moving_grid2world = load_nifti(mov_img)

            check_dimensions(static, moving)

            if transform == 'com':
                moved_image, affine = self.center_of_mass(
                    static, static_grid2world, moving, moving_grid2world)
            else:

                params0 = None
                if metric != 'mi':
                    raise ValueError("Invalid similarity metric: Please"
                                     " provide a valid metric.")
                metric = MutualInformationMetric(nbins, sampling_prop)
                """
                Instantiating the registration class with the configurations.
                """

                affreg = AffineRegistration(metric=metric,
                                            level_iters=level_iters,
                                            sigmas=sigmas,
                                            factors=factors)

                if transform == 'trans':
                    moved_image, affine, \
                        xopt, fopt = self.translate(static,
                                                    static_grid2world,
                                                    moving,
                                                    moving_grid2world,
                                                    affreg,
                                                    params0)

                elif transform == 'rigid':
                    moved_image, affine, \
                        xopt, fopt = self.rigid(static,
                                                static_grid2world,
                                                moving,
                                                moving_grid2world,
                                                affreg,
                                                params0,
                                                progressive)

                elif transform == 'affine':
                    moved_image, affine, \
                        xopt, fopt = self.affine(static,
                                                 static_grid2world,
                                                 moving,
                                                 moving_grid2world,
                                                 affreg,
                                                 params0,
                                                 progressive)
                else:
                    raise ValueError('Invalid transformation:'
                                     ' Please see program\'s help'
                                     ' for allowed values of'
                                     ' transformation.')
                """
                Saving the moved image file and the affine matrix.
                """
                logging.info("Optimal parameters: {0}".format(str(xopt)))
                logging.info("Similarity metric: {0}".format(str(fopt)))

                if save_metric:
                    save_qa_metric(qual_val_file, xopt, fopt)

            save_nifti(moved_file, moved_image, static_grid2world)
            np.savetxt(affine_matrix_file, affine)
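
If this `run` method belongs to DIPY's `ImageRegistrationFlow` workflow (an
assumption; the class definition is not shown here), it can be driven
programmatically roughly as follows, with hypothetical file names:

from dipy.workflows.align import ImageRegistrationFlow

flow = ImageRegistrationFlow()
flow.run('static.nii.gz',           # static image (hypothetical path)
         'moving.nii.gz',           # moving image (hypothetical path)
         transform='rigid',         # 'com', 'trans', 'rigid' or 'affine'
         nbins=32,
         metric='mi',
         level_iters=[10000, 1000, 100],
         sigmas=[3.0, 1.0, 0.0],
         factors=[4, 2, 1],
         progressive=True,
         save_metric=True,
         out_dir='reg_out',
         out_moved='moved.nii.gz',
         out_affine='affine.txt',
         out_quality='quality_metric.txt')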
Ejemplo n.º 32
0
com = align_centers_of_mass(static, new_aff_static, moving, new_aff_moving)
warped = com.transform(moving)
rt.overlay_slices(static, warped, slice_type=2)

# Create the metric
nbins = 32
sampling_prop = None
metric = MattesMIMetric(nbins, sampling_prop)

# Create the optimizer
level_iters = [10000, 1000, 100]
sigmas = [3.0, 1.0, 0.0]
factors = [4, 2, 1]
affreg = AffineRegistration(metric=metric,
                            level_iters=level_iters,
                            sigmas=sigmas,
                            factors=factors)

# Translation
transform = regtransforms[('TRANSLATION', 3)]
params0 = None
starting_affine = com.affine
trans = affreg.optimize(static, moving, transform, params0,
                        new_aff_static, new_aff_moving,
                        starting_affine=starting_affine)
warped = trans.transform(moving)
rt.overlay_slices(static, warped, None, 0, "Static", "Warped", "warped_trans_0.png")
rt.overlay_slices(static, warped, None, 1, "Static", "Warped", "warped_trans_1.png")
rt.overlay_slices(static, warped, None, 2, "Static", "Warped", "warped_trans_2.png")

# Rigid
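
The fragment above is cut off after the translation stage; a hedged sketch of
how the rigid stage typically continues, assuming the same variables defined
above, is:

transform = regtransforms[('RIGID', 3)]
rigid = affreg.optimize(static, moving, transform, params0,
                        new_aff_static, new_aff_moving,
                        starting_affine=trans.affine)
warped = rigid.transform(moving)
rt.overlay_slices(static, warped, None, 2, "Static", "Warped", "warped_rigid_2.png")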
Ejemplo n.º 33
0
def ROI_registration(datapath, template, t1, b0, roi):

    t1_path = datapath + '/' + t1
    b0_path = datapath + '/' + b0
    roi_path = datapath + '/' + roi
    template_path = datapath + '/' + template

    template_img, template_affine = load_nifti(template_path)
    t1_img, t1_affine = load_nifti(t1_path)
    b0_img, b0_affine = load_nifti(b0_path)
    roi_img, roi_affine = load_nifti(roi_path)

    # diff2struct affine registration

    moving = b0_img
    moving_grid2world = b0_affine
    static = t1_img
    static_grid2world = t1_affine
    affine_path = datapath + '/' + 'diff2struct_affine.mat'

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    sigmas = [3.0, 1.0, 0.0]
    level_iters = [10000, 1000, 100]
    factors = [4, 2, 1]
    affreg_diff2struct = AffineRegistration(metric=metric,
                                            level_iters=level_iters,
                                            sigmas=sigmas,
                                            factors=factors)

    transform = AffineTransform3D()
    params0 = None

    affine_diff2struct = affreg_diff2struct.optimize(static,
                                                     moving,
                                                     transform,
                                                     params0,
                                                     static_grid2world,
                                                     moving_grid2world,
                                                     starting_affine=None)

    saveAffineMat(affine_diff2struct, affine_path)

    # struct2standard affine registration

    moving = t1_img
    moving_grid2world = t1_affine
    static = template_img
    static_grid2world = template_affine

    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    sigmas = [3.0, 1.0, 0.0]
    level_iters = [10000, 1000, 100]
    factors = [4, 2, 1]
    affreg_struct2standard = AffineRegistration(metric=metric,
                                                level_iters=level_iters,
                                                sigmas=sigmas,
                                                factors=factors)

    transform = AffineTransform3D()
    params0 = None
    affine_struct2standard = affreg_struct2standard.optimize(
        static,
        moving,
        transform,
        params0,
        static_grid2world,
        moving_grid2world,
        starting_affine=None)

    # struct2standard SyN registration
    pre_align = affine_struct2standard.get_affine()
    metric = CCMetric(3)
    level_iters = [10, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

    mapping = sdr.optimize(static, moving, static_grid2world,
                           moving_grid2world, pre_align)

    warped = mapping.transform_inverse(template_img)
    warped = affine_diff2struct.transform_inverse(warped)
    template_diff_path = datapath + '/' + 'MNI152_diff'
    save_nifti(template_diff_path, warped, b0_affine)

    warped_roi = mapping.transform_inverse(roi_img)
    warped_roi = affine_diff2struct.transform_inverse(warped_roi)
    roi_diff_path = datapath + '/' + roi + '_diff.nii.gz'
    save_nifti(roi_diff_path, warped_roi, b0_affine)

    print("  Done!  ")
Ejemplo n.º 34
0
def register_3d(params):
    r'''
    Runs affine registration with the parsed parameters
    '''
    print('Registering %s to %s' % (params.moving, params.static))
    sys.stdout.flush()
    # params.metric is expected to look like 'MI[32]' or 'LCC[4]': the metric
    # name before '[', and a comma-separated parameter list inside the brackets.
    metric_name = params.metric[0:params.metric.find('[')]
    metric_params_list = params.metric[params.metric.find('[') + 1:params.metric.find(']')].split(',')
    moving_mask = None
    static_mask = None
    #Initialize the appropriate metric
    if metric_name == 'MI':
        nbins=int(metric_params_list[0])
        sampling_proportion = None
        try:
            sampling_proportion = float(metric_params_list[1])
        except:
            pass
        metric = MattesMIMetric(nbins, sampling_proportion)
    elif metric_name == 'LCC':
        from dipy.align.imaffine import LocalCCMetric
        radius=int(metric_params_list[0])
        metric = LocalCCMetric(radius)
    else:
        raise ValueError('Unknown metric: %s'%(metric_name,))

    #Initialize the optimizer
    opt_iter = [int(i) for i in params.iter.split(',')]
    transforms = [t for t in params.transforms.split(',')]
    if params.ss_sigma_factor is not None:
        ss_sigma_factor = float(params.ss_sigma_factor)
    else:
        ss_sigma_factor = None
    factors = [int(i) for i in params.factors.split(',')]
    sigmas = [float(i) for i in params.sigmas.split(',')]
    #method = 'CGGS'
    method = params.method
    affreg = AffineRegistration(metric=metric,
                                level_iters=opt_iter,
                                sigmas=sigmas,
                                factors=factors,
                                method=method,
                                ss_sigma_factor=ss_sigma_factor,
                                options=None)
    #Load the data
    moving_nib = nib.load(params.moving)
    moving_affine = moving_nib.get_affine()
    moving = moving_nib.get_data().squeeze().astype(np.float64)
    # Bring the center of the image to the origin
    #c_moving = ndimage.measurements.center_of_mass(np.array(moving))
    c_moving = tuple(0.5 * np.array(moving.shape, dtype=np.float64))
    c_moving = moving_affine.dot(c_moving+(1,))
    correction_moving = np.eye(4, dtype=np.float64)
    correction_moving[:3,3] = -1 * c_moving[:3]
    centered_moving_aff = correction_moving.dot(moving_affine)

    static_nib = nib.load(params.static)
    static_affine = static_nib.get_affine()
    static = static_nib.get_data().squeeze().astype(np.float64)
    # Bring the center of the image to the origin
    #c_static = ndimage.measurements.center_of_mass(np.array(static))
    c_static = tuple(0.5 * np.array(static.shape, dtype=np.float64))
    c_static = static_affine.dot(c_static+(1,))
    correction_static = np.eye(4, dtype=np.float64)
    correction_static[:3,3] = -1 * c_static[:3]
    centered_static_aff = correction_static.dot(static_affine)

    dim = len(static.shape)
    #Run the registration
    sol = np.eye(dim + 1)
    prealign = 'mass'
    for transform_name in transforms:
        transform = regtransforms[(transform_name, dim)]
        print('Optimizing: %s'%(transform_name,))
        x0 = None
        sol = affreg.optimize(static, moving, transform, x0,
                              centered_static_aff, centered_moving_aff, starting_affine = prealign)
        prealign = sol.affine.copy()

    # Correct solution
    fixed = np.linalg.inv(correction_moving).dot(sol.affine.dot(correction_static))
    sol.set_affine(fixed)
    sol.domain_grid2world = static_affine
    sol.codomain_grid2world = moving_affine
    save_registration_results(sol, params)
    print('Solution: ', sol.affine)
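
`register_3d` expects a parsed-arguments object; below is a sketch of the
fields it reads directly, with illustrative values only. Note that
`save_registration_results` (not shown here) may require additional fields.

from argparse import Namespace

params = Namespace(
    moving='moving.nii.gz',        # hypothetical input
    static='static.nii.gz',        # hypothetical input
    metric='MI[32]',               # metric name plus bracketed parameters
    iter='10000,1000,100',         # iterations per scale, comma-separated
    transforms='TRANSLATION,RIGID,AFFINE',
    ss_sigma_factor=None,
    factors='4,2,1',
    sigmas='3.0,1.0,0.0',
    method='L-BFGS-B')

# register_3d(params) would then run the staged registration.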
Ejemplo n.º 35
0
Now we specify the sub-sampling factors. A good configuration is [4, 2, 1],
which means that, if the original image shape was (nx, ny, nz) voxels, then the
shape of the coarsest image will be about (nx//4, ny//4, nz//4), the shape in
the middle resolution will be about (nx//2, ny//2, nz//2) and the image at the
finest scale has the same size as the original image. This set of factors is
the default.
"""

factors = [4, 2, 1]
"""
Now we go ahead and instantiate the registration class with the configuration
we just prepared
"""

affreg = AffineRegistration(metric=metric,
                            level_iters=level_iters,
                            sigmas=sigmas,
                            factors=factors)
"""
Using AffineRegistration we can register our images in as many stages as we
want, providing previous results as initialization for the next (the same
logic as in ANTS). This is useful because registration is a non-convex
optimization problem (it may have more than one local optimum), which means
it is very important to initialize as close to the solution as possible. For
example, let's start with our (previously computed) rough transformation
aligning the centers of mass of our images, and then refine it in three
stages. First look for an optimal translation. The dictionary regtransforms
contains all available transforms; we obtain one of them by providing its
name and the dimension (either 2 or 3) of the image we are working with
(since we are aligning volumes, the dimension is 3)
Ejemplo n.º 36
0
def warp_syn_dipy(static_fname, moving_fname):
    import os
    import numpy as np
    import nibabel as nb
    from dipy.align.metrics import CCMetric
    from dipy.align.imaffine import (transform_centers_of_mass,
                                     AffineMap,
                                     MutualInformationMetric,
                                     AffineRegistration)
    from dipy.align.transforms import (TranslationTransform3D,
                                       RigidTransform3D,
                                       AffineTransform3D)
    from dipy.align.imwarp import (DiffeomorphicMap,
                                   SymmetricDiffeomorphicRegistration)

    from nipype.utils.filemanip import fname_presuffix

    static = nb.load(static_fname)
    moving = nb.load(moving_fname)
    
    c_of_mass = transform_centers_of_mass(static.get_data(), static.affine,
                                          moving.get_data(), moving.affine)
    nbins = 32
    sampling_prop = None
    metric = MutualInformationMetric(nbins, sampling_prop)
    level_iters = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affreg = AffineRegistration(metric=metric,
                                level_iters=level_iters,
                                sigmas=sigmas,
                                factors=factors)
    transform = TranslationTransform3D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affreg.optimize(static.get_data(), moving.get_data(),
                                  transform, params0,
                                  static.affine, moving.affine,
                                  starting_affine=starting_affine)
    
    transform = RigidTransform3D()
    params0 = None
    starting_affine = translation.affine
    rigid = affreg.optimize(static.get_data(), moving.get_data(), transform, params0,
                            static.affine, moving.affine,
                            starting_affine=starting_affine)
    transform = AffineTransform3D()
    params0 = None
    starting_affine = rigid.affine
    affine = affreg.optimize(static.get_data(), moving.get_data(), transform, params0,
                             static.affine, moving.affine,
                             starting_affine=starting_affine)
    
    metric = CCMetric(3, sigma_diff=3.)
    level_iters = [25, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
    starting_affine = affine.affine
    mapping = sdr.optimize(static.get_data(), moving.get_data(),
                           static.affine, moving.affine,
                           starting_affine)

    warped_filename = os.path.abspath(fname_presuffix(moving_fname, newpath='./', suffix='_warped', use_ext=True))
    warped = nb.Nifti1Image(mapping.transform(moving.get_data()), static.affine)
    warped.to_filename(warped_filename)

    warp_filename = os.path.abspath(fname_presuffix(moving_fname, newpath='./', suffix='_warp.npz', use_ext=False))
    np.savez(warp_filename, prealign=mapping.prealign,
             forward=mapping.forward, backward=mapping.backward)

    return warp_filename, warped_filename
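
A hedged usage sketch for `warp_syn_dipy`; the file names are hypothetical:

warp_file, warped_file = warp_syn_dipy('sub-01_T1w.nii.gz', 'sub-01_b0.nii.gz')
print('Deformation field saved to:', warp_file)
print('Warped image saved to:', warped_file)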