Example 1
    def _run_interface(self, runtime):
        # Load image, orient as RAS
        fname = self.inputs.in_file
        orig_img = nb.load(fname)
        reoriented = nb.as_closest_canonical(orig_img)

        # Reconstruct transform from orig to reoriented image
        ornt_xfm = nb.orientations.inv_ornt_aff(
            nb.io_orientation(orig_img.affine), orig_img.shape)

        normalized = normalize_xform(reoriented)

        # Image may be reoriented
        if normalized is not orig_img:
            out_name = fname_presuffix(fname,
                                       suffix='_ras',
                                       newpath=runtime.cwd)
            normalized.to_filename(out_name)
        else:
            out_name = fname

        mat_name = fname_presuffix(fname,
                                   suffix='.mat',
                                   newpath=runtime.cwd,
                                   use_ext=False)
        np.savetxt(mat_name, ornt_xfm, fmt='%.08f')

        self._results['out_file'] = out_name
        self._results['transform'] = mat_name

        return runtime
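
For reference, a minimal sketch of the nibabel orientation calls used above, on a hypothetical LAS affine and shape (see nibabel's io_orientation/inv_ornt_aff documentation for the exact direction convention of the returned matrix):

import numpy as np
import nibabel as nb

affine = np.diag([-2., 2., 2., 1.])               # hypothetical LAS voxel-to-world affine
ornt = nb.io_orientation(affine)                  # per-axis flips/permutations w.r.t. RAS
ornt_xfm = nb.orientations.inv_ornt_aff(ornt, (64, 64, 40))
np.savetxt('ras_xfm.mat', ornt_xfm, fmt='%.08f')  # hypothetical output name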
Example 2
def _tpm2roi(in_tpm,
             in_mask,
             mask_erosion_mm=None,
             erosion_mm=None,
             mask_erosion_prop=None,
             erosion_prop=None,
             pthres=0.95,
             newpath=None):
    """
    Generate a mask from a tissue probability map
    """
    tpm_img = nb.load(in_tpm)
    roi_mask = (tpm_img.get_data() >= pthres).astype(np.uint8)

    eroded_mask_file = None
    erode_in = ((mask_erosion_mm is not None and mask_erosion_mm > 0)
                or (mask_erosion_prop is not None and mask_erosion_prop < 1))
    if erode_in:
        eroded_mask_file = fname_presuffix(in_mask,
                                           suffix='_eroded',
                                           newpath=newpath)
        mask_img = nb.load(in_mask)
        mask_data = mask_img.get_data().astype(np.uint8)
        if mask_erosion_mm:
            iter_n = max(
                int(mask_erosion_mm / max(mask_img.header.get_zooms())), 1)
            mask_data = nd.binary_erosion(mask_data, iterations=iter_n)
        else:
            orig_vol = np.sum(mask_data > 0)
            while np.sum(mask_data > 0) / orig_vol > mask_erosion_prop:
                mask_data = nd.binary_erosion(mask_data, iterations=1)

        # Store mask
        eroded = nb.Nifti1Image(mask_data, mask_img.affine, mask_img.header)
        eroded.set_data_dtype(np.uint8)
        eroded.to_filename(eroded_mask_file)

        # Mask TPM data (no effect if not eroded)
        roi_mask[~mask_data] = 0

    # Shrink the resulting ROI
    erode_out = ((erosion_mm is not None and erosion_mm > 0)
                 or (erosion_prop is not None and erosion_prop < 1))
    if erode_out:
        if erosion_mm:
            iter_n = max(int(erosion_mm / max(tpm_img.header.get_zooms())), 1)
            roi_mask = nd.binary_erosion(roi_mask, iterations=iter_n)
        else:
            orig_vol = np.sum(roi_mask > 0)
            while np.sum(roi_mask > 0) / orig_vol > erosion_prop:
                roi_mask = nd.binary_erosion(roi_mask, iterations=1)

    # Create image to resample
    roi_fname = fname_presuffix(in_tpm, suffix='_roi', newpath=newpath)
    roi_img = nb.Nifti1Image(roi_mask, tpm_img.affine, tpm_img.header)
    roi_img.set_data_dtype(np.uint8)
    roi_img.to_filename(roi_fname)
    return roi_fname, eroded_mask_file or in_mask
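
A minimal usage sketch for _tpm2roi, assuming the module-level imports the function relies on (nibabel as nb, numpy as np, scipy.ndimage as nd, nipype's fname_presuffix) and purely hypothetical file names:

roi_file, mask_file = _tpm2roi(
    'sub-01_label-WM_probseg.nii.gz',   # hypothetical tissue probability map
    'sub-01_desc-brain_mask.nii.gz',    # hypothetical brain mask
    mask_erosion_mm=10,                 # erode the brain mask by ~10 mm first
    erosion_prop=0.6,                   # then shrink the ROI to ~60% of its volume
    pthres=0.95)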
Example 3
    def _run_interface(self, runtime):
        in_files = self.inputs.in_files
        if not isinstance(in_files, list):
            in_files = [self.inputs.in_files]

        # Generate output average name early
        self._results['out_avg'] = fname_presuffix(in_files[0], suffix='_avg',
                                                   newpath=runtime.cwd)

        if self.inputs.to_ras:
            in_files = [reorient(inf, newpath=runtime.cwd)
                        for inf in in_files]

        if len(in_files) == 1:
            filenii = nb.load(in_files[0])
            filedata = filenii.get_data()

            # magnitude files can have an extra, empty dimension
            if filedata.ndim == 5:
                sqdata = np.squeeze(filedata)
                if sqdata.ndim == 5:
                    raise RuntimeError('Input image (%s) is 5D' % in_files[0])
                else:
                    in_files = [fname_presuffix(in_files[0], suffix='_squeezed',
                                                newpath=runtime.cwd)]
                    nb.Nifti1Image(sqdata, filenii.affine,
                                   filenii.header).to_filename(in_files[0])

            if np.squeeze(nb.load(in_files[0]).get_data()).ndim < 4:
                self._results['out_file'] = in_files[0]
                self._results['out_avg'] = in_files[0]
                # TODO: generate identity out_mats and zero-filled out_movpar
                return runtime
            in_files = in_files[0]
        else:
            magmrg = fsl.Merge(dimension='t', in_files=in_files)
            in_files = magmrg.run().outputs.merged_file
        mcflirt = fsl.MCFLIRT(cost='normcorr', save_mats=True, save_plots=True,
                              ref_vol=0, in_file=in_files)
        mcres = mcflirt.run()
        self._results['out_mats'] = mcres.outputs.mat_file
        self._results['out_movpar'] = mcres.outputs.par_file
        self._results['out_file'] = mcres.outputs.out_file

        hmcnii = nb.load(mcres.outputs.out_file)
        hmcdat = hmcnii.get_data().mean(axis=3)
        if self.inputs.zero_based_avg:
            hmcdat -= hmcdat.min()

        nb.Nifti1Image(
            hmcdat, hmcnii.affine, hmcnii.header).to_filename(
            self._results['out_avg'])

        return runtime
Example 4
def _applytfms(args):
    """
    Applies ANTs' antsApplyTransforms to the input image.
    All inputs are zipped in one tuple to make it digestible by
    multiprocessing's map
    """
    import nibabel as nb
    from niworkflows.nipype.utils.filemanip import fname_presuffix
    from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms

    in_file, in_xform, ifargs, index, newpath = args
    out_file = fname_presuffix(in_file, suffix='_xform-%05d' % index,
                               newpath=newpath, use_ext=True)

    copy_dtype = ifargs.pop('copy_dtype', False)
    xfm = ApplyTransforms(
        input_image=in_file, transforms=in_xform, output_image=out_file, **ifargs)
    xfm.terminal_output = 'allatonce'
    xfm.resource_monitor = False
    runtime = xfm.run().runtime

    if copy_dtype:
        nii = nb.load(out_file)
        in_dtype = nb.load(in_file).get_data_dtype()

        # Overwrite only if dtypes don't match
        if in_dtype != nii.get_data_dtype():
            nii.set_data_dtype(in_dtype)
            nii.to_filename(out_file)

    return (out_file, runtime.cmdline)
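
Because _applytfms takes a single tuple, it can be fanned out with multiprocessing's map. A sketch of the argument packing, with hypothetical file names and parameters (the pool call is left commented out since the files do not exist here):

from multiprocessing import Pool

ifargs = {'interpolation': 'LanczosWindowedSinc', 'copy_dtype': True}
tasks = [('vol-%03d.nii.gz' % i, 'xfm-%05d.tfm' % i, dict(ifargs), i, '/tmp')
         for i in range(4)]
# with Pool(4) as pool:
#     outputs = pool.map(_applytfms, tasks)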
Example 5
    def _run_interface(self, runtime):
        out_file = fname_presuffix(
            self.inputs.in_file, suffix='_joined.tsv', newpath=runtime.cwd,
            use_ext=False)

        header = ''
        if isdefined(self.inputs.columns) and self.inputs.columns:
            header = '\t'.join(self.inputs.columns)

        with open(self.inputs.in_file) as ifh:
            data = ifh.read().splitlines(keepends=False)

        with open(self.inputs.join_file) as ifh:
            join = ifh.read().splitlines(keepends=False)

        assert len(data) == len(join)

        merged = []
        for d, j in zip(data, join):
            line = '%s\t%s' % ((j, d) if self.inputs.side == 'left' else (d, j))
            merged.append(line)

        if header:
            merged.insert(0, header)

        with open(out_file, 'w') as ofh:
            ofh.write('\n'.join(merged))

        self._results['out_file'] = out_file
        return runtime
Example 6
def phdiff2fmap(in_file, delta_te, newpath=None):
    r"""
    Converts the input phase-difference map into a fieldmap in Hz,
    using eq. (1) of [Hutton2002]_:

    .. math::

        \Delta B_0 (\text{T}) = \frac{\Delta \Theta}{2\pi\gamma \Delta\text{TE}}


    In this case, we do not take into account the gyromagnetic ratio of the
    proton (:math:`\gamma`), since it will be applied inside TOPUP:

    .. math::

        \Delta B_0 (\text{Hz}) = \frac{\Delta \Theta}{2\pi \Delta\text{TE}}

    """
    import math
    import numpy as np
    import nibabel as nb
    from niworkflows.nipype.utils.filemanip import fname_presuffix
    #  GYROMAG_RATIO_H_PROTON_MHZ = 42.576

    out_file = fname_presuffix(in_file, suffix='_fmap', newpath=newpath)
    image = nb.load(in_file)
    data = (image.get_data().astype(np.float32) / (2. * math.pi * delta_te))
    nii = nb.Nifti1Image(data, image.affine, image.header)
    nii.set_data_dtype(np.float32)
    nii.to_filename(out_file)
    return out_file
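
A quick numerical check of the conversion implemented above, with hypothetical values (a phase difference of pi rad acquired with delta_TE = 2.46 ms):

import math
delta_phi = math.pi                            # phase difference, in rad
delta_te = 2.46e-3                             # echo-time difference, in s
print(delta_phi / (2. * math.pi * delta_te))   # ~203.25 Hz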
Example 7
    def _run_interface(self, runtime):
        masknii = compute_epi_mask(self.inputs.in_files,
                                   lower_cutoff=self.inputs.lower_cutoff,
                                   upper_cutoff=self.inputs.upper_cutoff,
                                   connected=self.inputs.connected,
                                   opening=self.inputs.opening,
                                   exclude_zeros=self.inputs.exclude_zeros,
                                   ensure_finite=self.inputs.ensure_finite,
                                   target_affine=self.inputs.target_affine,
                                   target_shape=self.inputs.target_shape)

        if self.inputs.no_sanitize:
            in_file = self.inputs.in_files
            if isinstance(in_file, list):
                in_file = in_file[0]
            nii = nb.load(in_file)
            qform, code = nii.get_qform(coded=True)
            masknii.set_qform(qform, int(code))
            sform, code = nii.get_sform(coded=True)
            masknii.set_sform(sform, int(code))

        self._results['out_mask'] = fname_presuffix(self.inputs.in_files[0],
                                                    suffix='_mask',
                                                    newpath=runtime.cwd)
        masknii.to_filename(self._results['out_mask'])
        return runtime
Example 8
    def _run_interface(self, runtime):

        in_file = nb.load(self.inputs.in_file)
        wm_mask = nb.load(self.inputs.wm_mask).get_data()
        wm_mask[wm_mask < 0.9] = 0
        wm_mask[wm_mask > 0] = 1
        wm_mask = wm_mask.astype(np.uint8)

        if self.inputs.erodemsk:
            # Create a structuring element and erode the WM mask, so voxels
            # near its boundary are excluded from the median computed below.
            struc = nd.generate_binary_structure(3, 2)
            wm_mask = nd.binary_erosion(wm_mask, structure=struc).astype(np.uint8)

        data = in_file.get_data()
        data *= 1000.0 / np.median(data[wm_mask > 0])

        out_file = fname_presuffix(self.inputs.in_file,
                                   suffix='_harmonized', newpath='.')
        in_file.__class__(data, in_file.affine, in_file.header).to_filename(
            out_file)

        self._results['out_file'] = out_file

        return runtime
Example 9
    def _run_interface(self, runtime):
        img = nb.load(self.inputs.in_file)
        out_report = os.path.abspath('report.html')

        qform_code = img.header._structarr['qform_code']
        sform_code = img.header._structarr['sform_code']

        # Valid affine information
        if (qform_code, sform_code) != (0, 0):
            self._results['out_file'] = self.inputs.in_file
            open(out_report, 'w').close()
            self._results['out_report'] = out_report
            return runtime

        out_fname = fname_presuffix(self.inputs.in_file, suffix='_valid', newpath=runtime.cwd)

        # Nibabel derives a default LAS affine from the shape and zooms
        # Use scanner xform code to indicate no alignment has been done
        img.set_sform(img.affine, nb.nifti1.xform_codes['scanner'])

        img.to_filename(out_fname)
        self._results['out_file'] = out_fname

        snippet = (r'<h3 class="elem-title">WARNING - Invalid header</h3>',
                   r'<p class="elem-desc">Input file does not have valid qform or sform matrix.',
                   r'A default, LAS-oriented affine has been constructed.',
                   r'A left-right flip may have occurred.',
                   r'Analyses of this dataset MAY BE INVALID.</p>')

        with open(out_report, 'w') as fobj:
            fobj.write('\n'.join('\t' * 3 + line for line in snippet))
            fobj.write('\n')

        self._results['out_report'] = out_report
        return runtime
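
For context, a small self-contained sketch (synthetic image, hypothetical shape) of the header fields inspected and repaired above:

import numpy as np
import nibabel as nb

img = nb.Nifti1Image(np.zeros((2, 2, 2), dtype=np.float32), np.eye(4))
img.header['qform_code'] = 0                     # simulate a header without valid codes
img.header['sform_code'] = 0
print(int(img.header['qform_code']), int(img.header['sform_code']))   # 0 0
img.set_sform(img.affine, nb.nifti1.xform_codes['scanner'])
print(int(img.header['sform_code']))             # 1 ('scanner')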
Example 10
    def _run_interface(self, runtime):
        in_files = self.inputs.in_files

        indices = list(range(len(in_files)))
        if isdefined(self.inputs.indices):
            indices = self.inputs.indices

        if len(self.inputs.in_files) < 2:
            self._results['out_file'] = in_files[0]
            return runtime

        first_fname = in_files[indices[0]]
        if len(indices) == 1:
            self._results['out_file'] = first_fname
            return runtime

        im = nb.concat_images([in_files[i] for i in indices])
        data = im.get_data().astype(float).sum(axis=3)
        data = np.clip(data, a_min=0.0, a_max=1.0)

        out_file = fname_presuffix(first_fname, suffix='_tpmsum',
                                   newpath=runtime.cwd)
        newnii = im.__class__(data, im.affine, im.header)
        newnii.set_data_dtype(np.float32)

        # Set visualization thresholds
        newnii.header['cal_max'] = 1.0
        newnii.header['cal_min'] = 0.0
        newnii.to_filename(out_file)
        self._results['out_file'] = out_file

        return runtime
Example 11
def _arrange_xfms(transforms, num_files, tmp_folder):
    """
    Convenience method to arrange the list of transforms that should be applied
    to each input file
    """
    base_xform = ['#Insight Transform File V1.0', '#Transform 0']
    # Initialize the transforms matrix
    xfms_T = []
    for i, tf_file in enumerate(transforms):
        # If it is a deformation field, add it to the transforms matrix directly
        if guess_type(tf_file)[0] != 'text/plain':
            xfms_T.append([tf_file] * num_files)
            continue

        with open(tf_file) as tf_fh:
            tfdata = tf_fh.read().strip()

        # If it is not an ITK transform file, add it to the transforms matrix directly
        if not tfdata.startswith('#Insight Transform File'):
            xfms_T.append([tf_file] * num_files)
            continue

        # Count number of transforms in ITK transform file
        nxforms = tfdata.count('#Transform')

        # Remove first line
        tfdata = tfdata.split('\n')[1:]

        # If it is an ITK transform file with only one transform, add it to the transforms matrix directly
        if nxforms == 1:
            xfms_T.append([tf_file] * num_files)
            continue

        if nxforms != num_files:
            raise RuntimeError(
                'Number of transforms (%d) found in the ITK file does not match'
                ' the number of input image files (%d).' %
                (nxforms, num_files))

        # At this point splitting transforms will be necessary, generate a base name
        out_base = fname_presuffix(tf_file,
                                   suffix='_pos-%03d_xfm-{:05d}' % i,
                                   newpath=tmp_folder.name).format
        # Split combined ITK transforms file
        split_xfms = []
        for xform_i in range(nxforms):
            # Find start token to extract
            startidx = tfdata.index('#Transform %d' % xform_i)
            next_xform = base_xform + tfdata[startidx + 1:startidx + 4] + ['']
            xfm_file = out_base(xform_i)
            with open(xfm_file, 'w') as out_xfm:
                out_xfm.write('\n'.join(next_xform))
            split_xfms.append(xfm_file)
        xfms_T.append(split_xfms)

    # Transpose so there is one list of transforms per input file
    # (map returns an iterator in Python 3, hence the explicit list())
    return list(map(list, zip(*xfms_T)))
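
The final zip(*xfms_T) transposes the per-transform lists into per-file lists. A self-contained illustration with hypothetical file names:

xfms_T = [
    ['warp.nii.gz'] * 3,                                   # a deformation field reused for every file
    ['xfm-00000.tfm', 'xfm-00001.tfm', 'xfm-00002.tfm'],   # one affine per input file
]
per_file = list(map(list, zip(*xfms_T)))
# per_file == [['warp.nii.gz', 'xfm-00000.tfm'],
#              ['warp.nii.gz', 'xfm-00001.tfm'],
#              ['warp.nii.gz', 'xfm-00002.tfm']]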
Example 12
    def _run_interface(self, runtime):
        out_file = fname_presuffix(self.inputs.in_file, suffix='_motion.tsv', newpath=runtime.cwd,
                                   use_ext=False)
        data = np.loadtxt(self.inputs.in_file)
        np.savetxt(out_file, data, delimiter='\t', header='\t'.join(self.inputs.columns),
                   comments='')

        self._results['out_file'] = out_file
        return runtime
Example 13
def _flatten_split_merge(in_files):
    if isinstance(in_files, str):
        in_files = [in_files]

    nfiles = len(in_files)

    all_nii = []
    for fname in in_files:
        nii = nb.squeeze_image(nb.load(fname))

        if nii.get_data().ndim > 3:
            all_nii += nb.four_to_three(nii)
        else:
            all_nii.append(nii)

    if len(all_nii) == 1:
        LOGGER.warning('File %s cannot be split', in_files[0])
        return in_files[0], in_files

    if len(all_nii) == nfiles:
        flat_split = in_files
    else:
        splitname = fname_presuffix(in_files[0],
                                    suffix='_split%04d',
                                    newpath=os.getcwd())
        flat_split = []
        for i, nii in enumerate(all_nii):
            flat_split.append(splitname % i)
            nii.to_filename(flat_split[-1])

    # Only one 4D file was supplied
    if nfiles == 1:
        merged = in_files[0]
    else:
        # More than one input file: merge them
        merged = fname_presuffix(in_files[0],
                                 suffix='_merged',
                                 newpath=os.getcwd())
        nb.concat_images(all_nii).to_filename(merged)

    return merged, flat_split
Example 14
def _fix_hdr(in_file, newpath=None):
    import nibabel as nb
    from niworkflows.nipype.utils.filemanip import fname_presuffix

    nii = nb.load(in_file)
    hdr = nii.header.copy()
    hdr.set_data_dtype('<f4')
    hdr.set_intent('vector', (), '')
    out_file = fname_presuffix(in_file, suffix='_warpfield', newpath=newpath)
    nb.Nifti1Image(nii.get_data().astype('<f4'), nii.affine,
                   hdr).to_filename(out_file)
    return out_file
Example 15
def _hz2rads(in_file, out_file=None):
    """Transform a fieldmap in Hz into rad/s"""
    import os
    from math import pi
    import nibabel as nb
    from niworkflows.nipype.utils.filemanip import fname_presuffix
    if out_file is None:
        out_file = fname_presuffix(in_file, suffix='_rads',
                                   newpath=os.getcwd())
    nii = nb.load(in_file)
    data = nii.get_data() * 2.0 * pi
    nb.Nifti1Image(data, nii.affine,
                   nii.header).to_filename(out_file)
    return out_file
Example 16
def add_suffix(in_files, suffix):
    """
    Wrap nipype's fname_presuffix to conveniently just add a suffix

    >>> add_suffix([
    ...     '/path/to/sub-045_ses-test_T1w.nii.gz',
    ...     '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
    'sub-045_ses-test_T1w_test.nii.gz'

    """
    import os.path as op
    from niworkflows.nipype.utils.filemanip import fname_presuffix, filename_to_list
    return op.basename(
        fname_presuffix(filename_to_list(in_files)[0], suffix=suffix))
Example 17
def _tohz(in_file, range_hz, newpath=None):
    """Convert a field map to Hz units"""
    from math import pi
    import nibabel as nb
    from niworkflows.nipype.utils.filemanip import fname_presuffix

    out_file = fname_presuffix(in_file, suffix='_hz', newpath=newpath)
    fmapnii = nb.load(in_file)
    fmapdata = fmapnii.get_data()
    fmapdata = fmapdata * (range_hz / pi)
    out_img = nb.Nifti1Image(fmapdata, fmapnii.affine, fmapnii.header)
    out_img.set_data_dtype('float32')
    out_img.to_filename(out_file)
    return out_file
Example 18
def _demean(in_file, in_mask, out_file=None):
    import os
    import numpy as np
    import nibabel as nb
    from niworkflows.nipype.utils.filemanip import fname_presuffix

    if out_file is None:
        out_file = fname_presuffix(in_file,
                                   suffix='_demeaned',
                                   newpath=os.getcwd())
    nii = nb.load(in_file)
    msk = nb.load(in_mask).get_data()
    data = nii.get_data()
    data -= np.median(data[msk > 0])
    nb.Nifti1Image(data, nii.affine, nii.header).to_filename(out_file)
    return out_file
Example 19
def _tohz(in_file, cutoff_hz, out_file=None):
    import os
    from math import pi
    import nibabel as nb
    from niworkflows.nipype.utils.filemanip import fname_presuffix

    if out_file is None:
        out_file = fname_presuffix(in_file, suffix='_hz', newpath=os.getcwd())

    fmapnii = nb.load(in_file)
    fmapdata = fmapnii.get_data()
    fmapdata = fmapdata * (cutoff_hz / pi)
    out_img = nb.Nifti1Image(fmapdata, fmapnii.affine, fmapnii.header)
    out_img.set_data_dtype('float32')
    out_img.to_filename(out_file)
    return out_file
Example 20
    def _run_interface(self, runtime):

        self._results['out_file'] = fname_presuffix(self.inputs.in_anat,
                                                    suffix='_rbrainmask',
                                                    newpath=runtime.cwd)

        anatnii = nb.load(self.inputs.in_anat)
        msknii = nb.Nifti1Image(
            grow_mask(anatnii.get_data(),
                      nb.load(self.inputs.in_aseg).get_data(),
                      nb.load(self.inputs.in_ants).get_data()), anatnii.affine,
            anatnii.header)
        msknii.set_data_dtype(np.uint8)
        msknii.to_filename(self._results['out_file'])

        return runtime
Example 21
def _mat2itk(args):
    from niworkflows.nipype.interfaces.c3 import C3dAffineTool
    from niworkflows.nipype.utils.filemanip import fname_presuffix

    in_file, in_ref, in_src, index, newpath = args
    # Generate a temporary file name
    out_file = fname_presuffix(in_file, suffix='_itk-%05d.txt' % index,
                               newpath=newpath)

    # Run c3d_affine_tool
    C3dAffineTool(transform_file=in_file, reference_file=in_ref, source_file=in_src,
                  fsl2ras=True, itk_transform=out_file, resource_monitor=False).run()
    transform = '#Transform %d\n' % index
    with open(out_file) as itkfh:
        transform += ''.join(itkfh.readlines()[2:])

    return (index, transform)
Example 22
    def _run_interface(self, runtime):
        from nilearn import image as nli

        t1_img = nli.load_img(self.inputs.in_file)
        t1_data = t1_img.get_data()
        epi_data = nli.load_img(self.inputs.epi_ref).get_data()

        # We assume the image is already masked
        mask = t1_data > 0

        t1_min, t1_max = np.unique(t1_data)[[1, -1]]
        epi_min, epi_max = np.unique(epi_data)[[1, -1]]
        scale_factor = (epi_max - epi_min) / (t1_max - t1_min)

        inv_data = mask * ((t1_max - t1_data) * scale_factor + epi_min)

        out_file = fname_presuffix(self.inputs.in_file, suffix='_inv', newpath=runtime.cwd)
        nli.new_img_like(t1_img, inv_data, copy_header=True).to_filename(out_file)
        self._results['out_file'] = out_file
        return runtime
Example 23
    def _run_interface(self, runtime):
        in_file = nb.load(self.inputs.in_file)
        data = in_file.get_data()
        mask = np.zeros_like(data, dtype=np.uint8)
        mask[data <= 0] = 1

        # Pad one pixel to control behavior on borders of binary_opening
        mask = np.pad(mask, pad_width=(1,), mode='constant', constant_values=1)

        # Remove noise
        struc = nd.generate_binary_structure(3, 2)
        mask = nd.binary_opening(mask, structure=struc).astype(
            np.uint8)

        # Remove small objects
        label_im, nb_labels = nd.label(mask)
        if nb_labels > 2:
            sizes = nd.sum(mask, label_im, list(range(nb_labels + 1)))
            ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))
            for _, label in ordered[2:]:
                mask[label_im == label] = 0

        # Un-pad
        mask = mask[1:-1, 1:-1, 1:-1]

        # If mask is small, clean-up
        if mask.sum() < 500:
            mask = np.zeros_like(mask, dtype=np.uint8)

        out_img = in_file.__class__(mask, in_file.affine, in_file.header)
        out_img.header.set_data_dtype(np.uint8)

        out_file = fname_presuffix(self.inputs.in_file,
                                   suffix='_rotmask', newpath='.')
        out_img.to_filename(out_file)
        self._results['out_file'] = out_file
        return runtime
Example 24
    def _run_interface(self, runtime):
        import nibabel as nb
        import nilearn.image as nli
        from nipype.utils.filemanip import fname_presuffix, copyfile

        in_names = self.inputs.t1w_list
        orig_imgs = [nb.load(fname) for fname in in_names]
        reoriented = [nb.as_closest_canonical(img) for img in orig_imgs]
        target_shape = np.max([img.shape for img in reoriented], axis=0)
        target_zooms = np.min([img.header.get_zooms()[:3]
                               for img in reoriented], axis=0)

        resampled_imgs = []
        for img in reoriented:
            zooms = np.array(img.header.get_zooms()[:3])
            shape = np.array(img.shape)

            xyz_unit = img.header.get_xyzt_units()[0]
            if xyz_unit == 'unknown':
                # Common assumption; if we're wrong, unlikely to be the only thing that breaks
                xyz_unit = 'mm'
            # Set a 0.05 mm threshold for triggering rescaling
            atol = {'meter': 5e-5, 'mm': 0.05, 'micron': 50}[xyz_unit]

            # Rescale => change zooms
            # Resize => update image dimensions
            rescale = not np.allclose(zooms, target_zooms, atol=atol)
            resize = not np.all(shape == target_shape)
            if rescale or resize:
                target_affine = np.eye(4, dtype=img.affine.dtype)
                if rescale:
                    scale_factor = target_zooms / zooms
                    target_affine[:3, :3] = np.diag(scale_factor).dot(img.affine[:3, :3])
                else:
                    target_affine[:3, :3] = img.affine[:3, :3]

                if resize:
                    # The shift is applied after scaling.
                    # Use a proportional shift to maintain relative position in dataset
                    size_factor = (target_shape.astype(float) + shape) / (2 * shape)
                    # Use integer shifts to avoid unnecessary interpolation
                    offset = (img.affine[:3, 3] * size_factor - img.affine[:3, 3]).astype(int)
                    target_affine[:3, 3] = img.affine[:3, 3] + offset
                else:
                    target_affine[:3, 3] = img.affine[:3, 3]

                data = nli.resample_img(img, target_affine, target_shape).get_data()
                img = img.__class__(data, target_affine, img.header)

            resampled_imgs.append(img)

        out_names = [fname_presuffix(fname, suffix='_ras', newpath=runtime.cwd)
                     for fname in in_names]

        for orig, final, in_name, out_name in zip(orig_imgs, resampled_imgs,
                                                  in_names, out_names):
            if final is orig:
                copyfile(in_name, out_name, copy=True, use_hardlink=True)
            else:
                final.to_filename(out_name)

        self._results['t1w_list'] = out_names

        return runtime
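
A worked sketch of the rescaling arithmetic above, with hypothetical zooms: conforming a 1.2 mm isotropic, axis-aligned image to 1.0 mm keeps the direction cosines and only rescales them:

import numpy as np

zooms = np.array([1.2, 1.2, 1.2])           # hypothetical input voxel sizes (mm)
target_zooms = np.array([1.0, 1.0, 1.0])    # hypothetical target voxel sizes (mm)
orig_affine = np.diag([1.2, 1.2, 1.2, 1.0])

target_affine = np.eye(4)
target_affine[:3, :3] = np.diag(target_zooms / zooms).dot(orig_affine[:3, :3])
print(np.diag(target_affine))               # ~[1. 1. 1. 1.]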
Example 25
def add_suffix(in_files, suffix):
    import os.path as op
    from niworkflows.nipype.utils.filemanip import fname_presuffix, filename_to_list
    return op.basename(fname_presuffix(filename_to_list(in_files)[0],
                                       suffix=suffix))