Example #1
 def apply_resample(
     tensor: torch.Tensor,
     affine: np.ndarray,
     interpolation_order: int,
     target_spacing: Optional[Tuple[float, float, float]] = None,
     reference: Optional[Tuple[torch.Tensor, np.ndarray]] = None,
 ) -> Tuple[torch.Tensor, np.ndarray]:
     array = tensor.numpy()[0]
     if reference is None:
         nii = resample_to_output(
             nib.Nifti1Image(array, affine),
             voxel_sizes=target_spacing,
             order=interpolation_order,
         )
     else:
         reference_tensor, reference_affine = reference
         reference_array = reference_tensor.numpy()[0]
         nii = resample_from_to(
             nib.Nifti1Image(array, affine),
             nib.Nifti1Image(reference_array, reference_affine),
             order=interpolation_order,
         )
     tensor = torch.from_numpy(nii.get_fdata(dtype=np.float32))
     tensor = tensor.unsqueeze(dim=0)
     return tensor, nii.affine
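A minimal usage sketch for the function above, assuming apply_resample is callable as shown together with its imports (torch, numpy as np, nibabel as nib, and nibabel.processing's resample_to_output / resample_from_to); the tensor shape, 2 mm input spacing, and 1 mm target spacing are illustrative values only.

import numpy as np
import torch

volume = torch.rand(1, 16, 16, 16)          # single-channel (C, D, H, W) volume
affine = np.diag([2.0, 2.0, 2.0, 1.0])      # 2 mm isotropic voxels
resampled, new_affine = apply_resample(
    volume, affine, interpolation_order=1, target_spacing=(1.0, 1.0, 1.0))
print(resampled.shape)                      # roughly (1, 31, 31, 31) at 1 mm spacing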
Example #2
def make_ds030_subject(line, subject_index, input_path, f, mask):
    try:
        t1_filename_nii = line[0] + '.nii.gz'
        t1_filename_mnc = line[0] + '.mnc'
        label = line[8]

        if len(label) > 0:
            # convert_to_MINC(input_path, t1_filename_nii, t1_filename_mnc)
            # register_MINC(input_path + t1_filename_mnc, atlas, input_path + '/resampled/' + t1_filename_mnc)

            t1 = nib.load(input_path + '/resampled/' + t1_filename_mnc)
            atlas_image = nib.load(atlas)

            t1_resampled = resample_from_to(t1, atlas_image)
            t1_data = t1_resampled.get_data()

            if not t1_data.shape == target_size:
                print('resizing from', t1_data.shape)
                t1_data = resize_image_with_crop_or_pad(t1_data,
                                                        img_size=target_size,
                                                        mode='constant')

            # t1_data = equalize_hist(t1_data, mask=mask)

            t1_data = histogram_matching(t1_data, atlas_image.get_data())

            f['MRI'][subject_index, 0, ...] = normalise_zero_one(t1_data)

            one_hot = [0, 0]
            label_confidence = 1

            if 'ok' in label:
                one_hot = [0, 1]
            elif 'maybe' in label:
                one_hot = [0, 1]
                label_confidence = 0.66
            elif 'exclude' in label:
                one_hot = [1, 0]
            else:
                raise Exception

            label = np.argmax(one_hot)

            f['label_confidence'][subject_index] = label_confidence
            f['qc_label'][subject_index] = label
            f['dataset'][subject_index] = 'ds030'
            f['filename'][subject_index] = t1_filename_mnc

            plt.imshow(t1_data[96, ...], origin='lower', cmap='gray')
            plt.axis('off')
            plt.savefig(output_dir + '/examples/' + t1_filename_mnc[:-4] +
                        '.png',
                        bbox_inches='tight')

    except Exception as e:
        print('Error:', e)
        return -1

    return subject_index
Example #3
def ukb_label_parts(label_parts, reference_labelmap=None):
    stitched_label = None
    mode = 'nearest'
    order = 0
    if reference_labelmap is None:
        label_shape = np.max([img.shape for img, _, _ in label_parts], axis=0)
        reference_labelmap = [img for img, _, _ in label_parts if list(img.shape) == list(label_shape)][0]
    else:
        label_shape = reference_labelmap.shape

    stitched_label = np.zeros(label_shape)
    for labelmap_img, lidx, lname in label_parts:
        print(lidx, lname)
        labelmap_img = makeit_3d(labelmap_img)
        labelmap_img = resample_from_to(labelmap_img, [label_shape, reference_labelmap.affine], order=order, mode=mode, cval=0)
        
        sx,sy,sz,ex,ey,ez = np.abs(get_points(labelmap_img, reference_labelmap))
        
        labelmap = labelmap_img.get_fdata()
        labelmap = np.multiply(lidx, labelmap)
        stitched_label[0:ex+sx, 0:ey+sy, 0:ez+sz] += labelmap
        
        print("###############################################################################################") 
        
    labelmap = np.round(stitched_label)
    stitched_labeled_img = nb.Nifti1Image(labelmap, reference_labelmap.affine, reference_labelmap.header)
    
    return stitched_labeled_img
Example #4
def test_against_spm_resample():
    # Test resampling against images resampled with SPM12
    # anatomical.nii has a diagonal -2, 2 2 affine;
    # functional.nii has a diagonal -4, 4 4 affine;
    # These are a bit boring, so first add some rotations and translations to
    # the anatomical image affine, and then resample to the first volume in the
    # functional, and compare to the same thing in SPM.
    # See ``make_moved_anat.py`` script in this directory for input to SPM.
    anat = nib.load(pjoin(DATA_DIR, 'anatomical.nii'))
    func = nib.load(pjoin(DATA_DIR, 'functional.nii'))
    some_rotations = euler2mat(0.1, 0.2, 0.3)
    extra_affine = from_matvec(some_rotations, [3, 4, 5])
    moved_anat = nib.Nifti1Image(anat.get_data().astype(float),
                                 extra_affine.dot(anat.affine),
                                 anat.header)
    one_func = nib.Nifti1Image(func.dataobj[..., 0],
                               func.affine,
                               func.header)
    moved2func = resample_from_to(moved_anat, one_func, order=1, cval=np.nan)
    spm_moved = nib.load(pjoin(DATA_DIR, 'resampled_anat_moved.nii'))
    assert_spm_resampling_close(moved_anat, moved2func, spm_moved)
    # Next we resample the rotated anatomical image to output space, and compare
    # to the same operation done with SPM (our own version of 'reorient.m' by
    # John Ashburner).
    moved2output = resample_to_output(moved_anat, 4, order=1, cval=np.nan)
    spm2output = nib.load(pjoin(DATA_DIR, 'reoriented_anat_moved.nii'))
    assert_spm_resampling_close(moved_anat, moved2output, spm2output)
Example #5
    def _morph_one_vol(self, stc_one):
        # prepare data to be morphed
        # here we use mri_resolution=True, mri_space=True because
        # we will slice afterward
        from dipy.align.reslice import reslice
        from nibabel.processing import resample_from_to
        from nibabel.spatialimages import SpatialImage
        assert stc_one.data.shape[1] == 1
        img_to = _interpolate_data(stc_one, self, mri_resolution=True,
                                   mri_space=True, output='nifti1')
        img_to = _get_img_fdata(img_to)
        assert img_to.ndim == 4 and img_to.shape[-1] == 1
        img_to = img_to[:, :, :, 0]

        # reslice to match morph
        img_to, img_to_affine = reslice(
            img_to, self.affine, _get_zooms_orig(self), self.zooms)

        # morph data
        img_to = self.sdr_morph.transform(self.pre_affine.transform(img_to))

        # subselect the correct cube if src_to is provided
        if self.src_data['to_vox_map'] is not None:
            # order=0 (nearest) should be fine since it's just subselecting
            img_to = _get_img_fdata(resample_from_to(
                SpatialImage(img_to, self.affine),
                self.src_data['to_vox_map'], order=0))

        # reshape to nvoxel x nvol:
        # in the MNE definition of volume source spaces,
        # x varies fastest, then y, then z, so we need order='F' here
        img_to = img_to.reshape(-1, order='F')
        return img_to
Example #6
def test_against_spm_resample():
    # Test resampling against images resampled with SPM12
    # anatomical.nii has a diagonal -2, 2 2 affine;
    # functional.nii has a diagonal -4, 4 4 affine;
    # These are a bit boring, so first add some rotations and translations to
    # the anatomical image affine, and then resample to the first volume in the
    # functional, and compare to the same thing in SPM.
    # See ``make_moved_anat.py`` script in this directory for input to SPM.
    anat = nib.load(pjoin(DATA_DIR, 'anatomical.nii'))
    func = nib.load(pjoin(DATA_DIR, 'functional.nii'))
    some_rotations = euler2mat(0.1, 0.2, 0.3)
    extra_affine = from_matvec(some_rotations, [3, 4, 5])
    moved_anat = nib.Nifti1Image(anat.get_fdata(),
                                 extra_affine.dot(anat.affine),
                                 anat.header)
    one_func = nib.Nifti1Image(func.dataobj[..., 0],
                               func.affine,
                               func.header)
    moved2func = resample_from_to(moved_anat, one_func, order=1, cval=np.nan)
    spm_moved = nib.load(pjoin(DATA_DIR, 'resampled_anat_moved.nii'))
    assert_spm_resampling_close(moved_anat, moved2func, spm_moved)
    # Next we resample the rotated anatomical image to output space, and compare
    # to the same operation done with SPM (our own version of 'reorient.m' by
    # John Ashburner).
    moved2output = resample_to_output(moved_anat, 4, order=1, cval=np.nan)
    spm2output = nib.load(pjoin(DATA_DIR, 'reoriented_anat_moved.nii'))
    assert_spm_resampling_close(moved_anat, moved2output, spm2output)
Example #7
def multi_vol_stitching(images, is_label=False, sampling=True):
    if len(images) == 0:
        raise Exception("Empty Image List!")

    images_sorted = sorted(images, key=lambda im: im.header['qoffset_z'], reverse=True)
    img_0 = images_sorted[0]

    mode = 'nearest' if is_label else 'constant'
    order = 0 if is_label else 3
    
    if len(images) == 1:
        if sampling:
            img_0 = resample_to_output(img_0, TARGET_RESOLUTION, order=order, mode=mode, cval=0.0)
        return img_0
    
    for idx, img_1 in enumerate(images_sorted[1:]):
        print(f'{idx}th img for stitching...')
        target_affine = img_0.affine.copy()
        target_affine[2, 3] = img_1.affine[2, 3].copy()
        target_shape = img_0.shape[:2] + img_1.shape[2:]
        img_1 = resample_from_to(img_1, [target_shape, target_affine])
        img_0 = vol_stitching(img_0, img_1)
    
    if sampling:
        img_0 = resample_to_output(img_0, TARGET_RESOLUTION, order=order, mode=mode, cval=0.0)

    return img_0
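A brief usage sketch with hypothetical file names; it assumes the module-level TARGET_RESOLUTION constant and the vol_stitching helper referenced above are available, plus nibabel imported as nib.

import nibabel as nib

parts = [nib.load(p) for p in ('station_upper.nii.gz', 'station_lower.nii.gz')]  # hypothetical inputs
stitched = multi_vol_stitching(parts, is_label=False, sampling=True)
nib.save(stitched, 'stitched.nii.gz')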
Example #8
 def apply_resample(
     tensor: torch.Tensor,  # (C, D, H, W)
     affine: np.ndarray,
     interpolation_order: int,
     target_spacing: Optional[Tuple[float, float, float]] = None,
     reference: Optional[Tuple[torch.Tensor, np.ndarray]] = None,
 ) -> Tuple[torch.Tensor, np.ndarray]:
     array = tensor.numpy()
     niis = []
     arrays_resampled = []
     if reference is None:
         for channel in array:
             nii = resample_to_output(
                 nib.Nifti1Image(channel, affine),
                 voxel_sizes=target_spacing,
                 order=interpolation_order,
             )
             arrays_resampled.append(nii.get_fdata(dtype=np.float32))
     else:
         reference_tensor, reference_affine = reference
         reference_array = reference_tensor.numpy()[0]
         for channel_array in array:
             nii = resample_from_to(
                 nib.Nifti1Image(channel_array, affine),
                 nib.Nifti1Image(reference_array, reference_affine),
                 order=interpolation_order,
             )
             arrays_resampled.append(nii.get_fdata(dtype=np.float32))
     tensor = torch.Tensor(arrays_resampled)
     return tensor, nii.affine
Example #9
def resample_nib(img, voxel_spacing=(1, 1, 1), order=3):
    """Resamples the nifti from its original spacing to another specified spacing
    
    Parameters
    ----------
    img: nibabel image
    voxel_spacing: a tuple of 3 numbers specifying the desired new spacing (mm)
    order: the order of spline interpolation

    Returns
    -------
    new_img: the resampled nibabel image

    """
    # resample to new voxel spacing based on the current x-y-z-orientation
    aff = img.affine
    shp = img.shape
    zms = img.header.get_zooms()
    # Calculate new shape
    new_shp = tuple(np.rint([
        shp[0] * zms[0] / voxel_spacing[0],
        shp[1] * zms[1] / voxel_spacing[1],
        shp[2] * zms[2] / voxel_spacing[2]
        ]).astype(int))
    new_aff = nib.affines.rescale_affine(aff, shp, voxel_spacing, new_shp)
    new_img = nip.resample_from_to(img, (new_shp, new_aff), order=order, cval=-1024)
    print("[*] Image resampled to voxel size:", voxel_spacing)
    return new_img
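A short usage sketch (hypothetical file names), assuming the imports the function relies on: nibabel as nib, numpy as np, and nibabel.processing as nip.

import nibabel as nib

img = nib.load('subject_T1w.nii.gz')                       # hypothetical input
iso = resample_nib(img, voxel_spacing=(1, 1, 1), order=3)  # 1 mm isotropic
nib.save(iso, 'subject_T1w_1mm.nii.gz')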
Example #10
def check_dti_data(dti_file, mask_file, order=0):
    # Load the DTI image data and mask:
    dti_image = nibabel.load(dti_file)
    dti_data = dti_image.get_fdata()

    mask_image = nibabel.load(mask_file)
    mask = mask_image.get_fdata().astype(bool)

    # Examine the differences in shape
    print("dti shape  ", dti_data.shape)
    print("mask shape ", mask.shape)
    M1, M2, M3 = mask.shape

    # Create an empty image as a helper for mapping
    # from DTI voxel space to T1 voxel space:
    shape = numpy.zeros((M1, M2, M3, 9))
    vox2ras = mask_image.header.get_vox2ras()
    Nii = nibabel.nifti1.Nifti1Image
    helper = Nii(shape, vox2ras)

    # Resample the DTI data in the T1 voxel space:
    image = resample_from_to(dti_image, helper, order=order)
    D = image.get_fdata()
    print("resampled image shape ", D.shape)

    # Reshape D from M1 x M2 x M3 x 9 into a N x 3 x 3:
    D = D.reshape(-1, 3, 3)

    # Compute eigenvalues and eigenvectors
    lmbdas, v = numpy.linalg.eigh(D)

    def compute_FA(lmbdas):
        MD = (lmbdas[:, 0] + lmbdas[:, 1] + lmbdas[:, 2]) / 3.
        FA2 = (3. / 2.) * (
            (lmbdas[:, 0] - MD)**2 + (lmbdas[:, 1] - MD)**2 +
            (lmbdas[:, 2] - MD)**2) / (lmbdas[:, 0]**2 + lmbdas[:, 1]**2 +
                                       lmbdas[:, 2]**2)
        FA = numpy.sqrt(FA2)
        return FA

    # Compute fractional anisotropy (FA)
    FA = compute_FA(lmbdas)

    # Define valid entries as those where all eigenvalues are
    # positive and FA is between 0 and 1
    positives = (lmbdas[:, 0] > 0) * (lmbdas[:, 1] > 0) * (lmbdas[:, 2] > 0)
    valid = positives * (FA < 1.0) * (FA > 0.0)
    valid = valid.reshape((M1, M2, M3))

    # Find all voxels with invalid tensors within the mask
    ii, jj, kk = numpy.where((~valid) * mask)
    print("Number of invalid tensor voxels within the mask ROI: ", len(ii))

    # Reshape D from N x 3 x 3 to M1 x M2 x M3 x 9
    D = D.reshape((M1, M2, M3, 9))

    return valid, mask, D
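A minimal usage sketch with hypothetical file names; it assumes a DTI image storing 9 tensor components per voxel and a T1-space mask, as the function above expects.

valid, mask, D = check_dti_data('dti_tensor.nii.gz', 'mask.nii.gz', order=0)
print(valid.shape, mask.shape, D.shape)    # (M1, M2, M3), (M1, M2, M3), (M1, M2, M3, 9)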
Example #11
def test_resample_equiv(from_shape, from_affine, to_shape, to_affine, order,
                        seed):
    """Test resampling equivalences."""
    rng = np.random.RandomState(seed)
    from_data = rng.randn(*from_shape)
    is_rand = False
    if isinstance(to_affine, str):
        assert to_affine == 'rand'
        to_affine = _rand_affine(rng)
        is_rand = True
    if isinstance(from_affine, str):
        assert from_affine == 'rand'
        from_affine = _rand_affine(rng)
        is_rand = True
    to_affine = np.array(to_affine, float)
    assert to_affine.shape == (4, 4)
    from_affine = np.array(from_affine, float)
    assert from_affine.shape == (4, 4)
    #
    # 1. nibabel.processing.resample_from_to
    #
    from nibabel.processing import resample_from_to
    from nibabel.spatialimages import SpatialImage
    start = np.linalg.norm(from_data)
    got_nibabel = resample_from_to(SpatialImage(from_data, from_affine),
                                   (to_shape, to_affine),
                                   order=order).get_fdata()
    end = np.linalg.norm(got_nibabel)
    assert end > 0.05 * start  # not too much power lost
    #
    # 2. dipy.align.imaffine
    #
    import dipy.align.imaffine
    interp = 'linear' if order == 1 else 'nearest'
    got_dipy = dipy.align.imaffine.AffineMap(
        None, to_shape, to_affine, from_shape,
        from_affine).transform(from_data,
                               interpolation=interp,
                               resample_only=True)
    # XXX possibly some error in dipy or nibabel (/SciPy), or some boundary
    # condition?
    if is_rand:
        assert not np.allclose(got_dipy, got_nibabel), 'nibabel fixed'
    else:
        assert_allclose(got_dipy, got_nibabel, err_msg='dipy<->nibabel')
    #
    # 3. mne.source_space._grid_interp
    #
    trans = np.linalg.inv(from_affine) @ to_affine  # to -> from
    interp = _grid_interp(from_shape, to_shape, trans, order=order)
    got_mne = np.asarray(interp @ from_data.ravel(order='F')).reshape(
        to_shape, order='F')
    assert_allclose(got_mne, got_dipy, err_msg='MNE<->dipy')
    if is_rand:
        assert not np.allclose(got_mne, got_nibabel), 'nibabel fixed'
    else:
        assert_allclose(got_mne, got_nibabel, err_msg='MNE<->nibabel')
Example #12
def load_corticalPatient_score_size():
    '''Create cortical patient data frame with score and lesion size'''
    tdf = pd.DataFrame(columns=['Subject', 'Trail Score'])
    tdf['Subject'] = np.loadtxt(
        '/home/kahwang/Trail_making_part_B_LESYMAP/subList_No_Epilepsy',
        dtype='str')
    tdf['Trail Score'] = np.loadtxt(
        '/home/kahwang/Trail_making_part_B_LESYMAP/No_Epilepsy/Data/Score.txt')

    edf = pd.DataFrame(columns=['Subject', 'Errors'])
    edf['Subject'] = np.loadtxt(
        '/home/kahwang/WCST_LESYMAP/WCST_PE_noThalamus_noEpil/subList',
        dtype='str')
    edf['Errors'] = np.loadtxt(
        '/home/kahwang/WCST_LESYMAP/WCST_PE_noThalamus_noEpil//scores.txt')

    sdf = pd.merge(tdf, edf, on=['Subject'], how='outer')

    networks = ['V', 'SM', 'DA', 'CO', 'Lim', 'FP', 'DF']
    yeof = nib.load('/data/backed_up/shared/ROIs/Yeo7network_2mm.nii.gz')
    tha_morel = nib.load('/home/kahwang/ROI_for_share/morel_overlap_2mm.nii.gz'
                         ).get_data()  # already same space as yeo template

    for i, s in enumerate(sdf['Subject']):

        # load mask and get size
        fn = '/home/kahwang/Lesion_Masks/%s.nii.gz' % s
        m = nib.load(fn)
        sdf.loc[i, 'Lesion Size'] = np.sum(m.get_data())

        #overlap with yeo network
        res_m = resample_from_to(m, yeof).get_data()
        yeo_m = yeof.get_data()

        for ii, n in enumerate(networks):
            networkpartition = yeo_m == ii + 1
            overlap = res_m * networkpartition
            sdf.loc[i, str(n) + '_overlap'] = np.sum(overlap) / float(
                np.sum(networkpartition))

        #exclude patients with lesions that touches the thalamus
        if np.sum(res_m * tha_morel) > 0:  #if overlap with thalamus
            sdf = sdf.drop(sdf[sdf['Subject'] == str(s)].index)

    #exclude cortical thalamus patients
    patients = [
        '0902', 1105, 1692, 1809, 1830, 2092, 2105, 2552, 2697, 2781, 3049,
        3184
    ]
    for p in patients:
        sdf = sdf.drop(sdf[sdf['Subject'] == str(p)].index)

    # Do regression of lesion size on score. The r square is .34!
    # results = sm.OLS(sdf['Trail Score'], sm.add_constant(sdf['Lesion Size'])).fit()
    # sdf['Adj Trail Score'] = results.resid
    return sdf
Example #13
def test_spatial_axes_check():
    for fname in MINC_3DS + OTHER_IMGS:
        img = nib.load(pjoin(DATA_DIR, fname))
        s_img = smooth_image(img, 0)
        assert_array_equal(img.dataobj, s_img.dataobj)
        out = resample_from_to(img, img, mode='nearest')
        assert_almost_equal(img.dataobj, out.dataobj)
        if len(img.shape) > 3:
            continue
        # Resample to output does not raise an error
        out = resample_to_output(img, voxel_sizes(img.affine))
    for fname in MINC_4DS:
        img = nib.load(pjoin(DATA_DIR, fname))
        with pytest.raises(ValueError):
            smooth_image(img, 0)
        with pytest.raises(ValueError):
            resample_from_to(img, img, mode='nearest')
        with pytest.raises(ValueError):
            resample_to_output(img, voxel_sizes(img.affine))
Example #14
def resize(img, shape=(256, 256, 400), is_label=False):
    if is_label:
        order = 0
        mode='nearest'
    else:
        order = 3
        mode='constant'    
        
    img = resample_from_to(img, [shape, img.affine], order=order, mode=mode, cval=0)
    return img
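A usage sketch with hypothetical paths: label maps get nearest-neighbour interpolation (order=0) while images get cubic splines (order=3); since the image's own affine is reused as the target, this effectively pads or crops the voxel grid to the requested shape.

import nibabel as nib

image = nib.load('scan.nii.gz')            # hypothetical intensity image
seg = nib.load('scan_seg.nii.gz')          # hypothetical label map
image_fixed = resize(image, shape=(256, 256, 400), is_label=False)
seg_fixed = resize(seg, shape=(256, 256, 400), is_label=True)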
Example #15
def reslice_mask(mask, fref, acoreg, type_pos):
    out_mask = None
    if mask is not None:
        if type_pos == "mm_mni":
            maskaff = acoreg.dot(mask.affine)
            mask.affine[:] = maskaff[:]
        out_mask_temp = npi.resample_from_to(mask, fref, cval=0)
        out_mask = nb.Nifti1Image(np.round(out_mask_temp.get_fdata(),
                                           decimals=0),
                                  affine=out_mask_temp.affine)  #rrr ???
    return out_mask
Example #16
    def _resample_from_to_numpy(image, affine, to_shape, to_affine):
        _to_shape = to_shape[:-1]
        _channel = image.shape[-1]
        to_shape = np.r_[_to_shape, _channel]

        image_nib = nib.Nifti1Image(image, affine=affine)
        to_image_nib = nib_processing.resample_from_to(image_nib,
                                                       (to_shape, to_affine),
                                                       order=order)

        return to_image_nib.get_fdata().astype(np.float32)
Example #17
def resize_exc_set(exc_set, template, tempDir):
    # This function resizes an SPM excursion set to an SPM template if
    # necessary.

    # Load the images
    template_img = nib.load(template)
    excset_img = nib.load(exc_set)

    # Resample if necessary
    img_resl = resample_from_to(excset_img, template_img)

    nib.save(img_resl, os.path.join(tempDir, "resizedExcSet.nii.gz"))
Example #18
 def apply_affines(self, data, affines, orig_affine=np.eye(4)):
     trsfms_affine = [
         orig_affine.dot(trsfm_affine) for trsfm_affine in affines
     ]
     nb_data = data
     if isinstance(nb_data, np.ndarray):
         nb_data = nb.Nifti1Image(nb_data, orig_affine)
     resampled_data = [
         resample_from_to(nb_data, (data.shape, trsfm_affine))
         for trsfm_affine in trsfms_affine
     ]
     return resampled_data
Example #19
def downsample_HCPWuMinnContrast_dataset(dataset_path,
                                         dataset_info,
                                         in_postfix,
                                         out_postfix,
                                         voxel_scale=None):
    dimensions = dataset_info['dimensions']
    modalities = dataset_info['modalities']
    path = dataset_info['path']
    pattern = dataset_info['general_pattern']
    modality_categories = dataset_info['modality_categories']
    # in_postfix = dataset_info['postfix'][2] # raw input data name
    # out_postfix = dataset_info['postfix'][0] # processed output data name

    subject_lib = dataset_info['training_subjects'] + dataset_info[
        'test_subjects']
    num_volumes = dataset_info['num_volumes'][0] + dataset_info['num_volumes'][
        1]

    if voxel_scale is None:
        downsample_scale = dataset_info['downsample_scale']
        voxel_scale = [1, 1,
                       downsample_scale]  # downsample on an axial direction

    # in_data = np.zeros((num_volumes, modalities) + dimensions)
    # out_data = np.zeros((num_volumes, modalities) + dimensions)

    for img_idx in range(num_volumes):
        for mod_idx in range(modalities):
            in_filename = os.path.join(dataset_path, path, pattern).format(
                subject_lib[img_idx], modality_categories[mod_idx], in_postfix)
            out_filename = os.path.join(dataset_path, path, pattern).format(
                subject_lib[img_idx], modality_categories[mod_idx],
                out_postfix)
            print('Processing \'' + in_filename + '\'')
            data = nib.load(in_filename)  # load raw data
            fwhm = np.array(data.header.get_zooms()) * [
                0, 0, voxel_scale[2]
            ]  # FWHM of Gaussian filter along the downsampled (axial) direction
            i_affine = np.dot(data.affine,
                              np.diag(voxel_scale + [1]))  # affine rescaling
            i_shape = np.array(
                data.shape) // voxel_scale  # downsampled shape of output
            data = smooth_image(data, fwhm)  # smoothed by FWHM
            data = resample_from_to(data, (i_shape, i_affine))  # resize
            nib.save(data, out_filename)
            print('Save to \'' + out_filename + '\'')

    return True
Example #20
def make_ds030_subject(line, subject_index, input_path, f, mask):
    try:
        t1_filename = line[0] + '.nii.gz'
        label = line[8]

        if len(label) > 0:
            t1 = nib.load(input_path + t1_filename)
            atlas_image = nib.load(atlas)

            t1_resampled = resample_from_to(t1, atlas_image)
            t1_data = t1_resampled.get_data()

            if not t1_data.shape == target_size:
                print('resizing from', t1_data.shape)
                t1_data = resize_image_with_crop_or_pad(t1_data,
                                                        img_size=target_size,
                                                        mode='constant')

            t1_data = equalize_hist(t1_data, mask=mask)

            f['MRI'][subject_index,
                     ...] = np.float16(normalise_zero_one(t1_data))

            one_hot = [0, 0]

            if 'ok' in label:
                one_hot = [0, 1]
            elif 'maybe' in label:
                one_hot = [0, 1]
            elif 'exclude' in label:
                one_hot = [1, 0]
            else:
                raise Exception

            f['qc_label'][subject_index, :] = one_hot
            f['dataset'][subject_index] = 'ds030'

            # plt.imshow(t1_data[96, ...])
            # plt.axis('off')
            # plt.savefig(output_dir + t1_filename[:-4] + '.png', bbox_inches='tight', cmap='gray')

    except Exception as e:
        print('Error:', e)

        return -1

    return subject_index
Example #21
def get_interpolated_nifti(template_filename,
                           input_filename,
                           destination_dir="/out"):
    """
        Get an interpolated version of an file which is interpolated to match a reference.
        First, check if interpolated dimensions of nifti files match, if so, just return the input_filename.
        Else, if an interpolated version of the file has been created and saved in the root directory before, return its filename,
            else, create the interpolated version, and return its filename.
        Args:
            template_filename - the filename which has the desired spatial dimension
            input_filename - the filename to be interpolated
        Template for interpolated filenames example:
            input_filename = ' example.nii ' has dimension 53 x 63 x 52
            template_filename = 'template.nii' has dimension 53 x 63 x 46
            output_filename = 'example_INTERP_53_63_46.nii' has dimension 53 x 63 x 46
    """

    base_dir = os.path.dirname(input_filename)
    input_prefix, input_ext = os.path.splitext(input_filename)
    template_img = nib.load(template_filename)
    input_img = nib.load(input_filename)
    template_img = template_img.slicer[:, :, :, :input_img.shape[3]]
    template_dim = template_img.shape

    if input_img.shape == template_dim:
        return input_filename

    output_filename = os.path.join(
        base_dir,
        "%s_INTERP_%d_%d_%d.nii" % (
            input_prefix,
            template_img.shape[0],
            template_img.shape[1],
            template_img.shape[2],
        ),
    )

    if os.path.exists(output_filename):
        return output_filename

    output_img = resample_from_to(input_img, template_img)
    if destination_dir is not None:
        output_filename = os.path.join(destination_dir,
                                       os.path.basename(output_filename))
    nib.save(output_img, output_filename)

    return output_filename
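A brief usage sketch with hypothetical 4D nifti file names (the slicing above implies 4D inputs): the original path is returned if the shapes already match; otherwise an *_INTERP_* file is written under destination_dir and its path returned.

out_path = get_interpolated_nifti('template.nii', 'example.nii', destination_dir='/tmp')
print(out_path)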
Example #22
def cal_corticalPatient_lesion_overlap_with_thaPatient_lesionNetwork():
    ''' calculate thalamic patient's FCmap overlap with lesion masks from cortical patients'''
    # t-value threshold 3.66
    ### Cortical Patients
    patients = [
        '0902', 1105, 1692, 1809, 1830, 2092, 2105, 2552, 2697, 2781, 3049,
        3184
    ]
    sdf = pd.DataFrame(columns=['Subject', 'Trail Score'])
    sdf['Subject'] = np.loadtxt(
        '/home/kahwang/Trail_making_part_B_LESYMAP/subList_No_Epilepsy',
        dtype='str')
    sdf['Trail Score'] = np.loadtxt(
        '/home/kahwang/Trail_making_part_B_LESYMAP/No_Epilepsy/Data/Score.txt')

    yeof = nib.load('/data/backed_up/shared/ROIs/Yeo7network_2mm.nii.gz')

    for i, s in enumerate(sdf['Subject']):
        fn = '/home/kahwang/Trail_making_part_B_LESYMAP/No_Epilepsy/Masks/%s.nii.gz' % s
        m = nib.load(fn)
        res_m = resample_from_to(m, yeof).get_data()
        yeo_m = yeof.get_data() >= 1
        masked_m = yeo_m * res_m

        for p in patients:
            fcfile = '/home/kahwang/bsh/Tha_Lesion_Mapping/NKI_ttest_%s.nii.gz' % p
            fcmap = nib.load(fcfile).get_data()[:, :, :, 0, 1]
            fcmap = fcmap > 3.66
            overlap = fcmap * masked_m
            sdf.loc[i, str(p) + '_overlap'] = np.sum(overlap) / np.sum(res_m)

    # Rank Cortical Patients
    for p in patients:
        sdf[str(p) + '_overlap' + '_rank'] = sdf[str(p) + '_overlap'].rank()

    df.loc[11, 'Patient'] = '0902'

    for i, p in enumerate(df['Patient']):
        df.loc[i, 'Cortical_Patietnt_Score'] = sdf.loc[
            sdf[str(p) + '_overlap' + '_rank'] > 518]['Trail Score'].mean()

    for i, p in enumerate(df['Patient']):
        print(sdf.loc[sdf[str(p) + '_rank'] > 508]['Subject'])
        df = tha_df
        sdf = cortical_df

    return tha_df, cortical_df
Example #23
def test_spatial_axes_check():
    for fname in MINC_3DS + OTHER_IMGS:
        img = nib.load(pjoin(DATA_DIR, fname))
        s_img = smooth_image(img, 0)
        assert_array_equal(img.dataobj, s_img.dataobj)
        out = resample_from_to(img, img, mode='nearest')
        assert_almost_equal(img.dataobj, out.dataobj)
        if len(img.shape) > 3:
            continue
        # Resample to output does not raise an error
        out = resample_to_output(img, voxel_sizes(img.affine))
    for fname in MINC_4DS:
        img = nib.load(pjoin(DATA_DIR, fname))
        assert_raises(ValueError, smooth_image, img, 0)
        assert_raises(ValueError, resample_from_to, img, img, mode='nearest')
        assert_raises(ValueError,
                      resample_to_output, img, voxel_sizes(img.affine))
Example #24
def upsample_HCPWuMinnContrast_dataset(dataset_path,
                                       dataset_info,
                                       in_postfix,
                                       out_postfix,
                                       is_training=True,
                                       interp_order=3):
    dimensions = dataset_info['dimensions']
    modalities = dataset_info['modalities']
    path = dataset_info['path']
    pattern = dataset_info['general_pattern']
    modality_categories = dataset_info['modality_categories']
    # in_postfix = dataset_info['postfix'][2] # raw input data name
    # out_postfix = dataset_info['postfix'][0] # processed output data name

    if is_training == True:
        subject_lib = dataset_info['training_subjects']
        num_volumes = dataset_info['num_volumes'][0]
    else:
        subject_lib = dataset_info['test_subjects']
        num_volumes = dataset_info['num_volumes'][1]
    voxel_scale = dataset_info['upsample_scale']  # upsample scale

    # in_data = np.zeros((num_volumes, modalities) + dimensions)
    # out_data = np.zeros((num_volumes, modalities) + dimensions)

    for img_idx in range(num_volumes):
        for mod_idx in range(modalities):
            in_filename = os.path.join(dataset_path, path, pattern).format(
                subject_lib[img_idx], modality_categories[mod_idx], in_postfix)
            out_filename = os.path.join(dataset_path, path, pattern).format(
                subject_lib[img_idx], modality_categories[mod_idx],
                out_postfix)

            # revise on 14/03/19
            # if (os.path.exists(out_filename) == False):
            data = nib.load(in_filename)  # load raw data
            i_affine = np.dot(
                data.affine,
                np.diag(tuple(1.0 / np.array(voxel_scale)) +
                        (1.0, )))  # affine rescaling
            # i_shape = np.array(data.shape) * voxel_scale  # upsampled shape of output
            data = resample_from_to(data, (dimensions, i_affine),
                                    order=interp_order)  # resize
            nib.save(data, out_filename)

    return True
Example #25
def plot_3d_epi_image_map(t_map, mri):
    """ Use Napari to display a coronal 3D view of the t_map superimposed on
        the MRI

    Parameters
    ----------
    t_map : Nifti1Image
        Image where pixel values are corresponding t-values
    mri : string
        path to mri file
    """

    try:
        import napari
    except ImportError:
        print('This function requires napari')
        return

    img = nib.load(mri)
    temp = nib.Nifti1Image(t_map.get_fdata()[:, :, :, 0], t_map.affine)
    resized = nbp.resample_from_to(temp, img)

    coronal_img_data = _set_coronal(img)
    coronal_map_data = _set_coronal(resized)

    controls = np.linspace(0, 1, 101)
    colors = [cold_hot(i) for i in controls]
    for i in range(48, 53):
        colors[i] = (colors[i][0], colors[i][1], colors[i][2], 0)

    cmap = Colormap(colors, controls)
    min_ = np.min(coronal_map_data)
    max_ = np.max(coronal_map_data)
    limits = (min_, -min_)
    if abs(min_) < max_:
        limits = (-max_, max_)

    viewer = napari.Viewer()
    viewer.add_image(coronal_img_data, name='image')
    viewer.add_image(coronal_map_data,
                     name='t-map',
                     opacity=0.5,
                     contrast_limits=limits,
                     colormap=cmap)

    napari.run()
Example #26
def reslice_im(im, fref, acoreg, type_pos):
    """
    Takes as an input an image and a template, and return the resample of the first input
    in the sp
    ace linked to the template, with the following rates:
        *percentage of output space in the intersection
        *percentage of input space in the intersection
    """
    print('resampling img')
    #if type_pos != "mm":
    if type_pos == "mm_mni":
        print('changing the image affine ')
        imgaff = acoreg.dot(im.affine)
        im.affine[:] = imgaff[:]
    out_img = npi.resample_from_to(im, fref, cval=-1)
    useful_rate = round(
        np.sum(out_img.get_fdata() != -1) / np.prod(fref.shape), 3)
    used_rate = round(np.sum(out_img.get_fdata() != -1) / np.prod(im.shape), 3)
    return out_img, useful_rate, used_rate
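A usage sketch with hypothetical inputs, assuming the module-level imports used above (nibabel as nb, numpy as np, nibabel.processing as npi); passing type_pos='mm' skips the affine adjustment branch.

import numpy as np
import nibabel as nb

ref = nb.load('reference.nii.gz')          # hypothetical reference image
moving = nb.load('moving.nii.gz')          # hypothetical moving image
acoreg = np.eye(4)                         # hypothetical coregistration affine
resliced, useful_rate, used_rate = reslice_im(moving, ref, acoreg, type_pos='mm')
print(useful_rate, used_rate)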
Example #27
def cal_corticalPatient_lesion_load_on_thaPatient_lesionNetwork():
    ''' calculate thalamic patient's FCmap "load" within each cortical patient lesion mask'''
    patients = [
        '0902', 1105, 1692, 1809, 1830, 2092, 2105, 2552, 2697, 2781, 3049,
        3184
    ]
    df = pd.read_csv('/home/kahwang/bin/LesionNetwork/Neuropsych.csv')
    sdf = pd.DataFrame(columns=['Subject', 'Trail Score'])
    sdf['Subject'] = np.loadtxt(
        '/home/kahwang/Trail_making_part_B_LESYMAP/subList_No_Epilepsy',
        dtype='str')
    sdf['Trail Score'] = np.loadtxt(
        '/home/kahwang/Trail_making_part_B_LESYMAP/No_Epilepsy/Data/Score.txt')

    yeof = nib.load('/data/backed_up/shared/ROIs/Yeo7network_2mm.nii.gz')

    for i, s in enumerate(sdf['Subject']):
        fn = '/home/kahwang/Trail_making_part_B_LESYMAP/No_Epilepsy/Masks/%s.nii.gz' % s
        m = nib.load(fn)
        res_m = resample_from_to(m, yeof)
        masked_m = yeof.get_data() * res_m.get_data()

        for p in patients:
            fcfile = '/home/kahwang/bsh/Tha_Lesion_Mapping/NKI_ttest_%s.nii.gz' % p
            fcmap = nib.load(fcfile).get_data()[:, :, :, 0, 0]
            sdf.loc[i, p] = np.sum(fcmap * masked_m)

    # Rank mask with fcmap overlap
    for p in patients:
        sdf[str(p) + '_rank'] = sdf[p].rank()

    df.loc[11, 'Patient'] = '0902'

    for i, p in enumerate(df['Patient']):
        df.loc[i, 'Cortical_Patietnt_Score'] = sdf.loc[
            sdf[str(p) + '_rank'] > 508]['Trail Score'].mean()

    for i, p in enumerate(df['Patient']):
        print(sdf.loc[sdf[str(p) + '_rank'] > 508]['Subject'])
    df = tha_df
    sdf = cortical_df
    return tha_df, cortical_df
Example #28
def sort_corticalPatient_overlap_with_corticalFCNetwork():
    '''Sort cortical patients based on network partition'''

    ## Yeo labels.
    # 0            NONE   0   0   0   0
    # 1     7Networks_1 120  18 134   0 V
    # 2     7Networks_2  70 130 180   0 SM
    # 3     7Networks_3   0 118  14   0 DA
    # 4     7Networks_4 196  58 250   0 CO
    # 5     7Networks_5 220 248 164   0 LM
    # 6     7Networks_6 230 148  34   0 FP
    # 7     7Networks_7 205  62  78   0 DF

    ### Cortical Patients
    sdf = pd.DataFrame(columns=['Subject', 'Trail Score'])
    sdf['Subject'] = np.loadtxt(
        '/home/kahwang/Trail_making_part_B_LESYMAP/subList_No_Epilepsy',
        dtype='str')
    sdf['Trail Score'] = np.loadtxt(
        '/home/kahwang/Trail_making_part_B_LESYMAP/No_Epilepsy/Data/Score.txt')
    networks = ['V', 'SM', 'DA', 'CO', 'Lim', 'FP', 'DF']
    yeof = nib.load('/data/backed_up/shared/ROIs/Yeo7network_2mm.nii.gz')

    for i, s in enumerate(sdf['Subject']):
        fn = '/home/kahwang/Trail_making_part_B_LESYMAP/No_Epilepsy/Masks/%s.nii.gz' % s
        m = nib.load(fn)
        res_m = resample_from_to(m, yeof).get_data()
        yeo_m = yeof.get_data()

        for ii, n in enumerate(networks):

            networkpartition = yeo_m == ii + 1
            overlap = res_m * networkpartition
            sdf.loc[i, str(n) + '_overlap'] = np.sum(overlap)  #/np.sum(res_m)

    for ii, n in enumerate(networks):
        sdf[str(n) + '_overlap' + '_rank'] = sdf[str(n) + '_overlap'].rank()

    return sdf
Example #29
    def _preprocess_dmri(self, parameters: dict) -> None:
        mask_preproc_seed = parameters.get('mask_preproc_seed', {})
        mask_preproc_target = parameters.get('mask_preproc_target', {})

        upsampling = mask_preproc_seed['upsample_to']['apply']
        vox_dim = mask_preproc_seed['upsample_to']['voxel_dimensions']

        if upsampling:
            if len(vox_dim) == 1:
                vox_dim *= 3

            logging.info('stretching seed_mask to %s' %
                         'x'.join(map(str, vox_dim)))
            mapping = list(
                vox2out_vox((self.seed.shape, self.seed.affine), vox_dim))
            a = np.sign(self.seed.affine)
            b = np.sign(mapping[1])
            mapping[1] *= (a * b)
            self.highres_seed = stretch_img(self.seed, mapping)

        # Target
        downsampling = mask_preproc_target['downsample_to']['apply']
        vox_dim = mask_preproc_target['downsample_to']['voxel_dimensions']

        if downsampling:
            if len(vox_dim) == 1:
                vox_dim *= 3

            logging.info('resampling target_mask to %s' %
                         'x'.join(map(str, vox_dim)))
            mapping = list(
                vox2out_vox((self.target.shape, self.target.affine), vox_dim))
            a = np.sign(self.target.affine)
            b = np.sign(mapping[1])
            mapping[1] *= (a * b)
            self.target = resample_from_to(self.target,
                                           mapping,
                                           order=0,
                                           mode='nearest')
Example #30
def rank_patient_lesion_based_on_FCNetworkOverlap():
    '''Then sort patients based on network partition'''
    sdf = load_corticalPatient_score_size()
    networks = ['V', 'SM', 'DA', 'CO', 'Lim', 'FP', 'DF']
    yeof = nib.load('/data/backed_up/shared/ROIs/Yeo7network_2mm.nii.gz')

    for i, s in enumerate(sdf['Subject']):
        fn = '/home/kahwang/Trail_making_part_B_LESYMAP/No_Epilepsy/Masks/%s.nii.gz' % s
        m = nib.load(fn)
        res_m = resample_from_to(m, yeof).get_data()
        yeo_m = yeof.get_data()

        for ii, n in enumerate(networks):

            networkpartition = yeo_m == ii + 1
            overlap = res_m * networkpartition
            sdf.loc[i, str(n) + '_overlap'] = np.sum(overlap) / np.sum(
                res_m)  # sort based on percentage of overlap with the network

    for ii, n in enumerate(networks):
        sdf[str(n) + '_overlap' + '_rank'] = sdf[str(n) + '_overlap'].rank()

    return sdf
Example #31
def reslice_to_ref(fin, fref, faff):
    """
    :param fin: input image to reslice (either a full path or a NiftiImage)
    :param fref: full path to the image defining the space to reslice into
    :param faff: optional full path to a 4x4 affine matrix to apply to fin before the reslice (e.g. a niftyreg affine.txt)

    :return: numpy array of the resliced image
    """
    import nibabel.processing as nbp

    if isinstance(fin, str):
        fin = nb.load(fin)

    if faff:
        acoreg = np.loadtxt(faff, delimiter=' ')
        acoreg = np.linalg.inv(acoreg)

        imgaff = acoreg.dot(fin.affine)
        fin.affine[:] = imgaff[:]

    out_img = nbp.resample_from_to(fin, fref, cval=-1)

    fout = out_img.get_fdata()
    return fout
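A short usage sketch (hypothetical file names), assuming nibabel is imported as nb at module level as the function requires; faff=None skips the optional affine step.

import nibabel as nb

ref = nb.load('reference.nii.gz')          # image defining the target grid
arr = reslice_to_ref('moving.nii.gz', ref, faff=None)
print(arr.shape)                           # matches the reference grid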
Example #32
    return rescaled_image


if __name__ == '__main__':

    import nibabel as nib
    from nibabel.processing import resample_from_to

    target_size = (192, 256, 192)

    orig = nib.load(data_dir + '/deep_abide/resampled/50002.mnc').get_data()
    atlas = nib.load(data_dir + 'mni_icbm152_t1_tal_nlin_asym_09a.mnc')

    target = nib.load(data_dir + '/ds030/sub-10225.nii.gz')
    target = resample_from_to(target, atlas)
    target = target.get_data()

    mask = nib.load(data_dir + 'mni_icbm152_t1_tal_nlin_asym_09a_mask.mnc').get_data()

    from make_datasets import resize_image_with_crop_or_pad, normalise_zero_one

    orig = normalise_zero_one(resize_image_with_crop_or_pad(orig, target_size, mode='constant'))
    target = normalise_zero_one(resize_image_with_crop_or_pad(target, target_size, mode='constant'))

    mask = np.asarray(resize_image_with_crop_or_pad(mask, target_size, mode='constant'), dtype='bool')
    target_mask = np.copy(mask)

    returned_image = normalize(orig, mask, target, target_mask)

    print('shapes:', orig.shape, target.shape, mask.shape, returned_image.shape)
Example #33
File: roi.py Project: IBT-FMI/SAMRI
def atlasassignment(
    data_path='~/ni_data/ofM.dr/bids/l2/anova/anova_zfstat.nii.gz',
    null_label=0.0,
    value_label='values',
    verbose=False,
    lateralized=False,
    save_as='',
    exact_zero_threshold=0.34,
):
    """
	Create CSV file containing a tabular summary of mean image intensity per DSURQE region of interest.

	Parameters
	----------
	data_path : str
		Path to data file, the values of which are to be indexed according to the DSURQEC atlas.
	null_label : float, optional
		Values of the atlas which to exclude a priori.
	value_label : string, optional
		Label to apply to the value column.
	verbose : bool, optional
		Whether to print output regarding the processing (activates warnings if the data is reformatted, as well as reports for each value).
	lateralized : bool , optional
		Whether to differentiate between left and right labels (currently unimplemented).
	save_as : str, optional
		Path under which to save the atlas assignment file.

	Returns
	-------
	pandas.DataFrame
		Pandas Dataframe with columns including 'Structure', and 'right values', 'left values', or simply 'values'.
	"""

    from copy import deepcopy

    atlas_filename = '/usr/share/mouse-brain-templates/dsurqec_40micron_labels.nii'
    mapping = '/usr/share/mouse-brain-templates/dsurqe_labels.csv'
    atlas_filename = path.abspath(path.expanduser(atlas_filename))
    mapping = path.abspath(path.expanduser(mapping))
    data_path = path.abspath(path.expanduser(data_path))

    data = nib.load(data_path)
    atlas = nib.load(atlas_filename)
    voxels_ratio = 1
    if not np.array_equal(data.affine, atlas.affine):
        voxels_data = np.prod(np.shape(data))
        voxels_atlas = np.prod(np.shape(atlas))
        voxels_ratio = np.min([np.floor(voxels_atlas / voxels_data), 1])
        if verbose:
            print(data.affine, '\n', atlas.affine)
            print(np.shape(data), '\n', np.shape(atlas))
            print(
                'The affines of these atlas and data file are not identical. In order to perform this sensitive operation we need to know that there is perfect correspondence between the voxels of input data and atlas. Attempting to recast affines.'
            )
        from nibabel import processing
        data = processing.resample_from_to(data, atlas, order=0)
        if verbose:
            print(data.affine, '\n', atlas.affine)
            print(np.shape(data), '\n', np.shape(atlas))
        reshaped = True
    else:
        reshaped = False
    mapping = pd.read_csv(mapping)
    atlas = atlas.get_data().flatten()
    data = data.get_data().flatten()
    nonull_map = atlas != null_label
    atlas = atlas[nonull_map]
    data = data[nonull_map]
    structures = mapping['Structure'].unique()
    results = deepcopy(mapping)
    results[value_label] = ''
    if lateralized:
        results['Side'] = ''
        results_medial = results.loc[(
            results['right label'] == results['left label'])].copy()
        results_left = results.loc[(results['right label'] !=
                                    results['left label'])].copy()
        results_right = deepcopy(results_left)
        results_right['Side'] = 'right'
        results_left['Side'] = 'left'
    for structure in structures:
        right_label = mapping[mapping['Structure'] ==
                              structure]['right label'].item()
        left_label = mapping[mapping['Structure'] ==
                             structure]['left label'].item()
        if lateralized:
            if right_label == left_label:
                mask = atlas == right_label
                values = data[mask]
                if voxels_ratio != 1:
                    values = values[::voxels_ratio]
                if exact_zero_threshold:
                    val_number = len(values)
                    zeroes = len(values[values == 0.0])
                    if exact_zero_threshold <= zeroes / float(val_number):
                        continue
                values = ', '.join([str(i) for i in list(values)])
                results_medial.loc[results['Structure'] == structure,
                                   value_label] = values
            else:
                right_mask = atlas == right_label
                left_mask = atlas == left_label
                right_values = data[right_mask]
                left_values = data[left_mask]
                if voxels_ratio != 1:
                    right_values = right_values[::voxels_ratio]
                    left_values = left_values[::voxels_ratio]
                if exact_zero_threshold:
                    val_number = len(right_values) + len(left_values)
                    zeroes = len(right_values[right_values == 0.0]) + len(
                        left_values[left_values == 0.0])
                    if exact_zero_threshold <= zeroes / float(val_number):
                        continue
                right_values = ', '.join([str(i) for i in list(right_values)])
                left_values = ', '.join([str(i) for i in list(left_values)])
                results_right.loc[results['Structure'] == structure,
                                  value_label] = right_values
                results_left.loc[results['Structure'] == structure,
                                 value_label] = left_values
        else:
            labels = [right_label, left_label]
            mask = np.isin(atlas, labels)
            values = data[mask]
            if voxels_ratio != 1:
                values = values[::voxels_ratio]
            if exact_zero_threshold:
                val_number = len(values)
                zeroes = len(values[values == 0.0])
                if exact_zero_threshold <= zeroes / float(val_number):
                    continue
            values = list(values)
            values = ', '.join([str(i) for i in values])
            results.loc[results['Structure'] == structure,
                        value_label] = values
    if lateralized:
        results = pd.concat([results_left, results_medial, results_right],
                            ignore_index=True)
    results = results.loc[results[value_label] != '']
    if save_as:
        save_path = path.dirname(save_as)
        if save_path and not path.exists(save_path):
            os.makedirs(save_path)
        results.to_csv(save_as)
    return results
Example #34
def test_resample_from_to():
    # Test resampling from image to image / image space
    data = np.arange(24).reshape((2, 3, 4))
    affine = np.diag([-4, 5, 6, 1])
    img = Nifti1Image(data, affine)
    img.header['descrip'] = 'red shirt image'
    out = resample_from_to(img, img)
    assert_almost_equal(img.dataobj, out.dataobj)
    assert_array_equal(img.affine, out.affine)
    # Check resampling reverses effect of flipping axes
    # This will also test translations
    flip_ornt = np.array([[0, 1], [1, 1], [2, 1]])
    for axis in (0, 1, 2):
        ax_flip_ornt = flip_ornt.copy()
        ax_flip_ornt[axis, 1] = -1
        aff_flip_i = inv_ornt_aff(ax_flip_ornt, (2, 3, 4))
        flipped_img = Nifti1Image(flip_axis(data, axis),
                                  np.dot(affine, aff_flip_i))
        out = resample_from_to(flipped_img, ((2, 3, 4), affine))
        assert_almost_equal(img.dataobj, out.dataobj)
        assert_array_equal(img.affine, out.affine)
    # A translation of one voxel on each axis
    trans_aff = from_matvec(np.diag([-4, 5, 6]), [4, -5, -6])
    trans_img = Nifti1Image(data, trans_aff)
    out = resample_from_to(trans_img, img)
    exp_out = np.zeros_like(data)
    exp_out[:-1, :-1, :-1] = data[1:, 1:, 1:]
    assert_almost_equal(out.dataobj, exp_out)
    out = resample_from_to(img, trans_img)
    trans_exp_out = np.zeros_like(data)
    trans_exp_out[1:, 1:, 1:] = data[:-1, :-1, :-1]
    assert_almost_equal(out.dataobj, trans_exp_out)
    # Test mode with translation of first axis only
    # Default 'constant' mode first
    trans1_aff = from_matvec(np.diag([-4, 5, 6]), [4, 0, 0])
    trans1_img = Nifti1Image(data, trans1_aff)
    out = resample_from_to(img, trans1_img)
    exp_out = np.zeros_like(data)
    exp_out[1:, :, :] = data[:-1, :, :]
    assert_almost_equal(out.dataobj, exp_out)
    # Then 'nearest' mode
    out = resample_from_to(img, trans1_img, mode='nearest')
    exp_out[0, :, :] = exp_out[1, :, :]
    assert_almost_equal(out.dataobj, exp_out)
    # Test order
    trans_p_25_aff = from_matvec(np.diag([-4, 5, 6]), [1, 0, 0])
    trans_p_25_img = Nifti1Image(data, trans_p_25_aff)
    # Surprising to me, but all points outside are set to 0, even with NN
    out = resample_from_to(img, trans_p_25_img, order=0)
    exp_out = np.zeros_like(data)
    exp_out[1:, :, :] = data[1, :, :]
    assert_almost_equal(out.dataobj, exp_out)
    out = resample_from_to(img, trans_p_25_img)
    exp_out = spnd.affine_transform(data, [1, 1, 1], [-0.25, 0, 0], order=3)
    assert_almost_equal(out.dataobj, exp_out)
    # Test cval
    out = resample_from_to(img, trans_img, cval=99)
    exp_out = np.zeros_like(data) + 99
    exp_out[1:, 1:, 1:] = data[:-1, :-1, :-1]
    assert_almost_equal(out.dataobj, exp_out)
    # Out class
    out = resample_from_to(img, trans_img)
    assert_equal(out.__class__, Nifti1Image)
    # By default, type of from_img makes no difference
    n1_img = Nifti2Image(data, affine)
    out = resample_from_to(n1_img, trans_img)
    assert_equal(out.__class__, Nifti1Image)
    # Passed as keyword arg
    out = resample_from_to(img, trans_img, out_class=Nifti2Image)
    assert_equal(out.__class__, Nifti2Image)
    # If keyword arg is None, use type of from_img
    out = resample_from_to(n1_img, trans_img, out_class=None)
    assert_equal(out.__class__, Nifti2Image)
    # to_img type irrelevant in all cases
    n1_trans_img = Nifti2Image(data, trans_aff)
    out = resample_from_to(img, n1_trans_img, out_class=None)
    assert_equal(out.__class__, Nifti1Image)
    # From 2D to 3D, error, the fixed affine is not invertible
    img_2d = Nifti1Image(data[:, :, 0], affine)
    assert_raises(AffineError, resample_from_to, img_2d, img)
    # 3D to 2D, we don't need to invert the fixed matrix
    out = resample_from_to(img, img_2d)
    assert_array_equal(out.dataobj, data[:, :, 0])
    # Same for tuple as to_img input
    out = resample_from_to(img, (img_2d.shape, img_2d.affine))
    assert_array_equal(out.dataobj, data[:, :, 0])
    # 4D input and output also OK
    data_4d = np.arange(24 * 5).reshape((2, 3, 4, 5))
    img_4d = Nifti1Image(data_4d, affine)
    out = resample_from_to(img_4d, img_4d)
    assert_almost_equal(data_4d, out.dataobj)
    assert_array_equal(img_4d.affine, out.affine)
    # Errors trying to match 3D to 4D
    assert_raises(ValueError, resample_from_to, img_4d, img)
    assert_raises(ValueError, resample_from_to, img, img_4d)
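For reference, a minimal sketch of the two target forms exercised by the tests above: resampling onto another image, and onto an explicit (shape, affine) pair.

import numpy as np
from nibabel import Nifti1Image
from nibabel.processing import resample_from_to

data = np.arange(24, dtype=float).reshape((2, 3, 4))
img = Nifti1Image(data, np.diag([-4, 5, 6, 1]))
target = Nifti1Image(np.zeros((2, 3, 4)), np.diag([-4, 5, 6, 1]))
out_img = resample_from_to(img, target, order=1)                            # image as target
out_tuple = resample_from_to(img, (target.shape, target.affine), order=1)   # (shape, affine) as target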