Example #1
def resample_images_ref(images,reference,interpolation,resample_dim=None):
    '''Resample many images to a single reference

    images: list of nibabel.Nifti1Image
        list of image files or nibabel images

    reference: nibabel.Nifti1Image
        single image file or nibabel image

    interpolation: str
        interpolation passed to nilearn.image.resample_img (e.g. 'continuous' or 'nearest')

    resample_dim: list, optional
        voxel dimensions (in mm) the reference is resampled to before use
    '''

    if isinstance(reference,str): reference = nibabel.load(reference)
    if resample_dim:
        affine = numpy.diag(resample_dim)
        reference = resample_img(reference, target_affine=affine)

    # Resample images to match reference mask affine and shape
    if not isinstance(images,list): images = [images]
    images_nii = get_nii_obj(images)

    # Make sure we don't have any with a singleton fourth dimension
    images_nii = squeeze_fourth_dimension(images_nii)

    images_resamp = []
    for image in images_nii:
        # Only resample if the image is different from the reference
        if not (image.get_affine() == reference.get_affine()).all():
            resampled_img = resample_img(image,target_affine=reference.get_affine(), 
                                         target_shape=reference.shape,
                                         interpolation=interpolation)
        else: 
            resampled_img = image
        
        images_resamp.append(resampled_img)
    
    return images_resamp, reference
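
A minimal usage sketch for the function above, assuming the helpers it relies on (get_nii_obj, squeeze_fourth_dimension) and nilearn's resample_img are importable; the file paths are hypothetical:

# Two hypothetical statistical maps resampled onto a 4 mm version of the reference.
maps = ["sub-01_zstat.nii.gz", "sub-02_zstat.nii.gz"]
resampled_maps, reference_4mm = resample_images_ref(maps,
                                                    "MNI152_T1_2mm_brain.nii.gz",
                                                    interpolation="continuous",
                                                    resample_dim=[4, 4, 4])
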
def make_resampled_transformation(nii_obj,resample_dim=[4,4,4],standard_mask=True):

    nii_obj = get_nii_obj(nii_obj)[0]

    # To set 0s to nan, we need to have float64 data type
    true_zeros = numpy.zeros(nii_obj.shape) # default data_type is float64
    true_zeros[:] = nii_obj.get_data()
    true_zeros[true_zeros==0] = numpy.nan

    # Resample image to 4mm voxel, nans are preserved
    true_zeros = nib.nifti1.Nifti1Image(true_zeros,affine=nii_obj.get_affine())
    
    # Standard brain masking
    if standard_mask == True:
        standard = get_standard_mask(voxdim=resample_dim[0])
        true_zeros = resample_img(true_zeros,target_affine=standard.get_affine(), 
                                  target_shape=standard.shape)
      
        # Mask the image 
        masked_true_zeros = numpy.zeros(true_zeros.shape)
        masked_true_zeros[standard.get_data()!=0] = true_zeros.get_data()[standard.get_data()!=0]
        true_zeros = nib.nifti1.Nifti1Image(masked_true_zeros,affine=true_zeros.get_affine())

    # or just resample
    else: 
        if (resample_dim != numpy.diag(true_zeros.get_affine())[0:3]).all():
            true_zeros = resample_img(true_zeros,target_affine=numpy.diag(resample_dim))

    return true_zeros
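
A hedged usage sketch for make_resampled_transformation, assuming get_nii_obj and get_standard_mask are available; the input path is hypothetical:

# Resample a hypothetical statistical map to 4 mm and mask it with the standard brain mask.
masked_4mm = make_resampled_transformation("zstat1.nii.gz",
                                           resample_dim=[4, 4, 4],
                                           standard_mask=True)
print(masked_4mm.shape)   # matches the 4 mm standard mask grid
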
Example #3
    def fit(self):

        if self.resampling is not None:
            resample = np.diag(self.resampling * np.ones(3))
        else:
            resample = None

        self.mask_img = resample_img(self.mask_img, target_affine=resample, interpolation='nearest')

        if not isinstance(self.rois, tuple):
            self.masker = dict()
            for roi_id, roi in enumerate(self.rois):
                if self.resampling is not None:
                    roi = resample_img(roi, target_affine=resample, interpolation='nearest')
                self.masker[roi_id] = NiftiMasker(mask_img=roi)
                self.masker[roi_id].fit()
        else:
            self.masker = [None] * len(self.rois)  # first create as list..
            for m, rois_modality in enumerate(self.rois):
                self.masker[m] = dict()
                for roi_id, roi in enumerate(rois_modality):
                    if self.resampling is not None:
                        roi = resample_img(roi, target_affine=resample, interpolation='nearest')
                    self.masker[m][roi_id] = NiftiMasker(mask_img=roi)
                    self.masker[m][roi_id].fit()
            self.masker = tuple(self.masker)  # .. then make conform again

        return self
Example #4
    def fit(self, X, y):
        """Fit estimators from the training set (X, y).

        Returns
        -------
        self : object
            Returns self.
        """

        if self.base_estimator_._estimator_type == 'classifier':
            scoring = None
        else:
            scoring = 'mean_squared_error'

        X_test = nibabel.load(X[0]) if isinstance(X[0], str) else X[0]
        mask_test = nibabel.load(self.mask_img) if isinstance(self.mask_img, str) else self.mask_img
        process_mask_test = nibabel.load(self.process_mask_img) if isinstance(self.process_mask_img, str) else self.process_mask_img
        if not np.array_equal(X_test.affine, mask_test.affine) or not np.array_equal(X_test.shape, mask_test.shape):
            self.mask_img = resample_img(mask_test, target_affine=X_test.affine, target_shape=X_test.shape, interpolation='nearest')
        if not np.array_equal(X_test.affine, process_mask_test.affine) or not np.array_equal(X_test.shape, process_mask_test.shape):
            self.process_mask_img = resample_img(process_mask_test, target_affine=X_test.affine, target_shape=X_test.shape, interpolation='nearest')

        searchlight = SearchLight(self.mask_img, process_mask_img=self.process_mask_img, estimator=self.base_estimator_, scoring=scoring,
                                  radius=self.radius, n_jobs=self.n_jobs, estimator_params=self.base_estimator_args, cv=LeaveOneOut(n=len(y)))
        searchlight.fit(X, y)
        if np.all(searchlight.scores_ == 0):
            raise RuntimeError('Oops, something probably went wrong: all searchlight scores are 0.')
        if self.base_estimator_._estimator_type == 'classifier':
            best_centers = np.unravel_index(np.argsort(searchlight.scores_, axis=None)[-self.n_estimators:],
                                            searchlight.scores_.shape)
        else:
            best_centers = np.unravel_index(np.argsort(.1/(-searchlight.scores_ - 1e-30), axis=None)[-self.n_estimators:],
                                            searchlight.scores_.shape)
        self.best_spheres = get_sphere_indices(self.mask_img, np.array(best_centers).T.tolist(), self.radius)

        # for v in range(self.n_estimators):
        #     self.estimators_ += [ESTIMATOR_CATALOG[searchlight.estimator](**self.estimator_params)]
        #     self.estimators_[v].fit(np.array([x.get_data()[self.best_spheres[v]] for x in X]), y)

        estimators = []
        for i in range(self.n_estimators):
            estimator = self._make_estimator(append=False)
            estimators.append(estimator)

        if not isinstance(X[0], nibabel.Nifti1Image):
            X = [nibabel.load(x) for x in X]


        # for v, e in enumerate(estimators):
        #     print(v)
        #     _parallel_build_estimator(e, np.array([x.get_data()[self.best_spheres[v]] for x in X]), y)

        estimators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")(
                     delayed(_parallel_build_estimator)(e, np.array([x.get_data()[self.best_spheres[v]] for x in X]), y)
                     for v, e in enumerate(estimators))

        self.estimators_ = estimators

        return self
Example #5
def _gen_reference(fixed_image, moving_image, out_file=None):
    if out_file is None:
        out_file = fname_presuffix(fixed_image,
                                   suffix='_reference',
                                   newpath=os.getcwd())
    new_zooms = nli.load_img(moving_image).header.get_zooms()[:3]
    # Avoid letting small differences in reported resolution cause changes to
    # the FOV. See https://github.com/poldracklab/fmriprep/issues/512
    new_zooms_round = np.round(new_zooms, 3)
    nli.resample_img(fixed_image,
                     target_affine=np.diag(new_zooms_round),
                     interpolation='nearest').to_filename(out_file)
    return out_file
Example #6
def _gen_reference(fixed_image, moving_image, out_file=None):
    import numpy
    from nilearn.image import resample_img, load_img

    if out_file is None:
        out_file = genfname(fixed_image, suffix='reference')
    new_zooms = load_img(moving_image).header.get_zooms()[:3]
    # Avoid letting small differences in reported resolution cause changes to
    # the FOV. See https://github.com/poldracklab/fmriprep/issues/512
    new_zooms_round = numpy.round(new_zooms, 3)
    resample_img(fixed_image, target_affine=numpy.diag(new_zooms_round),
                 interpolation='nearest').to_filename(out_file)
    return out_file
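
A small illustration of the rounding used above: tiny differences in the reported voxel size would otherwise change the target affine, and with it the output field of view. The zoom values here are hypothetical.

import numpy as np
zooms = (2.0000004, 1.9999998, 2.0000001)    # hypothetical reported zooms
print(np.diag(np.round(zooms, 3)))           # diag(2., 2., 2.): a stable target affine
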
Example #7
def parcel_masker(mask, coords, parcel_list, label_names, dir_path, ID):
    from pynets import nodemaker
    from nilearn.image import resample_img
    from nilearn import masking
    ##For parcel masking, specify overlap thresh and error cushion in mm voxels
    perc_overlap = 0.75 ##Default is >=75% overlap

    mask_img = nib.load(mask)
    mask_data, _ = masking._load_mask_img(mask)

    i = 0
    indices = []
    for parcel in parcel_list:
        parcel_vol = np.zeros(mask_data.shape, dtype=bool)
        parcel_data_reshaped = resample_img(parcel, target_affine=mask_img.affine,
                               target_shape=mask_data.shape).get_data()
        parcel_vol[parcel_data_reshaped==1] = 1
        
        ##Count number of unique voxels where overlap of parcel and mask occurs
        overlap_count = len(np.unique(np.where((mask_data.astype('uint8')==1) & (parcel_vol.astype('uint8')==1))))
        
        ##Count number of total unique voxels within the parcel
        total_count = len(np.unique(np.where(((parcel_vol.astype('uint8')==1)))))
        
        ##Calculate % overlap
        try:
            overlap = float(overlap_count/total_count)
        except ZeroDivisionError:
            overlap = float(0)
        
        if overlap >= perc_overlap:
            print(str(round(100*overlap,1)) + '% of parcel ' + str(label_names[i]) + ' falls within mask...')
        else:
            indices.append(i)
        i = i + 1

    label_names_adj=list(label_names)
    coords_adj = list(tuple(x) for x in coords)
    parcel_list_adj = parcel_list
    for ix in sorted(indices, reverse=True):
        print('Removing: ' + str(label_names_adj[ix]) + ' at ' + str(coords_adj[ix]))
        label_names_adj.pop(ix)
        coords_adj.pop(ix)
        parcel_list_adj.pop(ix)
        
    ##Create a resampled 3D atlas that can be viewed alongside mask img for QA
    resampled_parcels_nii_path = dir_path + '/' + ID + '_parcels_resampled2mask_' + str(os.path.basename(mask).split('.')[0]) + '.nii.gz'
    resampled_parcels_atlas, _ = nodemaker.create_parcel_atlas(parcel_list_adj)
    resampled_parcels_map_nifti = resample_img(resampled_parcels_atlas, target_affine=mask_img.affine, target_shape=mask_data.shape)
    nib.save(resampled_parcels_map_nifti, resampled_parcels_nii_path)
    return(coords_adj, label_names_adj, parcel_list_adj)
Example #8
def not_in_mni(nii, plot=False):
    this_path = os.path.abspath(os.path.dirname(__file__))
    mask_nii = nb.load(
        os.path.join(this_path, "static", 'anatomical',
                     'MNI152_T1_2mm_brain_mask.nii.gz'))

    #resample to the smaller one
    if np.prod(nii.shape) > np.prod(mask_nii.shape):
        nan_mask = np.isnan(nii.get_data())
        if nan_mask.sum() > 0:
            nii.get_data()[nan_mask] = 0
        nii = resample_img(nii,
                           target_affine=mask_nii.get_affine(),
                           target_shape=mask_nii.get_shape(),
                           interpolation='nearest')
    else:
        mask_nii = resample_img(mask_nii,
                                target_affine=nii.get_affine(),
                                target_shape=nii.get_shape(),
                                interpolation='nearest')

    brain_mask = mask_nii.get_data() > 0
    excursion_set = np.logical_not(
        np.logical_or(nii.get_data() == 0, np.isnan(nii.get_data())))

    # deals with AFNI files
    if len(excursion_set.shape) == 5:
        excursion_set = excursion_set[:, :, :, 0, 0]
    in_brain_voxels = np.logical_and(excursion_set, brain_mask).sum()
    out_of_brain_voxels = np.logical_and(excursion_set,
                                         np.logical_not(brain_mask)).sum()

    perc_mask_covered = in_brain_voxels / float(brain_mask.sum()) * 100.0
    if np.isnan(perc_mask_covered):
        perc_mask_covered = 0
    perc_voxels_outside_of_mask = out_of_brain_voxels / float(
        excursion_set.sum()) * 100.0

    if perc_mask_covered > 50:
        if perc_mask_covered < 90 and perc_voxels_outside_of_mask > 20:
            ret = True
        else:
            ret = False
    elif perc_mask_covered == 0:
        ret = True
    elif perc_voxels_outside_of_mask > 50:
        ret = True
    else:
        ret = False

    return ret, perc_mask_covered, perc_voxels_outside_of_mask
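
A short usage sketch for not_in_mni; the statistical map path is hypothetical:

import nibabel as nb
outside_mni, perc_covered, perc_outside = not_in_mni(nb.load("zstat1.nii.gz"))
print(outside_mni, perc_covered, perc_outside)
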
Example #9
def _gen_reference(fixed_image, moving_image, out_file=None):
    import numpy
    from nilearn.image import resample_img, load_img

    if out_file is None:
        out_file = genfname(fixed_image, suffix='reference')
    new_zooms = load_img(moving_image).header.get_zooms()[:3]
    # Avoid letting small differences in reported resolution cause changes to
    # the FOV. See https://github.com/poldracklab/fmriprep/issues/512
    new_zooms_round = numpy.round(new_zooms, 3)
    resample_img(fixed_image,
                 target_affine=numpy.diag(new_zooms_round),
                 interpolation='nearest').to_filename(out_file)
    return out_file
Example #10
def not_in_mni(nii, target_template_image=DEFAULT_TEMPLATE, plot=False):
    this_path = os.path.abspath(os.path.dirname(__file__))

    POSSIBLE_TEMPLATES = get_possible_templates()
    mask_path = POSSIBLE_TEMPLATES[target_template_image]['mask']
    if mask_path is None:
        return False, 100.0, 100.0

    mask_nii = nb.load(os.path.join(this_path, "static", 'anatomical',mask_path))

    #resample to the smaller one
    if np.prod(nii.shape) > np.prod(mask_nii.shape):
        nan_mask = np.isnan(nii.get_data())
        if nan_mask.sum() > 0:
            nii.get_data()[nan_mask] = 0
        nii = resample_img(nii, target_affine=mask_nii.get_affine(), target_shape=mask_nii.get_shape(),interpolation='nearest')
    else:
        mask_nii = resample_img(mask_nii, target_affine=nii.get_affine(), target_shape=nii.get_shape(),interpolation='nearest')

    brain_mask = mask_nii.get_data() > 0
    excursion_set = np.logical_not(np.logical_or(nii.get_data() == 0, np.isnan(nii.get_data())))

    # deals with AFNI files
    if len(excursion_set.shape) == 5:
        excursion_set = excursion_set[:, :, :, 0, 0]
    # deal with 4D files
    elif len(excursion_set.shape) == 4:
        excursion_set = excursion_set[:, :, :, 0]
    in_brain_voxels = np.logical_and(excursion_set, brain_mask).sum()
    out_of_brain_voxels = np.logical_and(excursion_set, np.logical_not(brain_mask)).sum()


    perc_mask_covered = in_brain_voxels/float(brain_mask.sum())*100.0
    if np.isnan(perc_mask_covered):
        perc_mask_covered = 0
    perc_voxels_outside_of_mask = out_of_brain_voxels/float(excursion_set.sum())*100.0

    if perc_mask_covered > 50:
        if perc_mask_covered < 90 and perc_voxels_outside_of_mask > 20:
            ret = True
        else:
            ret = False
    elif perc_mask_covered == 0:
        ret = True
    elif perc_voxels_outside_of_mask > 50:
        ret = True
    else:
        ret = False

    return ret, perc_mask_covered, perc_voxels_outside_of_mask
Example #11
    def create_clean_mask(self, num_std_dev=1.5):
        """
        Create a subject-refined version of the clustering mask.
        """
        import os
        from pynets.core import utils
        from nilearn.masking import intersect_masks
        from nilearn.image import index_img, math_img, resample_img
        mask_name = os.path.basename(self.clust_mask).split('.nii')[0]
        self.atlas = "%s%s%s%s%s" % (mask_name, '_', self.clust_type, '_k', str(self.k))
        print("%s%s%s%s%s%s%s" % ('\nCreating atlas using ', self.clust_type, ' at cluster level ', str(self.k),
                                  ' for ', str(self.atlas), '...\n'))
        self._dir_path = utils.do_dir_path(self.atlas, self.func_file)
        self.uatlas = "%s%s%s%s%s%s%s%s" % (self._dir_path, '/', mask_name, '_clust-', self.clust_type, '_k',
                                            str(self.k), '.nii.gz')

        # Load clustering mask
        self._func_img.set_data_dtype(np.float32)
        func_vol_img = index_img(self._func_img, 1)
        func_vol_img.set_data_dtype(np.uint16)
        clust_mask_res_img = resample_img(nib.load(self.clust_mask), target_affine=func_vol_img.affine,
                                          target_shape=func_vol_img.shape, interpolation='nearest')
        clust_mask_res_img.set_data_dtype(np.uint16)
        func_data = np.asarray(func_vol_img.dataobj).astype('float32')
        func_int_thr = np.round(np.mean(func_data[func_data > 0]) - np.std(func_data[func_data > 0]) * num_std_dev, 3)
        if self.mask is not None:
            self._mask_img = nib.load(self.mask)
            self._mask_img.set_data_dtype(np.uint16)
            mask_res_img = resample_img(self._mask_img, target_affine=func_vol_img.affine,
                                        target_shape=func_vol_img.shape, interpolation='nearest')
            mask_res_img.set_data_dtype(np.uint16)
            self._clust_mask_corr_img = intersect_masks([math_img('img > ' + str(func_int_thr), img=func_vol_img),
                                                         math_img('img > 0.01', img=clust_mask_res_img),
                                                         math_img('img > 0.01', img=mask_res_img)],
                                                        threshold=1, connected=False)
            self._clust_mask_corr_img.set_data_dtype(np.uint16)
            self._mask_img.uncache()
            mask_res_img.uncache()
        else:
            self._clust_mask_corr_img = intersect_masks([math_img('img > ' + str(func_int_thr), img=func_vol_img),
                                                         math_img('img > 0.01', img=clust_mask_res_img)],
                                                        threshold=1, connected=False)
            self._clust_mask_corr_img.set_data_dtype(np.uint16)
        nib.save(self._clust_mask_corr_img, "%s%s%s%s" % (self._dir_path, '/', mask_name, '.nii.gz'))

        del func_data
        func_vol_img.uncache()
        clust_mask_res_img.uncache()

        return self.atlas
Example #12
def resample(img_array, target_voxel=(1, 1, 1)):
    '''
    :param img_array: input image (nibabel Nifti1Image)
    :param target_voxel: target voxel size in mm for resampling
    '''
    target_voxel_size = np.diag(target_voxel)
    img_resampled = image.resample_img(img_array,
                                       target_affine=target_voxel_size)
    voxel_size = check_voxel_size(img_resampled)
    img_resampled_img = img_resampled.get_data()
    img_resampled_img = np.rot90(img_resampled_img, 2)

    return img_resampled_img, voxel_size
Example #13
def undo_transform(mask, original):
    """Undo transforation

    Function which reverts a previously performed transformation.

    Args:
        mask (Nifti1Image): Image to be reverted to a previous state
        original (Nifti1Image): The original model which serves as a reference

    Returns:
        new_mask (Nifti1Image): The reverted image

    """
    shape = original.get_data().shape
    new_mask = resample_img(mask,
                            original.affine,
                            target_shape=shape,
                            interpolation='nearest')
    # Adds a description to the nifti image

    new_mask.header["descrip"] = np.array(
        "Segmentation of " + str(original.header["db_name"])[2:-1],
        dtype='|S80')

    return new_mask
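
A hedged usage sketch for undo_transform, assuming mask is a segmentation produced on a resampled grid and original is the image whose grid it should be mapped back onto; both paths are hypothetical:

import nibabel as nib
original = nib.load("t1.nii.gz")
mask = nib.load("t1_segmentation_2mm.nii.gz")
restored = undo_transform(mask, original)
restored.to_filename("t1_segmentation_native.nii.gz")
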
Example #14
def read_resize_image(in_file,
                      image_shape=None,
                      interpolation='linear',
                      crop=None):
    """
    Resizes nifti based on nibabel functions instead of SITK library used by read_image()
    :returns - resized image with proper affine matrices
    """
    print("Reading: {0}".format(in_file))
    image = nib.load(os.path.abspath(in_file))
    image = fix_shape(image)
    if crop:
        image = crop_img_to(image, crop, copy=True)
    if image_shape:
        new_shape = image_shape
        # Reorder (gets rid of rotations in the affine)
        image = reorder_img(image, resample=interpolation)
        # Calculate voxel spacing for desired image shape
        zoom_level = np.divide(new_shape, image.shape)
        current_spacing = [1, 1, 1] * image.affine[:3, :3]
        new_spacing = np.divide(current_spacing, zoom_level)
        # Calculate the new affine matrix
        new_affine = np.eye(4)
        new_affine[:3, :3] = np.eye(3) * new_spacing
        new_affine[:3, 3] = image.affine[:3, 3]
        # Resample new image
        image = resample_img(image,
                             target_shape=new_shape,
                             target_affine=new_affine,
                             interpolation=interpolation)

    return image
Example #15
def similarity_transform_volumes(
    image,
    affine_trans,
    target_size,
    interpolation='continuous',
):
    image_size = np.shape(image)
    possible_scales = np.true_divide(image_size, target_size)
    crop_scale = np.max(possible_scales)
    if crop_scale <= 1:
        crop_scale = 1
    scale_transform = np.diag((crop_scale, crop_scale, crop_scale, 1))
    shift = -(np.asarray(target_size) -
              np.asarray(image_size // np.asarray(crop_scale), )) // 2
    affine_trans_to_center = np.eye(4)
    affine_trans_to_center[:, 3] = [shift[0], shift[1], shift[2], 1]

    transform = np.matmul(affine_trans, scale_transform)
    transform = np.matmul(transform, affine_trans_to_center)

    nifti_img = nib.Nifti1Image(image, affine=np.eye(4))
    nifti_image_t = nil_image.resample_img(
        nifti_img,
        target_affine=transform,
        target_shape=target_size,
        interpolation=interpolation,
    )
    image_t = nifti_image_t.get_data()

    return image_t, transform
Example #16
def coreg_atlas_to_diff(dwi_dir, net_parcels_map_nifti):
    import nibabel as nib
    from nilearn.image import resample_img
    print('\nTransforming atlas to diffusion space...')
    merged_f_samples_path = "%s%s" % (dwi_dir, '/merged_f1samples.nii.gz')
    if os.path.exists(merged_f_samples_path) is True:
        dwi_infile = merged_f_samples_path
    else:
        dwi_infile = "%s%s" % (dwi_dir, '/dwi.nii.gz')

    out_file = "%s%s" % (dwi_dir, '/net_parcels_map_nifti_diff.nii.gz')

    img = nib.load(net_parcels_map_nifti)
    vox_sz = img.affine[0][0]
    targ_aff = img.affine / (np.array(
        [[int(abs(vox_sz)), 1, 1, 1], [1, int(abs(vox_sz)), 1, 1],
         [1, 1, int(abs(vox_sz)), 1], [1, 1, 1, 1]]))
    new_file_atlas_res = resample_img(net_parcels_map_nifti,
                                      target_affine=targ_aff)
    nib.save(new_file_atlas_res, out_file)

    # Apply transform between diff and MNI using FLIRT to get mask in diff space
    cmd = "flirt -in {} -init {} -ref {} -out {} -omat {} -interp trilinear -cost mutualinfo -applyxfm -dof 12 -searchrx -180 180 -searchry -180 180 -searchrz -180 180"
    cmd_run = cmd.format(net_parcels_map_nifti,
                         "%s%s" % (dwi_dir, '/xfms/MNI2diff.mat'), dwi_infile,
                         out_file, '/tmp/out_flirt.mat')
    os.system(cmd_run)
    return out_file
Example #17
def resample_img(source_image, target_shape, voxel_dims=[2., 2., 2.]):
    """
        This function resamples an input image to a specific (mm)
        isotropic voxels and crops it to a new dimensional pixel-grid.
        Parameters
        ----------
        source_image : nibabel.nifti1.Nifti1Image
            Nifti image to resample
        target_shape: list of float
            3 numbers to specify the dimensions of the resampled image.
        voxel_dims : list
            Length in mm for x,y, and z dimensions of each voxel.
        Returns
        -------
        resampled_img : nibabel.nifti1.Nifti1Image
            The resampled image.
        """
    voxel_transform = image.resample_img(source_image,
                                         target_affine=np.diag(voxel_dims))
    ref = np.array(voxel_transform.dataobj)
    ref = np.squeeze(ref, axis=3)
    x_dim, y_dim, _ = ref.shape
    ref = resize(ref, (x_dim, y_dim, 90), order=3)
    print(ref.shape)
    final_img = np.zeros(target_shape)
    for i in range(10, 110):
        for j in range(10, 110):
            final_img[i - 10, j - 10, :] = ref[i, j, :]

    return final_img
Example #18
def resample(base, ingested, template):
    """
    Resamples the image such that images which have already been aligned
    in real coordinates also overlap in the image/voxel space.

    **Positional Arguments**
            base:
                - Image to be aligned
            ingested:
                - Name of image after alignment
            template:
                - Image that is the target of the alignment
    """
    # Loads images
    template_im = nib.load(template)
    base_im = nib.load(base)
    # Aligns images
    target_im = nl.resample_img(
        base_im,
        target_affine=template_im.get_affine(),
        target_shape=template_im.get_data().shape,
        interpolation="nearest",
    )
    # Saves new image
    nib.save(target_im, ingested)
Example #19
def fetch_basc_vascular_atlas(n_scales='scale007',
                              target_affine=np.diag((5, 5, 5))):
    """ Fetch the BASC brain atlas given its resolution.

    Parameters
    ----------
    n_scales : str, BASC dataset name; possible values are: 'scale007',
        'scale012', 'scale036', 'scale064', 'scale122', 'scale197', 'scale325',
        'scale444'

    target_affine : np.array, (default=np.diag((5, 5, 5))), affine matrix for
        the produced Nifti images

    Return
    ------
    mask_full_brain : Nifti Image, full mask brain
    atlas_rois : Nifti Image, ROIs atlas
    """
    if n_scales not in valid_scales:
        raise ValueError(f"n_scales should be in {valid_scales}, "
                         f"got '{n_scales}'")

    basc_dataset = datasets.fetch_atlas_basc_multiscale_2015(version='sym')
    atlas_rois_fname = basc_dataset[n_scales]
    atlas_to_return = image.load_img(atlas_rois_fname)

    atlas_to_return = image.resample_img(atlas_to_return,
                                         target_affine,
                                         interpolation='nearest')

    brain_mask = image_nilearn.binarize_img(atlas_to_return, threshold=0)

    return brain_mask, atlas_to_return
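
A brief usage sketch, assuming the module-level valid_scales list and the imports used above are in scope:

import numpy as np
mask_img, atlas_img = fetch_basc_vascular_atlas('scale036',
                                                target_affine=np.diag((5, 5, 5)))
print(atlas_img.shape)
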
Example #20
    def get_masks(self):
        self.masks = nib.load(
            join(self.flex_dir, 'fmri/atlases/', 'Glasser_full.nii'))
        self.masks = image.resample_img(
            self.masks,
            self.trialbetas.affine,
            target_shape=self.trialbetas.get_fdata().shape[0:3])
Example #21
def main():
    opt = Options().parse()
    assert (opt.input.endswith('nii.gz'))
    inputVolume = nib.load(opt.input)
    N = inputVolume.shape[2]
    target_shape = (opt.fineSize, opt.fineSize, N)
    data = resample_img(inputVolume,
                        inputVolume.affine,
                        target_shape=target_shape).get_data()

    model = Model()
    model.initialize(opt)
    output = torch.FloatTensor(N, 3, opt.fineSize, opt.fineSize)
    for i in range(N):
        if opt.verbose:
            print('process slice %d' % i)
        model.set_input({'A': _toTensor(data[:, :, i])})
        model.forward()
        output[i] = model.fake_B.detach().cpu()

    output = _RGBtoGray(output)
    outputImg = nib.Nifti1Image(
        output.permute(1, 2, 0).numpy(), inputVolume.affine)
    outputfile = opt.output
    if not outputfile.endswith("nii.gz"):
        outputfile = "%s.nii.gz" % (outputfile)
    print('save output as %s' % outputfile)
    nib.save(outputImg, outputfile)
Example #22
def down_sample(src_img, factor=2):
    """

    Parameters
    ----------
    src_img: nii image
    factor: subsampling factor (default 2)

    Returns
    -------
    subsampled nii image

    Examples
    --------
    >>> import nilearn.datasets
    >>> import brainomics.image_resample
    >>> mni159_img = nilearn.datasets.load_mni152_template()

    >>> resamp_img = brainomics.image_resample.down_sample(src_img=mni159_img, factor=2)

    >>> print("SRC:", mni159_img.header.get_zooms(), mni159_img.get_fdata().shape)
    >>> print("DST:", resamp_img.header.get_zooms(), resamp_img.get_fdata().shape)

    >>> mni159_img.to_filename("/tmp/mni152_%imm.nii.gz" % mni159_img.header.get_zooms()[0])
    >>> resamp_img.to_filename("/tmp/mni152_%imm.nii.gz" % resamp_img.header.get_zooms()[0])
    """
    from nilearn.image import resample_img
    target_affine = np.copy(src_img.affine)[:3, :][:, :3]
    target_affine[:3, :3] *= factor
    return resample_img(src_img, target_affine=target_affine)
Example #23
def test_view_img():
    mni = datasets.load_mni152_template()
    with warnings.catch_warnings(record=True) as w:
        # Create a fake functional image by resampling the template
        img = image.resample_img(mni, target_affine=3 * np.eye(3))
        html_view = html_stat_map.view_img(img)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, threshold='95%')
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, bg_img=mni)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, bg_img=None)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, threshold=2., vmax=4.)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, symmetric_cmap=False)
        img_4d = image.new_img_like(img, img.get_data()[:, :, :, np.newaxis])
        assert len(img_4d.shape) == 4
        html_view = html_stat_map.view_img(img_4d, threshold=2., vmax=4.)
        _check_html(html_view)

    # Check that all warnings were expected
    warnings_set = set(warning_.category for warning_ in w)
    expected_set = set([FutureWarning, UserWarning,
                       DeprecationWarning])
    assert warnings_set.issubset(expected_set), (
        "the following warnings were not expected: {}").format(
        warnings_set.difference(expected_set))
Example #24
def get_individuals_paths_01(path_fmri=dataset_path+"/datasets/01/fMRI/", task=1, run=1, resolution_factor = 5, number_individuals=10):
    
    fmri_individuals = []
    file_individuals = sorted([f for f in listdir(path_fmri) if isdir(join(path_fmri, f))])

    target_shape = image.load_img(path_fmri + file_individuals[0] + '/3_nw_mepi_rest_with_cross.nii.gz').shape
    target_shape = (int(target_shape[0]/resolution_factor), 
                    int(target_shape[1]/resolution_factor), 
                    int(target_shape[2]/resolution_factor))
    
    for i in range(number_individuals):
        
        individual = file_individuals[i]

        fmri_file = '/3_nw_mepi_rest_with_cross.nii.gz'

        individual_path = path_fmri + individual + fmri_file
        
        img = image.load_img(individual_path)
        
        #scale affine accordingly
        off_set = img.affine[:,3]
        new_affine = img.affine*resolution_factor
        new_affine[:,3] = off_set
        
        fmri_image = image.resample_img(img, 
                                        target_affine=new_affine,
                                        target_shape=target_shape,
                                        interpolation='nearest')

        fmri_individuals += [fmri_image]

    return fmri_individuals
Example #25
    def apply_mask(self, mask):
        """ Mask Brain_Data instance

        Args:
            mask: mask (Brain_Data or nifti object)
            
        """

        if isinstance(mask,Brain_Data):
            mask = mask.to_nifti() # convert to nibabel
        if not isinstance(mask, nib.Nifti1Image):
            if type(mask) is str:
                if os.path.isfile(mask):
                    mask = nib.load(mask)
                # Check if mask needs to be resampled into Brain_Data mask space
                if not ((self.mask.get_affine()==mask.get_affine()).all()) & (self.mask.shape[0:3]==mask.shape[0:3]):
                    mask = resample_img(mask,target_affine=self.mask.get_affine(),target_shape=self.mask.shape)
            else:
                raise ValueError("Mask is not a nibabel instance, Brain_Data instance, or a valid file name.")

        masked = deepcopy(self)
        nifti_masker = NiftiMasker(mask_img=mask)
        masked.data = nifti_masker.fit_transform(self.to_nifti())
        if len(self.data.shape) > 2:
            masked.data = masked.data.squeeze()
        masked.nifti_masker = nifti_masker
        return masked
Example #26
def ingest(raw, ingested, template, qc=False):

    template_im = nb.load(template)
    raw_im = nb.load(raw)

    ingested_im = nl.resample_img(raw_im,
                                  target_affine=template_im.get_affine(),
                                  target_shape=template_im.get_data().shape,
                                  interpolation='nearest')

    nb.save(ingested_im, ingested)

    if qc:
        t = template_im.get_data()
        dim1 = t.shape
        r = raw_im.get_data()
        dim2 = r.shape
        i = ingested_im.get_data()
        dim3 = i.shape
        name = os.path.splitext(os.path.splitext(ingested)[0])[0] + '_QC.png'
        show_slices(
            [t[dim1[0] // 2, :, :], t[:, dim1[1] // 2, :], t[:, :, dim1[2] // 2]],
            [r[dim2[0] // 2, :, :], r[:, dim2[1] // 2, :], r[:, :, dim2[2] // 2]],
            [i[dim3[0] // 2, :, :], i[:, dim3[1] // 2, :], i[:, :, dim3[2] // 2]],
            name)
Example #27
    def preprocess(self, imgs):

        smooth_prefix = '' if self.smoothing_fwhm is None else 's%g' % self.smoothing_fwhm
        resample_prefix = '' if self.resampling is None else 'r%g' % self.resampling

        if not isinstance(imgs, list):
            imgs = [imgs]

        path_first = imgs[0] if isinstance(imgs[0], str) else imgs[0].get_filename()

        path_first_resampled = os.path.join(os.path.dirname(path_first), resample_prefix + os.path.basename(path_first))
        path_first_smoothed = os.path.join(os.path.dirname(path_first), smooth_prefix + resample_prefix + os.path.basename(path_first))

        if self.resampling is not None or self.smoothing_fwhm is not None:
            if self.resampling is not None and not os.path.exists(path_first_smoothed):
                if not os.path.exists(path_first_resampled):
                    imgs = resample_img(imgs, target_affine=np.diag(self.resampling * np.ones(3)))
                else:
                    imgs = [os.path.join(os.path.dirname(img), resample_prefix + os.path.basename(img)) if isinstance(img, str)
                            else os.path.join(os.path.dirname(img.get_filename()), resample_prefix + os.path.basename(img.get_filename())) for img in imgs]
            if self.smoothing_fwhm is not None:
                if not os.path.exists(path_first_smoothed):
                    imgs = smooth_img(imgs, self.smoothing_fwhm)
                else:
                    imgs = [os.path.join(os.path.dirname(img), smooth_prefix + resample_prefix + os.path.basename(img)) if isinstance(img, str)
                            else os.path.join(os.path.dirname(img.get_filename()), smooth_prefix + resample_prefix + os.path.basename(img.get_filename())) for img in imgs]
        else:
            imgs = [check_niimg_3d(img) for img in imgs]

        return imgs
Example #28
def resample(input, ref, output):
    image_to_resample = nibabel.load(input)
    image2=nibabel.load(ref)
    resampled_image = image.resample_img(image_to_resample,
                                         target_affine=image2.get_affine(),
                                         interpolation="nearest",
                                         target_shape=image2.shape)
    nibabel.save(resampled_image, output)

def download_and_resample(images_df, dest_dir, target):
    """Downloads all stat maps and resamples them to a common space.
    """
    
    target_nii = nb.load(target)
    orig_path = os.path.join(dest_dir, "original")
    mkdir_p(orig_path)
    resampled_path = os.path.join(dest_dir, "resampled")
    mkdir_p(resampled_path)
    
    for row in images_df.iterrows():
        # Downloading the file to the "original" subfolder
        _, _, ext = split_filename(row[1]['file'])
        orig_file = os.path.join(orig_path, "%04d%s" % (row[1]['image_id'], ext))
        if not os.path.exists(orig_file):
            urllib.urlretrieve(row[1]['file'], orig_file)
        
        # Resampling the file to target and saving the output in the "resampled"
        # folder
        resampled_file = os.path.join(resampled_path, 
            "%04d_2mm%s" % (row[1]['image_id'], ext))
        if not os.path.exists(resampled_file):
            resampled_nii = resample_img(orig_file, target_nii.get_affine(), 
                target_nii.shape)
            resampled_nii.to_filename(resampled_file)
Example #30
def preprocess_image(img):
    path = "path" # Path to reference image
    ref_img = nib.load(path)
    affine = ref_img.get_affine()
    shape = ref_img.get_shape()
    im_res = image.resample_img(img, target_affine=affine, target_shape=shape, interpolation='continuous')
    return im_res
Example #31
def _resample_to_self(img, interpolation):
    _, s, _ = np.linalg.svd(img.affine[0:3, 0:3])
    vsize = np.min(np.abs(s))
    img = image.resample_img(img,
                             target_affine=np.diag([vsize, vsize, vsize]),
                             interpolation=interpolation)
    return img
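
An illustration of the idea behind _resample_to_self: the singular values of the affine's 3 x 3 block are the absolute voxel sizes, so the smallest one is the finest resolution already present in the image. The affine below is hypothetical.

import numpy as np
affine = np.diag([3., 2., 2.5, 1.])      # hypothetical anisotropic affine
_, s, _ = np.linalg.svd(affine[0:3, 0:3])
print(np.min(np.abs(s)))                 # 2.0 -> resample to 2 mm isotropic
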
Example #32
    def transform(self, imgs, confounds=None):

        smooth_prefix = '' if self.smoothing_fwhm is None else 's%g' % self.smoothing_fwhm
        resample_prefix = '' if self.resampling is None else 'r%g' % self.resampling

        if not isinstance(imgs, list):
            imgs = [imgs]

        path_first = imgs[0] if isinstance(imgs[0], str) else imgs[0].get_filename()

        path_first_resampled = os.path.join(os.path.dirname(path_first), resample_prefix + os.path.basename(path_first))
        path_first_smoothed = os.path.join(os.path.dirname(path_first), smooth_prefix + resample_prefix + os.path.basename(path_first))

        if self.resampling is not None and self.smoothing_fwhm is not None:
            if self.resampling is not None:
                if not os.path.exists(path_first_resampled) and not os.path.exists(path_first_smoothed):
                    imgs = resample_img(imgs, target_affine=np.diag(self.resampling * np.ones(3)))
                else:
                    imgs = []
            if self.smoothing_fwhm is not None:
                if not os.path.exists(path_first_smoothed):
                    imgs = smooth_img(imgs, self.smoothing_fwhm)
                else:
                    imgs = []
        else:
            imgs = [check_niimg_3d(img) for img in imgs]

        return self.masker.transform(imgs)
Example #33
def _check_vol_to_surf_results(img, mesh):
    mni_mask = datasets.load_mni152_brain_mask()
    for kind, interpolation, mask_img in itertools.product(
        ['ball', 'line'], ['linear', 'nearest'], [mni_mask, None]):
        proj_1 = vol_to_surf(img,
                             mesh,
                             kind=kind,
                             interpolation=interpolation,
                             mask_img=mask_img)
        assert_true(proj_1.ndim == 1)
        img_rot = image.resample_img(img,
                                     target_affine=rotation(
                                         np.pi / 3., np.pi / 4.))
        proj_2 = vol_to_surf(img_rot,
                             mesh,
                             kind=kind,
                             interpolation=interpolation,
                             mask_img=mask_img)
        # The projection values for the rotated image should be close
        # to the projection for the original image
        diff = np.abs(proj_1 - proj_2) / np.abs(proj_1)
        assert_true(np.mean(diff[diff < np.inf]) < .03)
        img_4d = image.concat_imgs([img, img])
        proj_4d = vol_to_surf(img_4d,
                              mesh,
                              kind=kind,
                              interpolation=interpolation,
                              mask_img=mask_img)
        nodes, _ = surface.load_surf_mesh(mesh)
        assert_array_equal(proj_4d.shape, [nodes.shape[0], 2])
        assert_array_almost_equal(proj_4d[:, 0], proj_1, 3)
Example #34
    def test_single_subject_resampling(self):
        voxel_size = [3, 3, 3]

        # nilearn
        nilearn_resampled_img = resample_img(self.X[0],
                                             interpolation='nearest',
                                             target_affine=np.diag(voxel_size))
        nilearn_resampled_array = nilearn_resampled_img.dataobj

        # photon
        resampler = PipelineElement('ResampleImages',
                                    hyperparameters={},
                                    voxel_size=voxel_size,
                                    batch_size=1)
        single_resampled_img, _, _ = resampler.transform(self.X[0])

        branch = NeuroBranch('NeuroBranch', output_img=True)
        branch += resampler
        branch_resampled_img, _, _ = branch.transform(self.X[0])

        # assert
        self.assertIsInstance(single_resampled_img, np.ndarray)
        self.assertIsInstance(branch_resampled_img[0], Nifti1Image)

        self.assertTrue(
            np.array_equal(nilearn_resampled_array, single_resampled_img))
        self.assertTrue(
            np.array_equal(single_resampled_img,
                           branch_resampled_img[0].dataobj))
Example #35
def sl_filter_target(streamlines, target_mask, affine, seed_label,
                     target_label):
    from dipy.tracking.utils import target
    from nilearn.image import resample_img
    import numpy as np
    import os

    import nibabel as nib
    trk_file = nib.streamlines.load(streamlines)
    streams = trk_file.streamlines
    hdr = trk_file.header

    # resample mask to resolution of input data & get data
    target_resamp = resample_img(target_mask, affine)
    target_mask_bool = np.zeros(target_resamp.shape)
    target_mask_bool[
        target_resamp.get_data().round() > 0] = 1  # rounding is key!

    target_sl_generator = target(streams,
                                 target_mask_bool,
                                 affine,
                                 include=True)
    target_streams = list(target_sl_generator)

    # create new filtered streamlines .trk file
    tractogram = nib.streamlines.Tractogram(target_streams)
    tractogram.affine_to_rasmm = np.eye(4)
    trk_file = nib.streamlines.TrkFile(tractogram, header=hdr)

    target_streamlines = os.path.abspath(
        'target_streamlines_seed-%d_target-%d.trk' %
        (seed_label, target_label))
    nib.streamlines.save(trk_file, target_streamlines)

    return target_streamlines, seed_label, target_label
Example #36
def test_view_img():
    mni = datasets.load_mni152_template()
    with warnings.catch_warnings(record=True) as w:
        # Create a fake functional image by resampling the template
        img = image.resample_img(mni, target_affine=3 * np.eye(3))
        html_view = html_stat_map.view_img(img)
        _check_html(html_view, title="Slice viewer")
        html_view = html_stat_map.view_img(img,
                                           threshold='95%',
                                           title="SOME_TITLE")
        _check_html(html_view, title="SOME_TITLE")
        html_view = html_stat_map.view_img(img, bg_img=mni)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, bg_img=None)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, threshold=2., vmax=4.)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, symmetric_cmap=False)
        img_4d = image.new_img_like(img, get_data(img)[:, :, :, np.newaxis])
        assert len(img_4d.shape) == 4
        html_view = html_stat_map.view_img(img_4d, threshold=2., vmax=4.)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img_4d, threshold=1e6)
        _check_html(html_view)

    # Check that all warnings were expected
    warnings_set = set(warning_.category for warning_ in w)
    expected_set = set([FutureWarning, UserWarning, DeprecationWarning])
    assert warnings_set.issubset(expected_set), (
        "the following warnings were not expected: {}").format(
            warnings_set.difference(expected_set))
Example #37
def preprocess_data(img):
    affine = img.affine
    img = image.resample_img(img,
                             target_affine=affine,
                             target_shape=[256, 256, 160],
                             interpolation="nearest")
    return img
Example #38
    def test_multi_subject_resampling(self):
        voxel_size = [3, 3, 3]

        # nilearn
        nilearn_resampled = resample_img(self.X[:3],
                                         interpolation='nearest',
                                         target_affine=np.diag(voxel_size))
        nilearn_resampled_img = [
            index_img(nilearn_resampled, i)
            for i in range(nilearn_resampled.shape[-1])
        ]
        nilearn_resampled_array = np.moveaxis(nilearn_resampled.dataobj, -1, 0)

        # photon
        resampler = PipelineElement('ResampleImages',
                                    hyperparameters={},
                                    voxel_size=voxel_size)
        resampled_img, _, _ = resampler.transform(self.X[:3])

        branch = NeuroBranch('NeuroBranch', output_img=True)
        branch += resampler
        branch_resampled_img, _, _ = branch.transform(self.X[:3])

        # assert
        self.assertIsInstance(resampled_img, np.ndarray)
        self.assertIsInstance(branch_resampled_img, list)
        self.assertIsInstance(branch_resampled_img[0], Nifti1Image)

        self.assertTrue(np.array_equal(nilearn_resampled_array, resampled_img))
        self.assertTrue(
            np.array_equal(branch_resampled_img[1].dataobj,
                           nilearn_resampled_img[1].dataobj))
def get_frequency_map(images_df, dest_dir, target):
    """
    """
    
    target_nii = nb.load(target)
    orig_path = os.path.join(dest_dir, "original")
    freq_map_data = np.zeros(target_nii.shape)
    
    for row in images_df.iterrows():
        _, _, ext = split_filename(row[1]['file'])
        orig_file = os.path.join(orig_path, "%04d%s" % (row[1]['image_id'], ext))
        if not os.path.exists(orig_file):
            urllib.urlretrieve(row[1]['file'], orig_file)
        
        resampled_nii = resample_img(orig_file, target_nii.get_affine(), 
                                     target_nii.shape, 
                                     interpolation="nearest")
        data = resampled_nii.get_data()
        data[data != 0] = 1
        if len(data.shape) == 4:
            data.shape = data.shape[:3]
        freq_map_data += data
        
    return nb.Nifti1Image(freq_map_data, target_nii.get_affine())
Example #40
def not_in_mni(nii, plot=False):
    mask_nii = nb.load(
        os.path.join(settings.STATIC_ROOT, 'anatomical',
                     'MNI152_T1_2mm_brain_mask.nii.gz'))

    #resample to the smaller one
    if np.prod(nii.shape) > np.prod(mask_nii.shape):
        nan_mask = np.isnan(nii.get_data())
        if nan_mask.sum() > 0:
            nii.get_data()[nan_mask] = 0
        nii = resample_img(nii,
                           target_affine=mask_nii.get_affine(),
                           target_shape=mask_nii.get_shape(),
                           interpolation='nearest')
    else:
        mask_nii = resample_img(mask_nii,
                                target_affine=nii.get_affine(),
                                target_shape=nii.get_shape(),
                                interpolation='nearest')

    brain_mask = mask_nii.get_data() > 0
    excursion_set = np.logical_not(
        np.logical_or(nii.get_data() == 0, np.isnan(nii.get_data())))

    in_brain_voxels = np.logical_and(excursion_set, brain_mask).sum()
    out_of_brain_voxels = np.logical_and(excursion_set,
                                         np.logical_not(brain_mask)).sum()

    perc_mask_covered = in_brain_voxels / float(brain_mask.sum()) * 100.0
    if np.isnan(perc_mask_covered):
        perc_mask_covered = 0
    perc_voxels_outside_of_mask = out_of_brain_voxels / float(
        excursion_set.sum()) * 100.0

    if perc_mask_covered > 50:
        if perc_mask_covered < 90 and perc_voxels_outside_of_mask > 20:
            ret = True
        else:
            ret = False
    elif perc_mask_covered == 0:
        ret = True
    elif perc_voxels_outside_of_mask > 50:
        ret = True
    else:
        ret = False

    return ret, perc_mask_covered, perc_voxels_outside_of_mask
Example #41
def not_in_mni(nii, plot=False):
    this_path = os.path.abspath(os.path.dirname(__file__))
    mask_nii = nb.load(os.path.join(this_path, "static", "anatomical", "MNI152_T1_2mm_brain_mask.nii.gz"))

    # resample to the smaller one
    if np.prod(nii.shape) > np.prod(mask_nii.shape):
        nan_mask = np.isnan(nii.get_data())
        if nan_mask.sum() > 0:
            nii.get_data()[nan_mask] = 0
        nii = resample_img(
            nii, target_affine=mask_nii.get_affine(), target_shape=mask_nii.get_shape(), interpolation="nearest"
        )
    else:
        mask_nii = resample_img(
            mask_nii, target_affine=nii.get_affine(), target_shape=nii.get_shape(), interpolation="nearest"
        )

    brain_mask = mask_nii.get_data() > 0
    excursion_set = np.logical_not(np.logical_or(nii.get_data() == 0, np.isnan(nii.get_data())))

    # deals with AFNI files
    if len(excursion_set.shape) == 5:
        excursion_set = excursion_set[:, :, :, 0, 0]
    # deal with 4D files
    elif len(excursion_set.shape) == 4:
        excursion_set = excursion_set[:, :, :, 0]
    in_brain_voxels = np.logical_and(excursion_set, brain_mask).sum()
    out_of_brain_voxels = np.logical_and(excursion_set, np.logical_not(brain_mask)).sum()

    perc_mask_covered = in_brain_voxels / float(brain_mask.sum()) * 100.0
    if np.isnan(perc_mask_covered):
        perc_mask_covered = 0
    perc_voxels_outside_of_mask = out_of_brain_voxels / float(excursion_set.sum()) * 100.0

    if perc_mask_covered > 50:
        if perc_mask_covered < 90 and perc_voxels_outside_of_mask > 20:
            ret = True
        else:
            ret = False
    elif perc_mask_covered == 0:
        ret = True
    elif perc_voxels_outside_of_mask > 50:
        ret = True
    else:
        ret = False

    return ret, perc_mask_covered, perc_voxels_outside_of_mask
Example #42
def loader(anat, downsample, target_affine, dataroot, subject, maskpath, nrun,
           niifilename, labels, **kwargs):
    ''' 
    All parameters are submitted as cfg dictionary.
    Given parameters in cfg, return masked and concatenated over runs data 
    
    Input
    anat: MNI template
    downsample: 1 or 0
    target_affine: downsampling matrix
    dataroot: element of path to data
    subject: folder in dataroot with subject data
    maskpath: path to mask
    nrun: number of runs
    niifilename: how is the data file called
    labels: labels from load_labels function
    
    Output
    dict(nii_func=nii_func,nii_mean=nii_mean,masker=masker,nii_mask=nii_mask)
    nii_func: 4D data
    nii_mean: mean over 4th dimension
    masker: masker object from nibabel
    nii_mask: 3D mask
    '''
    nii_func = list()
    for r in range(nrun):
        fname = '{0}/{1}/run{2}/{3}'.format(dataroot, subject, r+1, niifilename) # Assumption about file location
        nii_img = load(fname, mmap=False)
        nii_img.set_sform(anat.get_sform())
        # Get mean over 4D
        nii_mean = mean_img(nii_img)
        # Masking
        nii_mask = load(maskpath)
        nii_mask.set_sform(anat.get_sform())
        # Binarize the mask
        nii_mask = check_binary(nii_mask)
        if downsample:
            nii_img = resample_img(nii_img, target_affine=target_affine)
            nii_mask = resample_img(nii_mask, target_affine=target_affine, interpolation='nearest')
        masker = NiftiMasker(nii_mask, standardize=True)
        nii_img = masker.fit_transform(nii_img)
        # Drop zero timepoints, zscore
        nii_img = drop_labels(nii_img, labels.get('to_drop_zeros')[r])
        nii_func.append(stats.zscore(nii_img, axis=0)) # zscore over time
    # throw data together
    nii_func = np.concatenate(nii_func)
    return dict(nii_func=nii_func, nii_mean=nii_mean, masker=masker, nii_mask=nii_mask)
Example #43
    def _contrast(self, contrast_id, contrast_values):
        contrast = None

        n_regressors = [dm.size for dm in self.design_mask_]
        contrast_values = check_contrast(contrast_values, n_regressors)

        for i, (glm, design_mask, con_val) in enumerate(
                zip(self.glm_, self.design_mask_, contrast_values)):

            if (con_val is None or np.all(con_val == 0) or con_val.size == 0
                or glm is None or np.any(con_val[~design_mask] != 0)):
                # contrast null for session, or design_matrix ill conditioned
                # or con_val is using a null regressor
                pass
            elif contrast is None:
                contrast = glm.contrast(
                    con_val[design_mask], contrast_type=self.contrast_type)
            else:
                contrast = contrast + glm.contrast(
                    con_val[design_mask], contrast_type=self.contrast_type)

        if contrast is None:
            return dict()

        mask_array = self.masker.mask_img_.get_data().astype('bool')
        affine = self.masker.mask_img_.get_affine()

        if self.output_z or self.output_stat:
            # compute the contrast and stat
            contrast.z_score()

        do_outputs = [self.output_z, self.output_stat,
                      self.output_effects, self.output_variance]
        estimates = ['z_score_', 'stat_', 'effect', 'variance']
        descrips = ['z statistic', 'Statistical value',
                    'Estimated effect', 'Estimated variance']
        outputs = []
        for (do_output, estimate, descrip) in zip(
                do_outputs, estimates, descrips):

            if do_output:
                result_map = np.zeros(mask_array.shape)
                result_map[mask_array] = getattr(contrast, estimate).squeeze()
                niimg = nb.Nifti1Image(result_map, affine=affine)
                if (self.target_affine is not None
                        or self.target_shape is not None):
                    niimg = resample_img(
                        niimg,
                        target_affine=self.target_affine,
                        target_shape=self.target_shape)
                output_dir = os.path.join(
                    self.output_dir, '%s_maps' % estimate.rsplit('_')[0])
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                map_path = os.path.join(output_dir, '%s.nii.gz' % contrast_id)
                niimg.to_filename(map_path)
                outputs.append(map_path)

        return outputs

    def transform(self, f):
        if isinstance(f, str):
            f = nib.load(f)
        if f.shape != self._mask_image.shape:
            f = image.resample_img(f,
                                   target_shape=self._mask_image.shape,
                                   target_affine=self._mask_image.get_affine())
        return np.array(f.get_data()[self._indexes])
Example #45
    def fit(self):

        if self.resampling is not None:
            self.mask_img = resample_img(self.mask_img, target_affine=np.diag(self.resampling * np.ones(3)))
        self.masker = NiftiMasker(mask_img=self.mask_img)
        self.masker.fit()

        return self
Example #46
def spatial_normalize_image(image, standard):
    # Make sure we have nibabel image objects
    if determine_read_data(standard):
        standard = read_image(standard)
    if determine_read_data(image):
        image = read_image(image)
    header = resample_img(image, target_affine=standard.get_affine(),
                          target_shape=standard.shape[:3])
    transformed = header.get_data()
    return transformed
Example #47
def get_standard_brain(voxdim=2):
    mr_directory = get_data_directory()
    brain = "%s/MNI152_T1_%smm_brain.nii.gz" %(mr_directory,voxdim)
    if not os.path.exists(brain):
        brain = nib.load("%s/MNI152_T1_2mm_brain.nii.gz" %(mr_directory))
        brain = resample_img(brain,target_affine=numpy.diag([voxdim,voxdim,voxdim]))
        return brain
    else:
        return nib.load(brain)
Example #48
def get_standard_mask(voxdim=2):
    mr_directory = get_data_directory()
    mask = "%s/MNI152_T1_%smm_brain_mask.nii.gz" %(mr_directory,voxdim)
    if not os.path.exists(mask):
        mask = nib.load("%s/MNI152_T1_2mm_brain_mask.nii.gz" %(mr_directory))
        mask = resample_img(mask,target_affine=numpy.diag([voxdim,voxdim,voxdim]),interpolation="nearest")
        return mask
    else:
        return nib.load(mask)
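The two helpers above fall back to resampling the bundled 2 mm MNI152 template whenever a file at the requested voxel size is missing. A minimal usage sketch, assuming nibabel and nilearn are installed and using a placeholder input path (not from the original project):

import nibabel as nib
from nilearn.image import resample_img

# Placeholder path; any 3D statistical map in MNI space would do.
stat = nib.load("stat_map.nii.gz")
mask = get_standard_mask(voxdim=4)

# Resample the map onto the mask grid; .affine is the modern nibabel
# equivalent of the deprecated get_affine() used in the snippets above.
stat_4mm = resample_img(stat,
                        target_affine=mask.affine,
                        target_shape=mask.shape,
                        interpolation="continuous")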
Example #49
    def transform(self, imgs, confounds=None):
        if self.smoothing_fwhm is not None or self.target_affine is not None:
            if self.smoothing_fwhm is not None:
                imgs = smooth_img(imgs, self.smoothing_fwhm)
            if self.target_affine is not None:
                imgs = resample_img(imgs, target_affine=self.target_affine)
        else:
            imgs = [check_niimg_3d(img) for img in imgs] if isinstance(imgs, list) else check_niimg_3d(imgs)

        return imgs
Example #50
File: image.py  Project: Neurita/pypes
def resample(in_file, **kwargs):
    """ Use nilearn.image.resample_img.

    Returns
    -------
    out_file: str
        The absolute path to the output file.
    """
    from nilearn.image import resample_img
    return resample_img(img=in_file, **kwargs)
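A hedged usage sketch (the file names and the 3 mm target are illustrative, not taken from pypes): the wrapper simply forwards its keyword arguments to nilearn's resample_img, and as written here it returns the resampled image object, which can then be saved explicitly.

import numpy as np

# Placeholder input; any NIfTI path accepted by nilearn works here.
resampled = resample("epi.nii.gz",
                     target_affine=np.diag((3., 3., 3.)),
                     interpolation="continuous")
resampled.to_filename("epi_3mm.nii.gz")  # persist the result if a file is needed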
Example #51
    def _contrast(self, contrast_id, contrast_values):
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        contrast = None

        n_regressors = [glm.X.shape[1] for glm in self.glm_]
        contrast_values = check_contrast(contrast_values, n_regressors)

        for i, (glm, con_val) in enumerate(zip(self.glm_, contrast_values)):
            if con_val is None or np.all(con_val == 0):
                pass  # print 'Contrast for session %d is null' % i
            elif contrast is None:
                contrast = glm.contrast(
                    con_val, contrast_type=self.contrast_type)
            else:
                contrast = contrast + glm.contrast(
                    con_val, contrast_type=self.contrast_type)

        if contrast is None:
            return dict()

        mask_array = self.masker.mask_img_.get_data().astype('bool')
        affine = self.masker.mask_img_.get_affine()

        if self.output_z or self.output_stat:
            # compute the contrast and stat
            contrast.z_score()

        do_outputs = [self.output_z, self.output_stat,
                      self.output_effects, self.output_variance]
        estimates = ['z_score_', 'stat_', 'effect', 'variance']
        descrips = ['z statistic', 'Statistical value',
                    'Estimated effect', 'Estimated variance']
        outputs = []
        for (do_output, estimate, descrip) in zip(
                do_outputs, estimates, descrips):

            if do_output:
                result_map = np.zeros(mask_array.shape)
                result_map[mask_array] = getattr(contrast, estimate).squeeze()
                niimg = nb.Nifti1Image(result_map, affine=affine)
                if (self.target_affine is not None
                        or self.target_shape is not None):
                    niimg = resample_img(
                        niimg,
                        target_affine=self.target_affine,
                        target_shape=self.target_shape)

                niimg_path = os.path.join(
                    self.output_dir, '%s_map.nii.gz' % contrast_id)
                niimg.to_filename(niimg_path)
                outputs.append(niimg_path)
        return outputs
Example #52
def test_view_stat_map():
    mni = datasets.load_mni152_template()
    # Create a fake functional image by resampling the template
    img = image.resample_img(mni, target_affine=3*np.eye(3))
    html = html_stat_map.view_stat_map(img)
    _check_html(html)
    html = html_stat_map.view_stat_map(img, threshold='95%')
    _check_html(html)
    html = html_stat_map.view_stat_map(img, bg_img=mni)
    _check_html(html)
    html = html_stat_map.view_stat_map(img, threshold=2., vmax=4.)
    _check_html(html)
Example #53
def load_image(masker, filename, save_resampled=True):
    """ Load an image, resampling into MNI space if needed. """
    filename = join(settings.DECODED_IMAGE_DIR, filename)
    img = nb.load(filename)
    if img.shape[:3] != (91, 109, 91):
        img = resample_img(
            img, target_affine=decode_image.anatomical.get_affine(),
            target_shape=(91, 109, 91), interpolation='nearest')
        if save_resampled:
            unlink(filename)
            img.to_filename(filename)
    return masker.mask(img)
Example #54
def resize_image(input_image,voxel_dimension):
  if len(voxel_dimension) != 3:
    print "Please specify a list of three voxel sizes, e.g. (3,3,3)"
    return
  if determine_read_data(input_image):
    header = nibabel.load(input_image)
  else:
    header = input_image
  voxel_dimension = tuple(voxel_dimension)
  target_affine = np.diag(voxel_dimension)
  resampled = resample_img(header, target_affine=target_affine)
  return resampled
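A brief illustrative call, assuming the snippet's own helpers (determine_read_data and friends) are importable alongside it and using a placeholder filename:

import numpy as np

resized = resize_image("subject_T1.nii.gz", [2, 2, 2])  # placeholder path
if resized is not None:                   # None is returned on bad input
    print(np.diag(resized.affine)[:3])    # voxel sizes, expected ~[2. 2. 2.]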
Example #55
    def plotCorrelationResults(self, background, overlay, desired_resolution_file_location):
        image_to_resample = nibabel.load(background)
        image_to_use_for_sample = image.index_img(desired_resolution_file_location, 0)
        resampled_background = resample_img(image_to_resample,
                                            target_affine=image_to_use_for_sample.get_affine(),
                                            target_shape=image_to_use_for_sample.shape)

        mri_args = {
            'background' : resampled_background,
            'cmap_bg' : 'gray',
            'cmap_overlay' : 'PiYG', # YlOrRd_r # pl.cm.autumn
            'interactive' : cfg.getboolean('examples', 'interactive', True),
            }

        plot_lightbox(overlay=overlay, vlim=(-1.0, 1.0), do_stretch_colors=True, **mri_args)
Example #56
def compute_confounds(imgs, mask_img, n_confounds=5, get_randomized_svd=False,
                      compute_not_mask=False):
    """
    """
    confounds = []
    if not isinstance(imgs, collections.Iterable) or \
            isinstance(imgs, _basestring):
        imgs = [imgs, ]

    img = _utils.check_niimg_4d(imgs[0])
    shape = img.shape[:3]
    affine = get_affine(img)

    if isinstance(mask_img, _basestring):
        mask_img = _utils.check_niimg_3d(mask_img)

    if not _check_same_fov(img, mask_img):
        mask_img = resample_img(
            mask_img, target_shape=shape, target_affine=affine,
            interpolation='nearest')

    if compute_not_mask:
        print("Non mask based confounds extraction")
        not_mask_data = np.logical_not(mask_img.get_data().astype(np.int))
        whole_brain_mask = masking.compute_multi_epi_mask(imgs)
        not_mask = np.logical_and(not_mask_data, whole_brain_mask.get_data())
        mask_img = new_img_like(img, not_mask.astype(np.int), affine)

    for img in imgs:
        print("[Confounds Extraction] {0}".format(img))
        img = _utils.check_niimg_4d(img)
        print("[Confounds Extraction] high ariance confounds computation]")
        high_variance = high_variance_confounds(img, mask_img=mask_img,
                                                n_confounds=n_confounds)
        if compute_not_mask and get_randomized_svd:
            signals = masking.apply_mask(img, mask_img)
            non_constant = np.any(np.diff(signals, axis=0) != 0, axis=0)
            signals = signals[:, non_constant]
            signals = signal.clean(signals, detrend=True)
            print("[Confounds Extraction] Randomized SVD computation")
            U, s, V = randomized_svd(signals, n_components=n_confounds,
                                     random_state=0)
            if high_variance is not None:
                confound_ = np.hstack((U, high_variance))
            else:
                confound_ = U
        else:
            confound_ = high_variance
        confounds.append(confound_)

    return confounds
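A hypothetical call (file names are placeholders) showing the expected inputs and output: one confound matrix per 4D run, with roughly n_timepoints rows and n_confounds columns.

runs = ["sub-01_run-01_bold.nii.gz", "sub-01_run-02_bold.nii.gz"]  # placeholders
confs = compute_confounds(runs, mask_img="brain_mask.nii.gz", n_confounds=5)
for run, conf in zip(runs, confs):
    print(run, conf.shape)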
Example #57
def not_in_mni(nii, plot=False):
    mask_nii = nb.load(os.path.join(settings.STATIC_ROOT,'anatomical','MNI152_T1_2mm_brain_mask.nii.gz'))
    
    #resample to the smaller one
    if np.prod(nii.shape) > np.prod(mask_nii.shape):
        nan_mask = np.isnan(nii.get_data())
        if nan_mask.sum() > 0:
            nii.get_data()[nan_mask] = 0
        nii = resample_img(nii, target_affine=mask_nii.get_affine(),
                           target_shape=mask_nii.get_shape(), interpolation='nearest')
    else:
        mask_nii = resample_img(mask_nii, target_affine=nii.get_affine(),
                                target_shape=nii.get_shape(), interpolation='nearest')
    
    brain_mask = mask_nii.get_data() > 0
    excursion_set = np.logical_not(np.logical_or(nii.get_data() == 0, np.isnan(nii.get_data())))    
    
    in_brain_voxels = np.logical_and(excursion_set, brain_mask).sum()
    out_of_brain_voxels = np.logical_and(excursion_set, np.logical_not(brain_mask)).sum()
    
    
    perc_mask_covered = in_brain_voxels/float(brain_mask.sum())*100.0
    if np.isnan(perc_mask_covered):
        perc_mask_covered = 0
    perc_voxels_outside_of_mask = out_of_brain_voxels/float(excursion_set.sum())*100.0
    
    if perc_mask_covered > 50:
        if perc_mask_covered < 90 and perc_voxels_outside_of_mask > 20:
            ret = True
        else:
            ret = False
    elif perc_mask_covered == 0:
        ret = True
    elif perc_voxels_outside_of_mask > 50:
        ret = True
    else:
        ret = False
    
    return ret, perc_mask_covered, perc_voxels_outside_of_mask
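A short illustrative check on a placeholder map, assuming the MNI mask resolved by the function is available under the deployment's STATIC_ROOT:

import nibabel as nb

img = nb.load("zstat1.nii.gz")  # placeholder path
flagged, pct_covered, pct_outside = not_in_mni(img)
print("outside MNI:", flagged,
      "| %.1f%% of mask covered, %.1f%% of voxels outside" % (pct_covered, pct_outside))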
Example #58
def SingleSubject(cfg):
    '''
    Pipeline to run basic single-subject classification
    '''

    ## Get crossval scheme, can modify here later if we have variable schemes
    crossval = Analysis.get_crossval(**cfg)
    cfg.update(crossval=crossval)    
    print('Crossval acquired')

    ## Load the data
    data_test = load_data(cfg)
    cfg.update(data_test=data_test.get('nii_func'),
               nii_mean=data_test.get('nii_mean'),
               nii_mask=data_test.get('nii_mask'),
               masker=data_test.get('masker'))
    print('Data loaded')
    
    ## Classify
    cv_scores,y_pred_all = Analysis.classify(**cfg)
    coef_img,coef_2mm = Analysis.get_impmap(**cfg)    
    print('Classification done')

    ## Searchlight
    sl_4mm = slight.get_searchlight(**cfg)
    # Upsample
    sl_2mm = resample_img(sl_4mm,target_affine = np.diag((2,2,2)))
    print('Searchlight done')
    
    ##  Permutations
    null_cv_scores = Permutations.get_permutations(**cfg)
    null_plot = Permutations.plot_permutation(null_cv_scores)       
    print('Permutations done and plotted')

    ## Confmat
    cm = Analysis.get_confmat(cfg.get('labels').get('regressor'),y_pred_all)
    cm_plot = Analysis.plot_confusion_matrix(cm)    
    print('Confmat done and plotted')

    ## Cook results
    results = dict(accuracy = np.mean(cv_scores),
                   impmap_4mm = coef_img, impmap_2mm = coef_2mm,
                   searchlight_4mm = sl_4mm, searchlight_2mm = sl_2mm,
                   permutation_score = null_cv_scores[1],
                   permutation_plot = null_plot,
                   confusion_matrix = cm,
                   confusion_matrix_plot = cm_plot)
    print('Returning results')
    return results