def make_resampled_transformation(nii_obj,resample_dim=[4,4,4],standard_mask=True):

    nii_obj = get_nii_obj(nii_obj)[0]

    # To set 0s to nan, we need to have float64 data type
    true_zeros = numpy.zeros(nii_obj.shape) # default data_type is float64
    true_zeros[:] = nii_obj.get_data()
    true_zeros[true_zeros==0] = numpy.nan

    # Rebuild as a NIfTI image; nans are preserved through the resampling below
    true_zeros = nib.nifti1.Nifti1Image(true_zeros,affine=nii_obj.get_affine())
    
    # Standard brain masking
    if standard_mask:
        standard = get_standard_mask(voxdim=resample_dim[0])
        true_zeros = resample_img(true_zeros,target_affine=standard.get_affine(), 
                                  target_shape=standard.shape)
      
        # Mask the image 
        masked_true_zeros = numpy.zeros(true_zeros.shape)
        masked_true_zeros[standard.get_data()!=0] = true_zeros.get_data()[standard.get_data()!=0]
        true_zeros = nib.nifti1.Nifti1Image(masked_true_zeros,affine=true_zeros.get_affine())

    # or just resample
    else: 
        # resample only if the voxel dimensions differ from the target
        if (resample_dim != numpy.diag(true_zeros.get_affine())[0:3]).any():
            true_zeros = resample_img(true_zeros,target_affine=numpy.diag(resample_dim))

    return true_zeros
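
A minimal usage sketch for the function above. It assumes make_resampled_transformation and its module helpers (get_nii_obj, get_standard_mask) are importable, and "zstat1.nii.gz" is a placeholder statistical map.

import nibabel as nib

# Placeholder input map; any 3D NIfTI image would do.
img = nib.load("zstat1.nii.gz")

# Resample to 4mm and apply the standard MNI brain mask (default behaviour).
masked_4mm = make_resampled_transformation(img, resample_dim=[4, 4, 4], standard_mask=True)

# Or only resample, without the standard mask.
resampled_4mm = make_resampled_transformation(img, resample_dim=[4, 4, 4], standard_mask=False)

print(masked_4mm.shape, resampled_4mm.shape)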
Example #2
def resample_images_ref(images,reference,interpolation,resample_dim=None):
    '''Resample many images to a single reference

    images: nibabel.Nifti1Image list
        list of image files or nibabel images

    reference: nibabel.Nifti1Image
        single image file or nibabel image

    interpolation: str
        interpolation method passed to nilearn's resample_img (e.g. 'continuous' or 'nearest')

    resample_dim: list, optional
        if given, the reference is first resampled to these voxel dimensions
    '''

    if isinstance(reference,str): reference = nibabel.load(reference)
    if resample_dim:
        affine = numpy.diag(resample_dim)
        reference = resample_img(reference, target_affine=affine)

    # Resample images to match reference mask affine and shape
    if not isinstance(images,list): images = [images]
    images_nii = get_nii_obj(images)

    # Make sure we don't have any with a singleton fourth dimension
    images_nii = squeeze_fourth_dimension(images_nii)

    images_resamp = []
    for image in images_nii:
        # Only resample if the image is different from the reference
        if not (image.get_affine() == reference.get_affine()).all():
            resampled_img = resample_img(image,target_affine=reference.get_affine(), 
                                         target_shape=reference.shape,
                                         interpolation=interpolation)
        else: 
            resampled_img = image
        
        images_resamp.append(resampled_img)
    
    return images_resamp, reference
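
A short usage sketch, assuming resample_images_ref and its helpers (get_nii_obj, squeeze_fourth_dimension) are importable from this module; the file names are placeholders.

import nibabel

images = ["map_a.nii.gz", "map_b.nii.gz"]          # placeholder statistical maps
reference = nibabel.load("reference.nii.gz")       # placeholder reference image

# Resample both maps onto the reference grid; the reference itself is first
# moved to a 4mm grid because resample_dim is given.
images_resamp, reference_4mm = resample_images_ref(images, reference,
                                                   interpolation="continuous",
                                                   resample_dim=[4, 4, 4])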
Example #3
    def fit(self):

        if self.resampling is not None:
            resample = np.diag(self.resampling * np.ones(3))
        else:
            resample = None

        self.mask_img = resample_img(self.mask_img, target_affine=resample, interpolation='nearest')

        if not isinstance(self.rois, tuple):
            self.masker = dict()
            for roi_id, roi in enumerate(self.rois):
                if self.resampling is not None:
                    roi = resample_img(roi, target_affine=resample, interpolation='nearest')
                self.masker[roi_id] = NiftiMasker(mask_img=roi)
                self.masker[roi_id].fit()
        else:
            self.masker = [None] * len(self.rois)  # first create as list..
            for m, rois_modality in enumerate(self.rois):
                self.masker[m] = dict()
                for roi_id, roi in enumerate(rois_modality):
                    if self.resampling is not None:
                        roi = resample_img(roi, target_affine=resample, interpolation='nearest')
                    self.masker[m][roi_id] = NiftiMasker(mask_img=roi)
                    self.masker[m][roi_id].fit()
            self.masker = tuple(self.masker)  # .. then make conform again

        return self
    def fit(self, X, y):
        """Fit estimators from the training set (X, y).

        Returns
        -------
        self : object
            Returns self.
        """

        if self.base_estimator_._estimator_type == 'classifier':
            scoring = None
        else:
            scoring = 'mean_squared_error'

        X_test = nibabel.load(X[0]) if isinstance(X[0], str) else X[0]
        mask_test = nibabel.load(self.mask_img) if isinstance(self.mask_img, str) else self.mask_img
        process_mask_test = nibabel.load(self.process_mask_img) if isinstance(self.process_mask_img, str) else self.process_mask_img
        if not np.array_equal(X_test.affine, mask_test.affine) or not np.array_equal(X_test.shape, mask_test.shape):
            self.mask_img = resample_img(mask_test, target_affine=X_test.affine, target_shape=X_test.shape, interpolation='nearest')
        if not np.array_equal(X_test.affine, process_mask_test.affine) or not np.array_equal(X_test.shape, process_mask_test.shape):
            self.process_mask_img = resample_img(process_mask_test, target_affine=X_test.affine, target_shape=X_test.shape, interpolation='nearest')

        searchlight = SearchLight(self.mask_img, process_mask_img=self.process_mask_img, estimator=self.base_estimator_, scoring=scoring,
                                  radius=self.radius, n_jobs=self.n_jobs, estimator_params=self.base_estimator_args, cv=LeaveOneOut(n=len(y)))
        searchlight.fit(X, y)
        if np.all(searchlight.scores_ == 0):
            raise RuntimeError('Oops, something probably went wrong: all searchlight scores have value 0.')
        if self.base_estimator_._estimator_type == 'classifier':
            best_centers = np.unravel_index(np.argsort(searchlight.scores_, axis=None)[-self.n_estimators:],
                                            searchlight.scores_.shape)
        else:
            best_centers = np.unravel_index(np.argsort(.1/(-searchlight.scores_ - 1e-30), axis=None)[-self.n_estimators:],
                                            searchlight.scores_.shape)
        self.best_spheres = get_sphere_indices(self.mask_img, np.array(best_centers).T.tolist(), self.radius)

        # for v in range(self.n_estimators):
        #     self.estimators_ += [ESTIMATOR_CATALOG[searchlight.estimator](**self.estimator_params)]
        #     self.estimators_[v].fit(np.array([x.get_data()[self.best_spheres[v]] for x in X]), y)

        estimators = []
        for i in range(self.n_estimators):
            estimator = self._make_estimator(append=False)
            estimators.append(estimator)

        if not isinstance(X[0], nibabel.Nifti1Image):
            X = [nibabel.load(x) for x in X]


        # for v, e in enumerate(estimators):
        #     print(v)
        #     _parallel_build_estimator(e, np.array([x.get_data()[self.best_spheres[v]] for x in X]), y)

        estimators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")(
                     delayed(_parallel_build_estimator)(e, np.array([x.get_data()[self.best_spheres[v]] for x in X]), y)
                     for v, e in enumerate(estimators))

        self.estimators_ = estimators

        return self
def _gen_reference(fixed_image, moving_image, out_file=None):
    import numpy
    from nilearn.image import resample_img, load_img

    if out_file is None:
        out_file = genfname(fixed_image, suffix='reference')
    new_zooms = load_img(moving_image).header.get_zooms()[:3]
    # Avoid small differences in reported resolution to cause changes to
    # FOV. See https://github.com/poldracklab/fmriprep/issues/512
    new_zooms_round = numpy.round(new_zooms, 3)
    resample_img(fixed_image, target_affine=numpy.diag(new_zooms_round),
                 interpolation='nearest').to_filename(out_file)
    return out_file
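
A hedged sketch of calling this helper; genfname is a module-local helper assumed to exist (it is only used when out_file is None), and both file names are placeholders.

# Write a nearest-neighbour resampled copy of the fixed image whose voxel
# size matches the moving image, and get back the output path.
ref_file = _gen_reference("fixed_T1w.nii.gz", "moving_bold.nii.gz",
                          out_file="reference.nii.gz")
print(ref_file)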
Example #6
def not_in_mni(nii, target_template_image=DEFAULT_TEMPLATE, plot=False):
    this_path = os.path.abspath(os.path.dirname(__file__))

    POSSIBLE_TEMPLATES = get_possible_templates()
    mask_path = POSSIBLE_TEMPLATES[target_template_image]['mask']
    if mask_path is None:
        return False, 100.0, 100.0

    mask_nii = nb.load(os.path.join(this_path, "static", 'anatomical',mask_path))

    #resample to the smaller one
    if np.prod(nii.shape) > np.prod(mask_nii.shape):
        nan_mask = np.isnan(nii.get_data())
        if nan_mask.sum() > 0:
            nii.get_data()[nan_mask] = 0
        nii = resample_img(nii, target_affine=mask_nii.get_affine(), target_shape=mask_nii.get_shape(),interpolation='nearest')
    else:
        mask_nii = resample_img(mask_nii, target_affine=nii.get_affine(), target_shape=nii.get_shape(),interpolation='nearest')

    brain_mask = mask_nii.get_data() > 0
    excursion_set = np.logical_not(np.logical_or(nii.get_data() == 0, np.isnan(nii.get_data())))

    # deals with AFNI files
    if len(excursion_set.shape) == 5:
        excursion_set = excursion_set[:, :, :, 0, 0]
    # deal with 4D files
    elif len(excursion_set.shape) == 4:
        excursion_set = excursion_set[:, :, :, 0]
    in_brain_voxels = np.logical_and(excursion_set, brain_mask).sum()
    out_of_brain_voxels = np.logical_and(excursion_set, np.logical_not(brain_mask)).sum()


    perc_mask_covered = in_brain_voxels/float(brain_mask.sum())*100.0
    if np.isnan(perc_mask_covered):
        perc_mask_covered = 0
    perc_voxels_outside_of_mask = out_of_brain_voxels/float(excursion_set.sum())*100.0

    if perc_mask_covered > 50:
        if perc_mask_covered < 90 and perc_voxels_outside_of_mask > 20:
            ret = True
        else:
            ret = False
    elif perc_mask_covered == 0:
        ret = True
    elif perc_voxels_outside_of_mask > 50:
        ret = True
    else:
        ret = False

    return ret, perc_mask_covered, perc_voxels_outside_of_mask
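
A usage sketch, assuming the bundled template masks are available and "zstat1.nii.gz" is a placeholder map.

import nibabel as nb

nii = nb.load("zstat1.nii.gz")
outside, perc_covered, perc_outside = not_in_mni(nii)
if outside:
    print("Map does not appear to be in MNI space: "
          "%.1f%% of the mask covered, %.1f%% of voxels outside it"
          % (perc_covered, perc_outside))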
def download_and_resample(images_df, dest_dir, target):
    """Downloads all stat maps and resamples them to a common space.
    """
    
    target_nii = nb.load(target)
    orig_path = os.path.join(dest_dir, "original")
    mkdir_p(orig_path)
    resampled_path = os.path.join(dest_dir, "resampled")
    mkdir_p(resampled_path)
    
    for row in images_df.iterrows():
        # Downloading the file to the "original" subfolder
        _, _, ext = split_filename(row[1]['file'])
        orig_file = os.path.join(orig_path, "%04d%s" % (row[1]['image_id'], ext))
        if not os.path.exists(orig_file):
            urllib.urlretrieve(row[1]['file'], orig_file)
        
        # Resampling the file to target and saving the output in the "resampled"
        # folder
        resampled_file = os.path.join(resampled_path, 
            "%04d_2mm%s" % (row[1]['image_id'], ext))
        if not os.path.exists(resampled_file):
            resampled_nii = resample_img(orig_file, target_nii.get_affine(), 
                target_nii.shape)
            resampled_nii.to_filename(resampled_file)
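
A sketch of calling the downloader. It assumes images_df is a pandas DataFrame with the 'file' and 'image_id' columns the loop reads, and that the target template path exists; all values below are placeholders.

import pandas as pd

images_df = pd.DataFrame({
    "image_id": [1, 2],
    "file": ["http://example.org/0001.nii.gz", "http://example.org/0002.nii.gz"],
})

download_and_resample(images_df, dest_dir="/tmp/neurovault",
                      target="MNI152_T1_2mm_brain.nii.gz")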
def get_frequency_map(images_df, dest_dir, target):
    """
    """
    
    target_nii = nb.load(target)
    orig_path = os.path.join(dest_dir, "original")
    freq_map_data = np.zeros(target_nii.shape)
    
    for row in images_df.iterrows():
        _, _, ext = split_filename(row[1]['file'])
        orig_file = os.path.join(orig_path, "%04d%s" % (row[1]['image_id'], ext))
        if not os.path.exists(orig_file):
            urllib.urlretrieve(row[1]['file'], orig_file)
        
        resampled_nii = resample_img(orig_file, target_nii.get_affine(), 
                                     target_nii.shape, 
                                     interpolation="nearest")
        data = resampled_nii.get_data()
        data[data != 0] = 1
        if len(data.shape) == 4:
            data.shape = data.shape[:3]
        freq_map_data += data
        
    return nb.Nifti1Image(freq_map_data, target_nii.get_affine())
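
Building on the same hypothetical images_df, the frequency map could be computed and saved like this; paths remain placeholders.

freq_map = get_frequency_map(images_df, dest_dir="/tmp/neurovault",
                             target="MNI152_T1_2mm_brain.nii.gz")
freq_map.to_filename("/tmp/neurovault/frequency_map.nii.gz")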
Example #9
    def preprocess(self, imgs):

        smooth_prefix = '' if self.smoothing_fwhm is None else 's%g' % self.smoothing_fwhm
        resample_prefix = '' if self.resampling is None else 'r%g' % self.resampling

        if not isinstance(imgs, list):
            imgs = [imgs]

        path_first = imgs[0] if isinstance(imgs[0], str) else imgs[0].get_filename()

        path_first_resampled = os.path.join(os.path.dirname(path_first), resample_prefix + os.path.basename(path_first))
        path_first_smoothed = os.path.join(os.path.dirname(path_first), smooth_prefix + resample_prefix + os.path.basename(path_first))

        if self.resampling is not None or self.smoothing_fwhm is not None:
            if self.resampling is not None and not os.path.exists(path_first_smoothed):
                if not os.path.exists(path_first_resampled):
                    imgs = resample_img(imgs, target_affine=np.diag(self.resampling * np.ones(3)))
                else:
                    imgs = [os.path.join(os.path.dirname(img), resample_prefix + os.path.basename(img)) if isinstance(img, str)
                            else os.path.join(os.path.dirname(img.get_filename()), resample_prefix + os.path.basename(img.get_filename())) for img in imgs]
            if self.smoothing_fwhm is not None:
                if not os.path.exists(path_first_smoothed):
                    imgs = smooth_img(imgs, self.smoothing_fwhm)
                else:
                    imgs = [os.path.join(os.path.dirname(img), smooth_prefix + resample_prefix + os.path.basename(img)) if isinstance(img, str)
                            else os.path.join(os.path.dirname(img.get_filename()), smooth_prefix + resample_prefix + os.path.basename(img.get_filename())) for img in imgs]
        else:
            imgs = [check_niimg_3d(img) for img in imgs]

        return imgs
Example #10
    def transform(self, imgs, confounds=None):

        smooth_prefix = '' if self.smoothing_fwhm is None else 's%g' % self.smoothing_fwhm
        resample_prefix = '' if self.resampling is None else 'r%g' % self.resampling

        if not isinstance(imgs, list):
            imgs = [imgs]

        path_first = imgs[0] if isinstance(imgs[0], str) else imgs[0].get_filename()

        path_first_resampled = os.path.join(os.path.dirname(path_first), resample_prefix + os.path.basename(path_first))
        path_first_smoothed = os.path.join(os.path.dirname(path_first), smooth_prefix + resample_prefix + os.path.basename(path_first))

        if self.resampling is not None or self.smoothing_fwhm is not None:
            if self.resampling is not None:
                if not os.path.exists(path_first_resampled) and not os.path.exists(path_first_smoothed):
                    imgs = resample_img(imgs, target_affine=np.diag(self.resampling * np.ones(3)))
                else:
                    imgs = []
            if self.smoothing_fwhm is not None:
                if not os.path.exists(path_first_smoothed):
                    imgs = smooth_img(imgs, self.smoothing_fwhm)
                else:
                    imgs = []
        else:
            imgs = [check_niimg_3d(img) for img in imgs]

        return self.masker.transform(imgs)
def test_view_img():
    mni = datasets.load_mni152_template()
    with warnings.catch_warnings(record=True) as w:
        # Create a fake functional image by resampling the template
        img = image.resample_img(mni, target_affine=3 * np.eye(3))
        html_view = html_stat_map.view_img(img)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, threshold='95%')
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, bg_img=mni)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, bg_img=None)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, threshold=2., vmax=4.)
        _check_html(html_view)
        html_view = html_stat_map.view_img(img, symmetric_cmap=False)
        img_4d = image.new_img_like(img, img.get_data()[:, :, :, np.newaxis])
        assert len(img_4d.shape) == 4
        html_view = html_stat_map.view_img(img_4d, threshold=2., vmax=4.)
        _check_html(html_view)

    # Check that all warnings were expected
    warnings_set = set(warning_.category for warning_ in w)
    expected_set = set([FutureWarning, UserWarning,
                       DeprecationWarning])
    assert warnings_set.issubset(expected_set), (
        "the following warnings were not expected: {}").format(
        warnings_set.difference(expected_set))
Example #12
    def apply_mask(self, mask):
        """ Mask Brain_Data instance

        Args:
            mask: mask (Brain_Data or nifti object)
            
        """

        if isinstance(mask,Brain_Data):
            mask = mask.to_nifti() # convert to nibabel
        if not isinstance(mask, nib.Nifti1Image):
            if type(mask) is str:
                if os.path.isfile(mask):
                    mask = nib.load(mask)
                # Check if mask needs to be resampled into Brain_Data mask space
                if not ((self.mask.get_affine()==mask.get_affine()).all()) & (self.mask.shape[0:3]==mask.shape[0:3]):
                    mask = resample_img(mask,target_affine=self.mask.get_affine(),target_shape=self.mask.shape)
            else:
                raise ValueError("Mask is not a nibabel instance, Brain_Data instance, or a valid file name.")

        masked = deepcopy(self)
        nifti_masker = NiftiMasker(mask_img=mask)
        masked.data = nifti_masker.fit_transform(self.to_nifti())
        if len(self.data.shape) > 2:
            masked.data = masked.data.squeeze()
        masked.nifti_masker = nifti_masker
        return masked
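
A hedged usage sketch; it assumes Brain_Data can be constructed from a NIfTI file path (as in nltools-style usage) and that both file names are placeholders.

dat = Brain_Data("beta_maps.nii.gz")               # hypothetical data images
roi = dat.apply_mask("amygdala_mask.nii.gz")       # mask is resampled if needed
print(roi.data.shape)                              # observations x in-mask voxels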
Example #13
def not_in_mni(nii, plot=False):
    this_path = os.path.abspath(os.path.dirname(__file__))
    mask_nii = nb.load(os.path.join(this_path, "static", "anatomical", "MNI152_T1_2mm_brain_mask.nii.gz"))

    # resample to the smaller one
    if np.prod(nii.shape) > np.prod(mask_nii.shape):
        nan_mask = np.isnan(nii.get_data())
        if nan_mask.sum() > 0:
            nii.get_data()[nan_mask] = 0
        nii = resample_img(
            nii, target_affine=mask_nii.get_affine(), target_shape=mask_nii.get_shape(), interpolation="nearest"
        )
    else:
        mask_nii = resample_img(
            mask_nii, target_affine=nii.get_affine(), target_shape=nii.get_shape(), interpolation="nearest"
        )

    brain_mask = mask_nii.get_data() > 0
    excursion_set = np.logical_not(np.logical_or(nii.get_data() == 0, np.isnan(nii.get_data())))

    # deals with AFNI files
    if len(excursion_set.shape) == 5:
        excursion_set = excursion_set[:, :, :, 0, 0]
    # deal with 4D files
    elif len(excursion_set.shape) == 4:
        excursion_set = excursion_set[:, :, :, 0]
    in_brain_voxels = np.logical_and(excursion_set, brain_mask).sum()
    out_of_brain_voxels = np.logical_and(excursion_set, np.logical_not(brain_mask)).sum()

    perc_mask_covered = in_brain_voxels / float(brain_mask.sum()) * 100.0
    if np.isnan(perc_mask_covered):
        perc_mask_covered = 0
    perc_voxels_outside_of_mask = out_of_brain_voxels / float(excursion_set.sum()) * 100.0

    if perc_mask_covered > 50:
        if perc_mask_covered < 90 and perc_voxels_outside_of_mask > 20:
            ret = True
        else:
            ret = False
    elif perc_mask_covered == 0:
        ret = True
    elif perc_voxels_outside_of_mask > 50:
        ret = True
    else:
        ret = False

    return ret, perc_mask_covered, perc_voxels_outside_of_mask
Example #14
def loader(anat, downsample, target_affine, dataroot, subject, maskpath, nrun,
           niifilename, labels, **kwargs):
    '''
    All parameters are submitted as a cfg dictionary.
    Given the parameters in cfg, return data masked and concatenated over runs

    Input
    anat: MNI template
    downsample: 1 or 0
    target_affine: downsampling matrix
    dataroot: element of path to data
    subject: folder in dataroot with subject data
    maskpath: path to mask
    nrun: number of runs
    niifilename: name of the data file
    labels: labels from load_labels function

    Output
    dict(nii_func=nii_func, nii_mean=nii_mean, masker=masker, nii_mask=nii_mask)
    nii_func: masked functional data concatenated over runs (timepoints x voxels)
    nii_mean: mean over the 4th dimension
    masker: NiftiMasker object from nilearn
    nii_mask: 3D mask
    '''
    nii_func = list()
    for r in range(nrun):
        fname = '{0}/{1}/run{2}/{3}'.format(dataroot, subject, r+1, niifilename) # Assumption about file location
        nii_img = load(fname, mmap=False)
        nii_img.set_sform(anat.get_sform())
        # Get mean over 4D
        nii_mean = mean_img(nii_img)
        # Masking
        nii_mask = load(maskpath)
        nii_mask.set_sform(anat.get_sform())
        # Binarize the mask
        nii_mask = check_binary(nii_mask)
        if downsample:
            nii_img = resample_img(nii_img, target_affine=target_affine)
            nii_mask = resample_img(nii_mask, target_affine=target_affine, interpolation='nearest')
        masker = NiftiMasker(nii_mask, standardize=True)
        nii_img = masker.fit_transform(nii_img)
        # Drop zero timepoints, zscore
        nii_img = drop_labels(nii_img, labels.get('to_drop_zeros')[r])
        nii_func.append(stats.zscore(nii_img, axis=0)) # zscore over time
    # throw data together
    nii_func = np.concatenate(nii_func)
    return dict(nii_func=nii_func, nii_mean=nii_mean, masker=masker, nii_mask=nii_mask)
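
A sketch of the cfg-style call described in the docstring. Every path and value below is a placeholder, and load_labels is the module helper the docstring mentions (its exact signature is assumed here).

import nibabel as nib
import numpy as np

labels = load_labels("/data/study/sub-01/labels.csv")   # assumed helper signature
cfg = dict(
    anat=nib.load("MNI152_T1_2mm.nii.gz"),    # MNI template (placeholder path)
    downsample=1,                             # resample to target_affine
    target_affine=np.diag((4, 4, 4)),         # 4mm grid
    dataroot="/data/study",
    subject="sub-01",
    maskpath="/data/study/masks/gm_mask.nii.gz",
    nrun=4,
    niifilename="bold.nii.gz",
    labels=labels,
)
data = loader(**cfg)
print(data["nii_func"].shape)                 # timepoints x masked voxels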
Example #15
    def _contrast(self, contrast_id, contrast_values):
        contrast = None

        n_regressors = [dm.size for dm in self.design_mask_]
        contrast_values = check_contrast(contrast_values, n_regressors)

        for i, (glm, design_mask, con_val) in enumerate(
                zip(self.glm_, self.design_mask_, contrast_values)):

            if (con_val is None or np.all(con_val == 0) or con_val.size == 0
                or glm is None or np.any(con_val[~design_mask] != 0)):
                # contrast null for session, or design_matrix ill conditioned
                # or con_val is using a null regressor
                pass
            elif contrast is None:
                contrast = glm.contrast(
                    con_val[design_mask], contrast_type=self.contrast_type)
            else:
                contrast = contrast + glm.contrast(
                    con_val[design_mask], contrast_type=self.contrast_type)

        if contrast is None:
            return dict()

        mask_array = self.masker.mask_img_.get_data().astype('bool')
        affine = self.masker.mask_img_.get_affine()

        if self.output_z or self.output_stat:
            # compute the contrast and stat
            contrast.z_score()

        do_outputs = [self.output_z, self.output_stat,
                      self.output_effects, self.output_variance]
        estimates = ['z_score_', 'stat_', 'effect', 'variance']
        descrips = ['z statistic', 'Statistical value',
                    'Estimated effect', 'Estimated variance']
        outputs = []
        for (do_output, estimate, descrip) in zip(
                do_outputs, estimates, descrips):

            if do_output:
                result_map = np.zeros(mask_array.shape)
                result_map[mask_array] = getattr(contrast, estimate).squeeze()
                niimg = nb.Nifti1Image(result_map, affine=affine)
                if (self.target_affine is not None
                        or self.target_shape is not None):
                    niimg = resample_img(
                        niimg,
                        target_affine=self.target_affine,
                        target_shape=self.target_shape)
                output_dir = os.path.join(
                    self.output_dir, '%s_maps' % estimate.rsplit('_')[0])
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                map_path = os.path.join(output_dir, '%s.nii.gz' % contrast_id)
                niimg.to_filename(map_path)
                outputs.append(map_path)

        return outputs
    def transform(self, f):
        if isinstance(f, str):
            f = nib.load(f)
        if f.shape != self._mask_image.shape:
            f = image.resample_img(f,
                                   target_shape=self._mask_image.shape,
                                   target_affine=self._mask_image.get_affine())
        return np.array(f.get_data()[self._indexes])
Example #17
    def fit(self):

        if self.resampling is not None:
            # nearest-neighbour interpolation keeps the resampled mask binary
            self.mask_img = resample_img(self.mask_img, target_affine=np.diag(self.resampling * np.ones(3)),
                                         interpolation='nearest')
        self.masker = NiftiMasker(mask_img=self.mask_img)
        self.masker.fit()

        return self
Example #18
def spatial_normalize_image(image, standard):
    # Make sure we have nibabel image objects
    if determine_read_data(standard):
        standard = read_image(standard)
    if determine_read_data(image):
        image = read_image(image)
    resampled = resample_img(image, target_affine=standard.get_affine(), target_shape=standard.shape[:3])
    transformed = resampled.get_data()
    return transformed
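
A minimal sketch, assuming determine_read_data and read_image are importable from the same module; both file names are placeholders.

# Returns the voxel array of the image resampled onto the standard grid.
data = spatial_normalize_image("subject_map.nii.gz", "MNI152_T1_2mm.nii.gz")
print(data.shape)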
Example #19
def get_standard_brain(voxdim=2):
    mr_directory = get_data_directory()
    brain = "%s/MNI152_T1_%smm_brain.nii.gz" %(mr_directory,voxdim)
    if not os.path.exists(brain):
        brain = nib.load("%s/MNI152_T1_2mm_brain.nii.gz" %(mr_directory))
        brain = resample_img(brain,target_affine=numpy.diag([voxdim,voxdim,voxdim]))
        return brain
    else:
        return nib.load(brain)
Example #20
def get_standard_mask(voxdim=2):
    mr_directory = get_data_directory()
    mask = "%s/MNI152_T1_%smm_brain_mask.nii.gz" %(mr_directory,voxdim)
    if not os.path.exists(mask):
        mask = nib.load("%s/MNI152_T1_2mm_brain_mask.nii.gz" %(mr_directory))
        mask = resample_img(mask,target_affine=numpy.diag([voxdim,voxdim,voxdim]),interpolation="nearest")
        return mask
    else:
        return nib.load(mask)
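
A short sketch using the two helpers above together, assuming the MNI152 2mm files are present in get_data_directory(); resampled 4mm versions are generated on the fly when missing.

brain_4mm = get_standard_brain(voxdim=4)
mask_4mm = get_standard_mask(voxdim=4)
print(brain_4mm.shape, mask_4mm.shape)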
Example #21
    def transform(self, imgs, confounds=None):
        if self.smoothing_fwhm is not None or self.target_affine is not None:
            if self.smoothing_fwhm is not None:
                imgs = smooth_img(imgs, self.smoothing_fwhm)
            if self.target_affine is not None:
                imgs = resample_img(imgs, target_affine=self.target_affine)
        else:
            imgs = [check_niimg_3d(img) for img in imgs] if isinstance(imgs, list) else check_niimg_3d(imgs)

        return imgs
Example #22
def resample(in_file, **kwargs):
    """ Use nilearn.image.resample_img.

    Returns
    -------
    out_file: str
        The absolute path to the output file.
    """
    from nilearn.image import resample_img
    return resample_img(img=in_file, **kwargs)
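
Since the wrapper simply forwards keyword arguments to nilearn.image.resample_img, a call might look like the following; the input path is a placeholder.

import numpy as np

resampled = resample("bold_mean.nii.gz",
                     target_affine=np.diag((3, 3, 3)),
                     interpolation="nearest")
resampled.to_filename("bold_mean_3mm.nii.gz")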
    def _contrast(self, contrast_id, contrast_values):
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        contrast = None

        n_regressors = [glm.X.shape[1] for glm in self.glm_]
        contrast_values = check_contrast(contrast_values, n_regressors)

        for i, (glm, con_val) in enumerate(zip(self.glm_, contrast_values)):
            if con_val is None or np.all(con_val == 0):
                pass  # print 'Contrast for session %d is null' % i
            elif contrast is None:
                contrast = glm.contrast(
                    con_val, contrast_type=self.contrast_type)
            else:
                contrast = contrast + glm.contrast(
                    con_val, contrast_type=self.contrast_type)

        if contrast is None:
            return dict()

        mask_array = self.masker.mask_img_.get_data().astype('bool')
        affine = self.masker.mask_img_.get_affine()

        if self.output_z or self.output_stat:
            # compute the contrast and stat
            contrast.z_score()

        do_outputs = [self.output_z, self.output_stat,
                      self.output_effects, self.output_variance]
        estimates = ['z_score_', 'stat_', 'effect', 'variance']
        descrips = ['z statistic', 'Statistical value',
                    'Estimated effect', 'Estimated variance']
        outputs = []
        for (do_output, estimate, descrip) in zip(
                do_outputs, estimates, descrips):

            if do_output:
                result_map = np.zeros(mask_array.shape)
                result_map[mask_array] = getattr(contrast, estimate).squeeze()
                niimg = nb.Nifti1Image(result_map, affine=affine)
                if (self.target_affine is not None
                        or self.target_shape is not None):
                    niimg = resample_img(
                        niimg,
                        target_affine=self.target_affine,
                        target_shape=self.target_shape)

                niimg_path = os.path.join(
                    self.output_dir, '%s_map.nii.gz' % contrast_id)
                niimg.to_filename(niimg_path)
                outputs.append(niimg_path)
        return outputs
Example #24
def resize_image(input_image,voxel_dimension):
  if len(voxel_dimension) != 3:
    print "Please specify a list of three voxel sizes, e.g. (3,3,3)"
    return
  if determine_read_data(input_image):
    header = nibabel.load(input_image)
  else:
    header = input_image
  voxel_dimension = tuple(voxel_dimension)
  target_affine = np.diag(voxel_dimension)
  resampled = resample_img(header, target_affine=target_affine)
  return resampled
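
A minimal sketch, assuming determine_read_data is importable from the same module and the input file is a placeholder.

resampled = resize_image("anatomical.nii.gz", [3, 3, 3])
resampled.to_filename("anatomical_3mm.nii.gz")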
def test_view_stat_map():
    mni = datasets.load_mni152_template()
    # Create a fake functional image by resampling the template
    img = image.resample_img(mni, target_affine=3*np.eye(3))
    html = html_stat_map.view_stat_map(img)
    _check_html(html)
    html = html_stat_map.view_stat_map(img, threshold='95%')
    _check_html(html)
    html = html_stat_map.view_stat_map(img, bg_img=mni)
    _check_html(html)
    html = html_stat_map.view_stat_map(img, threshold=2., vmax=4.)
    _check_html(html)
Example #26
def load_image(masker, filename, save_resampled=True):
    """ Load an image, resampling into MNI space if needed. """
    filename = join(settings.DECODED_IMAGE_DIR, filename)
    img = nb.load(filename)
    if img.shape[:3] != (91, 109, 91):
        img = resample_img(
            img, target_affine=decode_image.anatomical.get_affine(),
            target_shape=(91, 109, 91), interpolation='nearest')
        if save_resampled:
            unlink(filename)
            img.to_filename(filename)
    return masker.mask(img)
Example #27
    def plotCorrelationResults(self, background, overlay, desired_resolution_file_location):
        image_to_resample = nibabel.load(background)
        image_to_use_for_sample = image.index_img(desired_resolution_file_location, 0)
        resampled_background = resample_img(image_to_resample,target_affine = image_to_use_for_sample.get_affine(), target_shape=image_to_use_for_sample.shape)

        mri_args = {
            'background' : resampled_background,
            'cmap_bg' : 'gray',
            'cmap_overlay' : 'PiYG', # YlOrRd_r # pl.cm.autumn
            'interactive' : cfg.getboolean('examples', 'interactive', True),
            }

        plot_lightbox(overlay=overlay, vlim=(-1.0, 1.0), do_stretch_colors=True, **mri_args)
def compute_confounds(imgs, mask_img, n_confounds=5, get_randomized_svd=False,
                      compute_not_mask=False):
    """
    """
    confounds = []
    if not isinstance(imgs, collections.Iterable) or \
            isinstance(imgs, _basestring):
        imgs = [imgs, ]

    img = _utils.check_niimg_4d(imgs[0])
    shape = img.shape[:3]
    affine = get_affine(img)

    if isinstance(mask_img, _basestring):
        mask_img = _utils.check_niimg_3d(mask_img)

    if not _check_same_fov(img, mask_img):
        mask_img = resample_img(
            mask_img, target_shape=shape, target_affine=affine,
            interpolation='nearest')

    if compute_not_mask:
        print("Non mask based confounds extraction")
        not_mask_data = np.logical_not(mask_img.get_data().astype(np.int))
        whole_brain_mask = masking.compute_multi_epi_mask(imgs)
        not_mask = np.logical_and(not_mask_data, whole_brain_mask.get_data())
        mask_img = new_img_like(img, not_mask.astype(np.int), affine)

    for img in imgs:
        print("[Confounds Extraction] {0}".format(img))
        img = _utils.check_niimg_4d(img)
        print("[Confounds Extraction] high ariance confounds computation]")
        high_variance = high_variance_confounds(img, mask_img=mask_img,
                                                n_confounds=n_confounds)
        if compute_not_mask and get_randomized_svd:
            signals = masking.apply_mask(img, mask_img)
            non_constant = np.any(np.diff(signals, axis=0) != 0, axis=0)
            signals = signals[:, non_constant]
            signals = signal.clean(signals, detrend=True)
            print("[Confounds Extraction] Randomized SVD computation")
            U, s, V = randomized_svd(signals, n_components=n_confounds,
                                     random_state=0)
            if high_variance is not None:
                confound_ = np.hstack((U, high_variance))
            else:
                confound_ = U
        else:
            confound_ = high_variance
        confounds.append(confound_)

    return confounds
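
A usage sketch with placeholder file names; one confound array of shape (n_timepoints, n_confounds) is returned per input image.

runs = ["sub-01_run-1_bold.nii.gz", "sub-01_run-2_bold.nii.gz"]   # placeholder 4D images
confounds = compute_confounds(runs, mask_img="gm_mask.nii.gz", n_confounds=5)
for run, conf in zip(runs, confounds):
    print(run, conf.shape)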
Example #29
def not_in_mni(nii, plot=False):
    mask_nii = nb.load(os.path.join(settings.STATIC_ROOT,'anatomical','MNI152_T1_2mm_brain_mask.nii.gz'))
    
    #resample to the smaller one
    if np.prod(nii.shape) > np.prod(mask_nii.shape):
        nan_mask = np.isnan(nii.get_data())
        if nan_mask.sum() > 0:
            nii.get_data()[nan_mask] = 0
        nii = resample_img(nii, target_affine=mask_nii.get_affine(), target_shape=mask_nii.get_shape(),interpolation='nearest')
    else:
        mask_nii = resample_img(mask_nii, target_affine=nii.get_affine(), target_shape=nii.get_shape(),interpolation='nearest')
    
    brain_mask = mask_nii.get_data() > 0
    excursion_set = np.logical_not(np.logical_or(nii.get_data() == 0, np.isnan(nii.get_data())))    
    
    in_brain_voxels = np.logical_and(excursion_set, brain_mask).sum()
    out_of_brain_voxels = np.logical_and(excursion_set, np.logical_not(brain_mask)).sum()
    
    
    perc_mask_covered = in_brain_voxels/float(brain_mask.sum())*100.0
    if np.isnan(perc_mask_covered):
        perc_mask_covered = 0
    perc_voxels_outside_of_mask = out_of_brain_voxels/float(excursion_set.sum())*100.0
    
    if perc_mask_covered > 50:
        if perc_mask_covered < 90 and perc_voxels_outside_of_mask > 20:
            ret = True
        else:
            ret = False
    elif perc_mask_covered == 0:
        ret = True
    elif perc_voxels_outside_of_mask > 50:
        ret = True
    else:
        ret = False
    
    return ret, perc_mask_covered, perc_voxels_outside_of_mask
Example #30
def SingleSubject(cfg):
    '''
    Pipeline to run basic single-subject classification
    '''

    ## Get crossval scheme, can modify here later if we have variable schemes
    crossval = Analysis.get_crossval(**cfg)
    cfg.update(crossval=crossval)    
    print('Crossval  acquired')

    ## Load the data
    data_test = load_data(cfg)
    cfg.update(data_test=data_test.get('nii_func'),
               nii_mean=data_test.get('nii_mean'),
               nii_mask=data_test.get('nii_mask'),
               masker=data_test.get('masker'))
    print('Data loaded')
    
    ## Classify
    cv_scores,y_pred_all = Analysis.classify(**cfg)
    coef_img,coef_2mm = Analysis.get_impmap(**cfg)    
    print('Classification done')

    ## Searchlight
    sl_4mm = slight.get_searchlight(**cfg)
    # Upsample
    sl_2mm = resample_img(sl_4mm,target_affine = np.diag((2,2,2)))
    print('Searchlight done')
    
    ##  Permutations
    null_cv_scores = Permutations.get_permutations(**cfg)
    null_plot = Permutations.plot_permutation(null_cv_scores)       
    print('Permutations done and plotted')

    ## Confmat
    cm = Analysis.get_confmat(cfg.get('labels').get('regressor'),y_pred_all)
    cm_plot = Analysis.plot_confusion_matrix(cm)    
    print('Confmat done and plotted')

    ## Cook results
    results = dict(accuracy = np.mean(cv_scores),
                   impmap_4mm = coef_img, impmap_2mm = coef_2mm,
                   searchlight_4mm = sl_4mm, searchlight_2mm = sl_2mm,
                   permutation_score = null_cv_scores[1],
                   permutation_plot = null_plot,
                   confusion_matrix = cm,
                   confusion_matrix_plot = cm_plot)
    print('Returning results')
    return results