Code example #1 (score: 0)
File: multi_nifti_masker.py — Project: BigR-Lab/modl
    def transform_imgs(self, imgs_list, confounds=None, copy=True, n_jobs=1,
                       shelve=False):
        """Apply the fitted mask to every subject's images, in parallel.

        Parameters
        ----------
        imgs_list: list of Niimg-like objects
            See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
            Images to prepare, one item per subject.

        confounds: list of confounds, optional
            Confounds (2D arrays or filenames pointing to CSV files),
            one entry per subject; must have the same length as
            ``imgs_list``.

        copy: boolean, optional
            If True, guarantees that the output arrays share no memory
            with the input arrays.

        n_jobs: integer, optional
            Number of CPUs used for the computation; -1 means 'all CPUs'.

        shelve: boolean, optional
            Forwarded to ``shelved_filter_and_mask`` (presumably shelves
            per-subject results to disk instead of holding them in
            memory — confirm against that helper).

        Returns
        -------
        region_signals: list of 2D numpy.ndarray
            One signal array per subject, each of shape
            (number of scans, number of elements).
        """
        # fit() is what creates mask_img_; refuse to run without it.
        if not hasattr(self, 'mask_img_'):
            raise ValueError('It seems that %s has not been fitted. '
                             'You must call fit() before calling transform().'
                             % self.__class__.__name__)

        # Without an explicit target affine, resample everything onto the
        # field of view of the first image.
        fov = 'first' if self.target_affine is None else None

        checked_niimgs = _iter_check_niimg(
            imgs_list, ensure_ndim=None, atleast_4d=False, target_fov=fov,
            memory=self.memory, memory_level=self.memory_level,
            verbose=self.verbose)

        if confounds is None:
            confounds = itertools.repeat(None, len(imgs_list))

        # Drop the mask-computation parameters: they are useless here and
        # would only invalidate the cache (target_shape / target_affine are
        # already conveyed implicitly by mask_img).
        masker_params = get_params(
            self.__class__, self,
            ignore=['mask_img', 'mask_args', 'mask_strategy', 'copy'])

        return Parallel(n_jobs=n_jobs)(
            delayed(shelved_filter_and_mask)(
                self, imgs, masker_params, confounds=cfs, copy=copy,
                shelve=shelve, squeeze=True)
            for imgs, cfs in izip(checked_niimgs, confounds))
Code example #2 (score: 0)
File: multi_nifti_masker.py — Project: ml-lab/modl
    def transform_imgs(self,
                       imgs_list,
                       confounds=None,
                       copy=True,
                       n_jobs=1,
                       shelve=False):
        """Extract masked signals for a list of subjects, in parallel.

        Parameters
        ----------
        imgs_list: list of Niimg-like objects
            See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
            Images to prepare, one item per subject.

        confounds: list of confounds, optional
            Confounds (2D arrays or filenames pointing to CSV files);
            must have the same length as ``imgs_list``.

        copy: boolean, optional
            If True, guarantees that the output arrays have no memory in
            common with the input arrays.

        n_jobs: integer, optional
            The number of CPUs to use for the computation; -1 means
            'all CPUs'.

        shelve: boolean, optional
            Forwarded to ``shelved_filter_and_mask`` (presumably enables
            on-disk shelving of the per-subject results — confirm
            against that helper).

        Returns
        -------
        region_signals: list of 2D numpy.ndarray
            One array per subject, of shape
            (number of scans, number of elements).
        """
        if not hasattr(self, 'mask_img_'):
            # mask_img_ only exists after fit(); fail loudly otherwise.
            raise ValueError(
                'It seems that %s has not been fitted. '
                'You must call fit() before calling transform().' %
                self.__class__.__name__)

        if self.target_affine is None:
            # No explicit target affine: force resampling on the first image.
            target_fov = 'first'
        else:
            target_fov = None

        imgs_iterator = _iter_check_niimg(imgs_list,
                                          ensure_ndim=None,
                                          atleast_4d=False,
                                          target_fov=target_fov,
                                          memory=self.memory,
                                          memory_level=self.memory_level,
                                          verbose=self.verbose)

        if confounds is None:
            confounds = itertools.repeat(None, len(imgs_list))

        # Ignore the mask-computation parameters: they are not useful here
        # and would needlessly invalidate the cache; target_shape and
        # target_affine are conveyed implicitly by mask_img.
        cleaned_params = get_params(self.__class__,
                                    self,
                                    ignore=['mask_img', 'mask_args',
                                            'mask_strategy', 'copy'])

        subject_signals = Parallel(n_jobs=n_jobs)(
            delayed(shelved_filter_and_mask)(self,
                                             single_imgs,
                                             cleaned_params,
                                             confounds=single_confounds,
                                             copy=copy,
                                             shelve=shelve,
                                             squeeze=True)
            for single_imgs, single_confounds
            in izip(imgs_iterator, confounds))
        return subject_signals
Code example #3 (score: 0)
# Plot the DMN component (ICA map index 3) before any region extraction.
img = image.index_img(components_img, 3)
coords = plotting.find_xyz_cut_coords(img)
display = plotting.plot_stat_map(
    img, cut_coords=(0, -52, 29), colorbar=False,
    title='ICA map: DMN mode')

# After region extraction the connected regions of the DMN are nicely
# separated; paint each extracted region in its own colour on an
# anatomical background.

# Indices of all extracted regions that originate from ICA map 3.
regions_indices_of_map3 = np.where(np.array(regions_index) == 3)

display = plotting.plot_anat(
    cut_coords=(0, -52, 29), title='Extracted regions in DMN mode')

# Overlay the regions one by one: right temporoparietal junction,
# posterior cingulate cortex, medial prefrontal cortex and left
# temporoparietal junction.
color_list = [
    [0., 1., 0.29, 1.],
    [0., 1., 0.54, 1.],
    [0., 1., 0.78, 1.],
    [0., 0.96, 1., 1.],
    [0., 0.73, 1., 1.],
    [0., 0.47, 1., 1.],
    [0., 0.22, 1., 1.],
    [0.01, 0., 1., 1.],
    [0.26, 0., 1., 1.],
]
for region_idx, color in izip(regions_indices_of_map3[0], color_list):
    region_img = image.index_img(regions_extracted_img, region_idx)
    display.add_overlay(region_img, cmap=plotting.cm.alpha_cmap(color))

plotting.show()
Code example #4 (score: 0)
# Show the raw DMN component first: statistical map at a fixed cut.
coords = plotting.find_xyz_cut_coords(img)
display = plotting.plot_stat_map(
    img,
    cut_coords=(0, -52, 29),
    colorbar=False,
    title="ICA map: DMN mode",
)

# After region extraction the connected regions are nicely separated;
# each extracted brain region gets its own colour.

# Collect the indices of every extracted region that came from the
# original ICA map 3.
regions_indices_of_map3 = np.where(np.array(regions_index) == 3)

display = plotting.plot_anat(
    cut_coords=(0, -52, 29),
    title="Extracted regions in DMN mode",
)

# Overlay each region in turn: right temporoparietal junction, posterior
# cingulate cortex, medial prefrontal cortex, left temporoparietal
# junction.
color_list = [
    [0.0, 1.0, 0.29, 1.0],
    [0.0, 1.0, 0.54, 1.0],
    [0.0, 1.0, 0.78, 1.0],
    [0.0, 0.96, 1.0, 1.0],
    [0.0, 0.73, 1.0, 1.0],
    [0.0, 0.47, 1.0, 1.0],
    [0.0, 0.22, 1.0, 1.0],
    [0.01, 0.0, 1.0, 1.0],
    [0.26, 0.0, 1.0, 1.0],
]
for map3_region_index, rgba in izip(regions_indices_of_map3[0], color_list):
    overlay_img = image.index_img(regions_extracted_img, map3_region_index)
    display.add_overlay(overlay_img, cmap=plotting.cm.alpha_cmap(rgba))

plotting.show()