def test_iter_check_niimgs():
    no_file_matching = "No files matching path: %s"
    affine = np.eye(4)
    img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine)
    img_2_4d = [[img_4d, img_4d]]

    for empty in ((), [], (i for i in ()), [i for i in ()]):
        assert_raises_regex(ValueError,
                            "Input niimgs list is empty.",
                            list, _iter_check_niimg(empty))

    nofile_path = "/tmp/nofile"
    assert_raises_regex(ValueError,
                        no_file_matching % nofile_path,
                        list, _iter_check_niimg(nofile_path))

    # Create a test file
    filename = tempfile.mktemp(prefix="nilearn_test",
                               suffix=".nii",
                               dir=None)
    img_4d.to_filename(filename)
    niimgs = list(_iter_check_niimg([filename]))
    assert_array_equal(niimgs[0].get_data(),
                       _utils.check_niimg(img_4d).get_data())
    del img_4d
    del niimgs
    os.remove(filename)

    # Regular case
    niimgs = list(_iter_check_niimg(img_2_4d))
    assert_array_equal(niimgs[0].get_data(),
                       _utils.check_niimg(img_2_4d).get_data())
Example No. 2
def test_iter_check_niimgs():
    no_file_matching = "No files matching path: %s"
    affine = np.eye(4)
    img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine)
    img_2_4d = [[img_4d, img_4d]]

    for empty in ((), [], (i for i in ()), [i for i in ()]):
        assert_raises_regex(ValueError, "Input niimgs list is empty.", list,
                            _iter_check_niimg(empty))

    nofile_path = "/tmp/nofile"
    assert_raises_regex(ValueError, no_file_matching % nofile_path, list,
                        _iter_check_niimg(nofile_path))

    # Create a test file
    filename = tempfile.mktemp(prefix="nilearn_test", suffix=".nii", dir=None)
    img_4d.to_filename(filename)
    niimgs = list(_iter_check_niimg([filename]))
    assert_array_equal(niimgs[0].get_data(),
                       _utils.check_niimg(img_4d).get_data())
    del img_4d
    del niimgs
    os.remove(filename)

    # Regular case
    niimgs = list(_iter_check_niimg(img_2_4d))
    assert_array_equal(niimgs[0].get_data(),
                       _utils.check_niimg(img_2_4d).get_data())
Example No. 3
def test_iter_check_niimgs(tmp_path):
    no_file_matching = "No files matching path: %s"
    affine = np.eye(4)
    img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine)
    img_2_4d = [[img_4d, img_4d]]

    for empty in ((), [], (i for i in ()), [i for i in ()]):
        with pytest.raises(ValueError, match="Input niimgs list is empty."):
            list(_iter_check_niimg(empty))

    nofile_path = "/tmp/nofile"
    with pytest.raises(ValueError, match=no_file_matching % nofile_path):
        list(_iter_check_niimg(nofile_path))

    # Create a test file
    fd, filename = tempfile.mkstemp(prefix="nilearn_test",
                                    suffix=".nii",
                                    dir=str(tmp_path))
    os.close(fd)
    img_4d.to_filename(filename)
    niimgs = list(_iter_check_niimg([filename]))
    assert_array_equal(get_data(niimgs[0]),
                       get_data(_utils.check_niimg(img_4d)))
    del img_4d
    del niimgs
    os.remove(filename)

    # Regular case
    niimgs = list(_iter_check_niimg(img_2_4d))
    assert_array_equal(get_data(niimgs[0]),
                       get_data(_utils.check_niimg(img_2_4d)))
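All three variants above drive the same private generator. Here is a minimal sketch of how `_iter_check_niimg` is typically consumed outside the test suite; the import path is an assumption and may differ between nilearn versions:

import numpy as np
from nibabel import Nifti1Image
# Private helper; assumed import path, may vary across nilearn versions.
from nilearn._utils.niimg_conversions import _iter_check_niimg

imgs = [Nifti1Image(np.ones((10, 10, 10)), np.eye(4)) for _ in range(3)]

# The helper is a generator: each input is checked (dimension, field of view,
# optional resampling) lazily and yielded one image at a time.
for niimg in _iter_check_niimg(imgs, ensure_ndim=3):
    print(niimg.shape)  # (10, 10, 10)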
Example No. 4
    def transform_imgs(self, imgs_list, confounds=None, copy=True, n_jobs=1):
        '''Prepare multi-subject data in parallel.

        Parameters
        ----------

        imgs_list: list of Niimg-like objects
            See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
            List of images to prepare. One item per subject.

        confounds: list of confounds, optional
            List of confounds (2D arrays or filenames pointing to CSV
            files). Must be of the same length as imgs_list.

        copy: boolean, optional
            If True, guarantees that output array has no memory in common with
            input array.

        n_jobs: integer, optional
            The number of CPUs to use for the computation. -1 means
            'all CPUs'.
        '''

        if not hasattr(self, 'mask_img_'):
            raise ValueError('It seems that %s has not been fitted. '
                             'You must call fit() before calling transform().'
                             % self.__class__.__name__)
        params = get_params(self.__class__, self)

        reference_affine = None
        if self.target_affine is None:
            # Load the first image and use it as a reference for all other
            # subjects
            reference_affine = _utils.check_niimg(imgs_list[0]).get_affine()

        fov = (self.mask_img_.get_affine(), self.mask_img_.shape)
        niimg_iter = _iter_check_niimg(imgs_list, ensure_ndim=None,
                                       atleast_4d=False,
                                       target_fov=fov, memory=self.memory,
                                       memory_level=self.memory_level,
                                       verbose=self.verbose)

        func = self._cache(filter_and_mask, func_memory_level=1,
                           ignore=['verbose', 'memory', 'copy'])
        if confounds is None:
            confounds = itertools.repeat(None, len(imgs_list))
        data = Parallel(n_jobs=n_jobs)(delayed(func)(
                                imgs, self.mask_img_,
                                parameters=params,
                                memory_level=self.memory_level,
                                memory=self.memory,
                                verbose=self.verbose,
                                confounds=confounds,
                                copy=copy)
                          for imgs, confounds in zip(niimg_iter, confounds))
        return list(zip(*data))[0]
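A hedged usage sketch for the method above, assuming the public MultiNiftiMasker API (found in nilearn.input_data in older releases, nilearn.maskers in newer ones); the toy images and explicit mask are illustrative assumptions:

import numpy as np
from nibabel import Nifti1Image
from nilearn.input_data import MultiNiftiMasker  # nilearn.maskers in recent releases

subject_imgs = [Nifti1Image(np.random.rand(8, 8, 8, 5), np.eye(4))
                for _ in range(2)]
mask = Nifti1Image(np.ones((8, 8, 8), dtype='int8'), np.eye(4))

# With an explicit mask_img, fit() needs no data; transform_imgs then
# checks, masks and cleans each subject's image, optionally in parallel.
masker = MultiNiftiMasker(mask_img=mask).fit()
signals = masker.transform_imgs(subject_imgs, n_jobs=1)
# signals: list with one (n_scans, n_voxels) array per subject, here 2 x (5, 512)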
Example No. 5
    def transform_imgs(self, imgs_list, confounds=None, copy=True, n_jobs=1):
        '''Prepare multi-subject data in parallel.

        Parameters
        ----------

        imgs_list: list of Niimg-like objects
            See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
            List of images to prepare. One item per subject.

        confounds: list of confounds, optional
            List of confounds (2D arrays or filenames pointing to CSV
            files). Must be of the same length as imgs_list.

        copy: boolean, optional
            If True, guarantees that output array has no memory in common with
            input array.

        n_jobs: integer, optional
            The number of CPUs to use for the computation. -1 means
            'all CPUs'.
        '''

        if not hasattr(self, 'mask_img_'):
            raise ValueError('It seems that %s has not been fitted. '
                             'You must call fit() before calling transform().'
                             % self.__class__.__name__)
        params = get_params(self.__class__, self)

        target_fov = None
        if self.target_affine is None:
            # Force resampling on first image
            target_fov = 'first'

        niimg_iter = _iter_check_niimg(imgs_list, ensure_ndim=None,
                                       atleast_4d=False,
                                       target_fov=target_fov,
                                       memory=self.memory,
                                       memory_level=self.memory_level,
                                       verbose=self.verbose)

        func = self._cache(filter_and_mask, func_memory_level=1,
                           ignore=['verbose', 'memory', 'copy'])
        if confounds is None:
            confounds = itertools.repeat(None, len(imgs_list))
        data = Parallel(n_jobs=n_jobs)(delayed(func)(
                                imgs, self.mask_img_,
                                parameters=params,
                                memory_level=self.memory_level,
                                memory=self.memory,
                                verbose=self.verbose,
                                confounds=confounds,
                                copy=copy)
                          for imgs, confounds in izip(niimg_iter, confounds))
        return list(zip(*data))[0]
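The notable difference from the previous variant is `izip`, Python 2's lazy zip; running this snippet on Python 3 would need a small compatibility shim along these lines:

try:
    from itertools import izip  # Python 2: lazy, memory-friendly zip
except ImportError:
    izip = zip                  # Python 3: the built-in zip is already lazy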
Example No. 6
    def transform(self, imgs, confounds=None):
        """Extract signals from parcellations learned on fmri images.
        Parameters
        ----------
        imgs : List of Nifti-like images
            See http://nilearn.github.io/manipulating_images/input_output.html.
            Images to process.
        confounds: List of CSV files or arrays-like, optional
            Each file or numpy array in a list should have shape
            (number of scans, number of confounds)
            This parameter is passed to signal.clean. Please see the related
            documentation for details. Must be of same length of imgs.
        Returns
        -------
        region_signals: List of or 2D numpy.ndarray
            Signals extracted for each label for each image.
            Example, for single image shape will be
            (number of scans, number of labels)
        """
        self._check_fitted()
        imgs, confounds, single_subject = _check_parameters_transform(
            imgs, confounds)
        # Required for special cases like extracting signals from a list of
        # 3D images
        imgs_list = _iter_check_niimg(imgs, atleast_4d=True)

        masker = NiftiLabelsMasker(self.labels_img_,
                                   mask_img=self.masker_.mask_img_,
                                   smoothing_fwhm=self.smoothing_fwhm,
                                   standardize=self.standardize,
                                   detrend=self.detrend,
                                   low_pass=self.low_pass,
                                   high_pass=self.high_pass,
                                   t_r=self.t_r,
                                   resampling_target='data',
                                   memory=self.memory,
                                   memory_level=self.memory_level,
                                   verbose=self.verbose)

        region_signals = Parallel(n_jobs=self.n_jobs)(
            delayed(self._cache(_labels_masker_extraction,
                                func_memory_level=2))(img, masker, confound)
            for img, confound in zip(imgs_list, confounds))

        if single_subject:
            return region_signals[0]
        else:
            return region_signals
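A hedged usage sketch for this transform, assuming the public nilearn.regions.Parcellations estimator; the synthetic image, explicit mask and method='kmeans' choice are illustrative assumptions, not part of the original:

import numpy as np
from nibabel import Nifti1Image
from nilearn.regions import Parcellations

rng = np.random.RandomState(0)
func_img = Nifti1Image(rng.uniform(size=(10, 10, 10, 20)), np.eye(4))
mask = Nifti1Image(np.ones((10, 10, 10), dtype='int8'), np.eye(4))

# An explicit mask avoids estimating one from the toy data.
parcellation = Parcellations(method='kmeans', n_parcels=5, mask=mask)
parcellation.fit([func_img])
signals = parcellation.transform(func_img)  # (20, 5): one time series per parcel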
Example No. 7
def _check_memory(list_img_3d):
    # We intentionally add a memory-usage offset to avoid unreliable
    # measurements with memory_profiler.
    mem_offset = b'a' * 100 * 1024**2
    list(_iter_check_niimg(list_img_3d))
    return mem_offset
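This helper is only meaningful when run under memory_profiler; a plausible driver is sketched below (the exact call in the surrounding nilearn test may differ):

import numpy as np
from nibabel import Nifti1Image
from memory_profiler import memory_usage

list_img_3d = [Nifti1Image(np.ones((10, 10, 10)), np.eye(4)) for _ in range(5)]

# memory_usage samples the process memory (in MiB) while the callable runs;
# the 100 MiB offset allocated inside _check_memory keeps the signal above noise.
peak_mib = max(memory_usage((_check_memory, (list_img_3d,), {})))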
Example No. 8
def _check_memory(list_img_3d):
    # We intentionally add a memory-usage offset to avoid unreliable
    # measurements with memory_profiler.
    mem_offset = b'a' * 100 * 1024 ** 2
    list(_iter_check_niimg(list_img_3d))
    return mem_offset
Example No. 9
    def transform_imgs(self, imgs_list, confounds=None, copy=True, n_jobs=1,
                       shelve=False):
        """Prepare multi subject data in parallel

        Parameters
        ----------

        imgs_list: list of Niimg-like objects
            See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
            List of images to prepare. One item per subject.

        confounds: list of confounds, optional
            List of confounds (2D arrays or filenames pointing to CSV
            files). Must be of the same length as imgs_list.

        copy: boolean, optional
            If True, guarantees that output array has no memory in common with
            input array.

        n_jobs: integer, optional
            The number of CPUs to use for the computation. -1 means
            'all CPUs'.
        
        Returns
        -------
        region_signals: list of 2D numpy.ndarray
            List of signals, one per subject.
            Each array has shape (number of scans, number of elements).
        """

        if not hasattr(self, 'mask_img_'):
            raise ValueError('It seems that %s has not been fitted. '
                             'You must call fit() before calling transform().'
                             % self.__class__.__name__)
        target_fov = None
        if self.target_affine is None:
            # Force resampling on first image
            target_fov = 'first'

        niimg_iter = _iter_check_niimg(imgs_list, ensure_ndim=None,
                                       atleast_4d=False,
                                       target_fov=target_fov,
                                       memory=self.memory,
                                       memory_level=self.memory_level,
                                       verbose=self.verbose)

        if confounds is None:
            confounds = itertools.repeat(None, len(imgs_list))

        # Ignore the mask-computing params: they are not useful and will
        # just invalidate the cache for no good reason
        # target_shape and target_affine are conveyed implicitly in mask_img
        params = get_params(self.__class__, self,
                            ignore=['mask_img', 'mask_args', 'mask_strategy',
                                    'copy'])

        data = Parallel(n_jobs=n_jobs)(
            delayed(shelved_filter_and_mask)(self, imgs, params,
                                             confounds=cfs,
                                             copy=copy,
                                             shelve=shelve,
                                             squeeze=True)
            for imgs, cfs in izip(niimg_iter, confounds))
        return data
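The confounds handling above relies on a small idiom worth isolating: when no confounds are supplied, itertools.repeat(None) lets the same pairing loop serve both cases. A generic sketch, independent of nilearn:

import itertools

def pair_with_confounds(imgs_list, confounds=None):
    # Broadcast None when no confounds are given, so the same zip-based
    # pairing works with and without confounds.
    if confounds is None:
        confounds = itertools.repeat(None, len(imgs_list))
    return list(zip(imgs_list, confounds))

print(pair_with_confounds(["img_a", "img_b"]))
# [('img_a', None), ('img_b', None)]
print(pair_with_confounds(["img_a", "img_b"], ["c1", "c2"]))
# [('img_a', 'c1'), ('img_b', 'c2')]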
Example No. 10
    def transform_imgs(self,
                       imgs_list,
                       confounds=None,
                       copy=True,
                       n_jobs=1,
                       shelve=False):
        """Prepare multi subject data in parallel

        Parameters
        ----------

        imgs_list: list of Niimg-like objects
            See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
            List of images to prepare. One item per subject.

        confounds: list of confounds, optional
            List of confounds (2D arrays or filenames pointing to CSV
            files). Must be of the same length as imgs_list.

        copy: boolean, optional
            If True, guarantees that output array has no memory in common with
            input array.

        n_jobs: integer, optional
            The number of CPUs to use for the computation. -1 means
            'all CPUs'.
        
        Returns
        -------
        region_signals: list of 2D numpy.ndarray
            List of signals, one per subject.
            Each array has shape (number of scans, number of elements).
        """

        if not hasattr(self, 'mask_img_'):
            raise ValueError(
                'It seems that %s has not been fitted. '
                'You must call fit() before calling transform().' %
                self.__class__.__name__)
        target_fov = None
        if self.target_affine is None:
            # Force resampling on first image
            target_fov = 'first'

        niimg_iter = _iter_check_niimg(imgs_list,
                                       ensure_ndim=None,
                                       atleast_4d=False,
                                       target_fov=target_fov,
                                       memory=self.memory,
                                       memory_level=self.memory_level,
                                       verbose=self.verbose)

        if confounds is None:
            confounds = itertools.repeat(None, len(imgs_list))

        # Ignore the mask-computing params: they are not useful and will
        # just invalidate the cache for no good reason
        # target_shape and target_affine are conveyed implicitly in mask_img
        params = get_params(
            self.__class__,
            self,
            ignore=['mask_img', 'mask_args', 'mask_strategy', 'copy'])

        data = Parallel(n_jobs=n_jobs)(
            delayed(shelved_filter_and_mask)(self,
                                             imgs,
                                             params,
                                             confounds=cfs,
                                             copy=copy,
                                             shelve=shelve,
                                             squeeze=True)
            for imgs, cfs in izip(niimg_iter, confounds))
        return data