Example #1
def _mask_and_reduce_single(masker,
                            img,
                            confound,
                            reduction_ratio=None,
                            reduction_method=None,
                            n_samples=None,
                            memory=None,
                            memory_level=0,
                            random_state=None):
    """Utility function for multiprocessing from MaskReducer"""
    this_data = masker.transform(img, confound)
    # Now get rid of the img as fast as possible, to free a
    # reference count on it, and possibly free the corresponding
    # data
    del img
    random_state = check_random_state(random_state)

    data_n_samples, data_n_features = this_data.shape
    if reduction_ratio is None:
        assert n_samples is not None
        n_samples = min(n_samples, data_n_samples)
    else:
        n_samples = int(ceil(data_n_samples * reduction_ratio))
    if reduction_method == 'svd':
        if n_samples <= data_n_features // 4:
            U, S, _ = cache(randomized_svd,
                            memory,
                            memory_level=memory_level,
                            func_memory_level=3)(this_data.T,
                                                 n_samples,
                                                 transpose=True,
                                                 random_state=random_state)
            U = U.T
        else:
            U, S, _ = cache(linalg.svd,
                            memory,
                            memory_level=memory_level,
                            func_memory_level=3)(this_data.T,
                                                 full_matrices=False)
            U = U.T[:n_samples].copy()
            S = S[:n_samples]
        U = U * S[:, np.newaxis]
    elif reduction_method == 'rf':
        Q = cache(randomized_range_finder,
                  memory,
                  memory_level=memory_level,
                  func_memory_level=3)(this_data,
                                       n_samples,
                                       n_iter=3,
                                       random_state=random_state)
        U = Q.T.dot(this_data)
    elif reduction_method == 'ss':
        indices = np.floor(np.linspace(0, this_data.shape[0] - 1,
                                       n_samples)).astype('int')
        U = this_data[indices]
    else:
        U = this_data

    return U
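
To make the three reduction_method branches above concrete, here is a minimal
sketch of what each one computes on a toy matrix, using scipy's SVD and
scikit-learn's randomized_range_finder directly; the shapes and values are
illustrative and this is not nilearn's exact code path.

import numpy as np
from scipy import linalg
from sklearn.utils.extmath import randomized_range_finder

rng = np.random.RandomState(0)
this_data = rng.randn(50, 200)   # (n_timepoints, n_voxels)
n_samples = 10

# 'svd': keep the leading left singular vectors, scaled by singular values
U, S, _ = linalg.svd(this_data.T, full_matrices=False)
U_svd = U.T[:n_samples] * S[:n_samples, np.newaxis]

# 'rf': project the data onto a randomized range basis
Q = randomized_range_finder(this_data, size=n_samples, n_iter=3,
                            random_state=rng)
U_rf = Q.T.dot(this_data)

# 'ss': plain subsampling of evenly spaced time points
indices = np.floor(np.linspace(0, this_data.shape[0] - 1,
                               n_samples)).astype('int')
U_ss = this_data[indices]

print(U_svd.shape, U_rf.shape, U_ss.shape)   # each (10, 200)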
Example #2
def test_cache_shelving():
    with tempfile.TemporaryDirectory() as temp_dir:
        joblib_dir = Path(temp_dir, 'joblib', 'nilearn', 'tests',
                          'test_cache_mixin', 'f')
        mem = Memory(cachedir=temp_dir, verbose=0)
        res = cache_mixin.cache(f, mem, shelve=True)(2)
        assert_equal(res.get(), 2)
        assert_equal(len(_get_subdirs(joblib_dir)), 1)
        res = cache_mixin.cache(f, mem, shelve=True)(2)
        assert_equal(res.get(), 2)
        assert_equal(len(_get_subdirs(joblib_dir)), 1)
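
Shelving in joblib returns a lightweight reference instead of the value
itself, and .get() loads the result on demand. A small sketch with plain
joblib rather than nilearn's cache wrapper (location is the modern joblib
name for the cachedir argument used in these examples):

from joblib import Memory

def square(x):
    return x * x

mem = Memory(location=None, verbose=0)          # no on-disk cache here
shelved = mem.cache(square).call_and_shelve(3)  # a (Not)MemorizedResult
print(shelved.get())                            # 9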
Example #3
def _mask_and_reduce_single(masker,
                            img, confound,
                            reduction_ratio=None,
                            reduction_method=None,
                            n_samples=None,
                            memory=None,
                            memory_level=0,
                            random_state=None):
    """Utility function for multiprocessing from MaskReducer"""
    this_data = masker.transform(img, confound)
    # Now get rid of the img as fast as possible, to free a
    # reference count on it, and possibly free the corresponding
    # data
    del img
    random_state = check_random_state(random_state)

    data_n_samples, data_n_features = this_data.shape
    if reduction_ratio is None:
        assert n_samples is not None
        n_samples = min(n_samples, data_n_samples)
    else:
        n_samples = int(ceil(data_n_samples * reduction_ratio))
    if reduction_method == 'svd':
        if n_samples <= data_n_features // 4:
            U, S, _ = cache(randomized_svd, memory,
                            memory_level=memory_level,
                            func_memory_level=3)(this_data.T,
                                                 n_samples,
                                                 transpose=True,
                                                 random_state=random_state)
            U = U.T
        else:
            U, S, _ = cache(linalg.svd, memory,
                            memory_level=memory_level,
                            func_memory_level=3)(this_data.T,
                                                 full_matrices=False)
            U = U.T[:n_samples].copy()
            S = S[:n_samples]
        U = U * S[:, np.newaxis]
    elif reduction_method == 'rf':
        Q = cache(randomized_range_finder, memory,
                  memory_level=memory_level,
                  func_memory_level=3)(this_data,
                                       n_samples, n_iter=3,
                                       random_state=random_state)
        U = Q.T.dot(this_data)
    elif reduction_method == 'ss':
        indices = np.floor(np.linspace(0, this_data.shape[0] - 1,
                                       n_samples)).astype('int')
        U = this_data[indices]
    else:
        U = this_data

    return U
Example #4
def test_cache_shelving():
    try:
        temp_dir = tempfile.mkdtemp()
        job_glob = os.path.join(temp_dir, 'joblib', 'nilearn', 'tests',
                                'test_cache_mixin', 'f', '*')
        mem = Memory(cachedir=temp_dir, verbose=0)
        res = cache_mixin.cache(f, mem, shelve=True)(2)
        assert_equal(res.get(), 2)
        assert_equal(len(glob.glob(job_glob)), 1)
        res = cache_mixin.cache(f, mem, shelve=True)(2)
        assert_equal(res.get(), 2)
        assert_equal(len(glob.glob(job_glob)), 1)
    finally:
        del mem
        shutil.rmtree(temp_dir, ignore_errors=True)
Example #5
def cached_mask_and_reduce_single(masker,
                                  img,
                                  confound,
                                  reduction_ratio=1,
                                  reduction_method=None,
                                  n_samples=None,
                                  memory_level=0,
                                  memory=Memory(cachedir=None),
                                  random_state=0,
                                  as_shelved_list=False):
    if as_shelved_list:
        return cache(_mask_and_reduce_single,
                     memory=memory,
                     memory_level=memory_level,
                     func_memory_level=0).call_and_shelve(
                         masker,
                         img,
                         confound,
                         reduction_ratio=reduction_ratio,
                         reduction_method=reduction_method,
                         n_samples=n_samples,
                         memory=memory,
                         memory_level=memory_level,
                         random_state=random_state)
    else:
        return _mask_and_reduce_single(masker,
                                       img,
                                       confound,
                                       reduction_ratio=reduction_ratio,
                                       reduction_method=reduction_method,
                                       n_samples=n_samples,
                                       memory=memory,
                                       memory_level=memory_level,
                                       random_state=random_state)
Example #6
def _region_extractor_cache(maps_img, mask_img=None, min_region_size=2500,
                            threshold=1., thresholding_strategy='ratio_n_voxels',
                            extractor='local_regions',
                            memory=Memory(cachedir=None), memory_level=0):
    """Region Extraction with caching built upon helper functions

    See nilearn.regions.RegionExtractor for documentation and related details.
    """

    if thresholding_strategy == 'ratio_n_voxels':
        print("[Thresholding] using maps ratio with threshold={0}"
              " ".format(threshold))
        threshold_maps = _threshold_maps_ratio(maps_img, threshold)
    else:
        if thresholding_strategy == 'percentile':
            threshold = "{0}%".format(threshold)
            print("[Thresholding] using threshold_img with threshold={0}"
                  " ".format(threshold))
        threshold_maps = threshold_img(maps_img, mask_img=mask_img,
                                       threshold=threshold)
    print("Done thresholding")
    print("[Region Extraction] with {0}".format(extractor))
    regions_img, _ = cache(connected_regions, memory,
                           memory_level=memory_level,
                           func_memory_level=1)(threshold_maps,
                                                min_region_size=min_region_size,
                                                extract_type=extractor)
    return regions_img
Example #7
def cached_mask_and_reduce_single(masker, img, confound,
                                  reduction_ratio=1,
                                  reduction_method=None,
                                  n_samples=None,
                                  memory_level=0,
                                  memory=Memory(cachedir=None),
                                  random_state=0,
                                  as_shelved_list=False):
    if as_shelved_list:
        return cache(
            _mask_and_reduce_single, memory=memory,
            memory_level=memory_level,
            func_memory_level=0).call_and_shelve(
            masker, img,
            confound,
            reduction_ratio=reduction_ratio,
            reduction_method=reduction_method,
            n_samples=n_samples,
            memory=memory,
            memory_level=memory_level,
            random_state=random_state)
    else:
        return _mask_and_reduce_single(
            masker, img, confound,
            reduction_ratio=reduction_ratio,
            reduction_method=reduction_method,
            n_samples=n_samples,
            memory=memory,
            memory_level=memory_level,
            random_state=random_state)
Example #8
def test_cache_memory_level():
    with tempfile.TemporaryDirectory() as temp_dir:
        joblib_dir = Path(temp_dir, 'joblib', 'nilearn', 'tests',
                          'test_cache_mixin', 'f')
        mem = Memory(cachedir=temp_dir, verbose=0)
        cache_mixin.cache(f, mem, func_memory_level=2, memory_level=1)(2)
        assert_equal(len(_get_subdirs(joblib_dir)), 0)
        cache_mixin.cache(f, Memory(cachedir=None))(2)
        assert_equal(len(_get_subdirs(joblib_dir)), 0)
        cache_mixin.cache(f, mem, func_memory_level=2, memory_level=3)(2)
        assert_equal(len(_get_subdirs(joblib_dir)), 1)
        cache_mixin.cache(f, mem)(3)
        assert_equal(len(_get_subdirs(joblib_dir)), 2)
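
The asserts above exercise the gating rule between the user-facing
memory_level and the per-call func_memory_level. A one-line sketch of that
rule, read off the test rather than taken from nilearn's implementation:

def should_cache(memory_level, func_memory_level):
    # caching is enabled only once the user-level setting reaches the level
    # declared for this particular call
    return memory_level >= func_memory_level

assert not should_cache(memory_level=1, func_memory_level=2)
assert should_cache(memory_level=3, func_memory_level=2)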
Example #9
def test_cache_memory_level():
    temp_dir = tempfile.mkdtemp()
    job_glob = os.path.join(temp_dir, 'joblib', 'nilearn', 'tests',
                            'test_cache_mixin', 'f', '*')
    mem = Memory(cachedir=temp_dir, verbose=0)
    cache_mixin.cache(f, mem, func_memory_level=2, memory_level=1)(2)
    assert_equal(len(glob.glob(job_glob)), 0)
    cache_mixin.cache(f, Memory(cachedir=None))(2)
    assert_equal(len(glob.glob(job_glob)), 0)
    cache_mixin.cache(f, mem, func_memory_level=2, memory_level=3)(2)
    assert_equal(len(glob.glob(job_glob)), 2)
    cache_mixin.cache(f, mem)(3)
    assert_equal(len(glob.glob(job_glob)), 3)
Example #10
def test_cache_memory_level():
    temp_dir = tempfile.mkdtemp()
    job_glob = os.path.join(temp_dir, 'joblib', 'nilearn', 'tests',
                            'test_cache_mixin', 'f', '*')
    mem = Memory(cachedir=temp_dir, verbose=0)
    cache_mixin.cache(f, mem, func_memory_level=2, memory_level=1)(2)
    assert_equal(len(glob.glob(job_glob)), 0)
    cache_mixin.cache(f, Memory(cachedir=None))(2)
    assert_equal(len(glob.glob(job_glob)), 0)
    cache_mixin.cache(f, mem, func_memory_level=2, memory_level=3)(2)
    assert_equal(len(glob.glob(job_glob)), 2)
    cache_mixin.cache(f, mem)(3)
    assert_equal(len(glob.glob(job_glob)), 3)
Example #11
def test_cache_shelving():
    try:
        temp_dir = tempfile.mkdtemp()
        job_glob = os.path.join(temp_dir, 'joblib', 'nilearn', 'tests',
                                'test_cache_mixin', 'f', '*')
        mem = Memory(cachedir=temp_dir, verbose=0)
        if LooseVersion(sklearn.__version__) >= LooseVersion('0.15'):
            res = cache_mixin.cache(f, mem, shelve=True)(2)
            assert_equal(res.get(), 2)
            assert_equal(len(glob.glob(job_glob)), 1)
            res = cache_mixin.cache(f, mem, shelve=True)(2)
            assert_equal(res.get(), 2)
            assert_equal(len(glob.glob(job_glob)), 1)
        else:
            assert_raises_regex(ValueError, 'Shelving is only available'
                                            ' if scikit-learn >= 0.15'
                                            ' is installed.',
                                cache_mixin.cache,
                                f, mem, shelve=True)
    finally:
        del mem
        shutil.rmtree(temp_dir, ignore_errors=True)
Example #12
def test_cache_shelving():
    try:
        temp_dir = tempfile.mkdtemp()
        job_glob = os.path.join(temp_dir, 'joblib', 'nilearn', 'tests',
                                'test_cache_mixin', 'f', '*')
        mem = Memory(cachedir=temp_dir, verbose=0)
        if LooseVersion(sklearn.__version__) >= LooseVersion('0.15'):
            res = cache_mixin.cache(f, mem, shelve=True)(2)
            assert_equal(res.get(), 2)
            assert_equal(len(glob.glob(job_glob)), 1)
            res = cache_mixin.cache(f, mem, shelve=True)(2)
            assert_equal(res.get(), 2)
            assert_equal(len(glob.glob(job_glob)), 1)
        else:
            assert_raises_regex(ValueError, 'Shelving is only available'
                                ' if scikit-learn >= 0.15'
                                ' is installed.',
                                cache_mixin.cache,
                                f,
                                mem,
                                shelve=True)
    finally:
        del mem
        shutil.rmtree(temp_dir, ignore_errors=True)
Example #13
    def fit(self, X, y=None):
        """Fit the covariance estimator to the given time series for each
        subject.

        Parameters
        ----------
        X : list of numpy.ndarray, shape for each (n_samples, n_features)
            The input subjects time series.

        Returns
        -------
        self : ConnectivityMatrix instance
            The object itself. Useful for chaining operations.
        """
        self.cov_estimator_ = clone(self.cov_estimator)
        if not hasattr(X, "__iter__"):
            raise ValueError("'subjects' input argument must be an iterable. "
                             "You provided {0}".format(X.__class__))

        subjects_types = [type(s) for s in X]
        if set(subjects_types) != set([np.ndarray]):
            raise ValueError("Each subject must be 2D numpy.ndarray.\n You "
                             "provided {0}".format(str(subjects_types)))

        subjects_dims = [s.ndim for s in X]
        if set(subjects_dims) != set([2]):
            raise ValueError("Each subject must be 2D numpy.ndarray.\n You"
                             "provided arrays of dimensions "
                             "{0}".format(str(subjects_dims)))

        features_dims = [s.shape[1] for s in X]
        if len(set(features_dims)) > 1:
            raise ValueError("All subjects must have the same number of "
                             "features.\nYou provided: "
                             "{0}".format(str(features_dims)))

        if self.kind == 'tangent':
            covariances = [self.cov_estimator_.fit(x).covariance_ for x in X]
            self.mean_ = cache(_geometric_mean,
                               memory=self.memory,
                               func_memory_level=2,
                               memory_level=self.memory_level)(covariances,
                                                               max_iter=30,
                                                               tol=1e-7)
            self.whitening_ = _map_eigenvalues(lambda x: 1. / np.sqrt(x),
                                               self.mean_)

        return self
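
When kind='tangent', the whitening matrix computed above is the inverse
square root of the geometric mean covariance. A self-contained sketch of that
operation, with a local inv_sqrtm helper standing in for _map_eigenvalues
(which is not shown in this listing):

import numpy as np
from scipy import linalg

def inv_sqrtm(spd):
    # eigendecompose a symmetric positive-definite matrix and rebuild it
    # with each eigenvalue replaced by 1 / sqrt(eigenvalue)
    vals, vecs = linalg.eigh(spd)
    return vecs.dot(np.diag(1. / np.sqrt(vals))).dot(vecs.T)

mean = np.array([[2.0, 0.3],
                 [0.3, 1.0]])   # toy SPD matrix playing the geometric mean
whitening = inv_sqrtm(mean)
print(np.allclose(whitening.dot(mean).dot(whitening), np.eye(2)))   # True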
Example #14
def test_cache_memory_level():
    temp_dir = tempfile.mkdtemp()
    job_glob = os.path.join(temp_dir, 'joblib', '*')
    mem = Memory(cachedir=temp_dir, verbose=0)
    cache_mixin.cache(f, mem)(2)
    assert_true(len(glob.glob(job_glob)) == 0)
    cache_mixin.cache(f, mem, func_memory_level=2, memory_level=1)(2)
    assert_true(len(glob.glob(job_glob)) == 0)
    cache_mixin.cache(f, mem, func_memory_level=2, memory_level=3)(2)
    assert_true(len(glob.glob(job_glob)) == 2)
Example #15
    def fit(self, X, y=None):
        """Fit the covariance estimator to the given time series for each
        subject.

        Parameters
        ----------
        X : list of numpy.ndarray, shape for each (n_samples, n_features)
            The input subjects time series.

        Returns
        -------
        self : ConnectivityMatrix instance
            The object itself. Useful for chaining operations.
        """
        self.cov_estimator_ = clone(self.cov_estimator)
        if not hasattr(X, "__iter__"):
            raise ValueError("'subjects' input argument must be an iterable. "
                             "You provided {0}".format(X.__class__))

        subjects_types = [type(s) for s in X]
        if set(subjects_types) != set([np.ndarray]):
            raise ValueError("Each subject must be 2D numpy.ndarray.\n You "
                             "provided {0}".format(str(subjects_types)))

        subjects_dims = [s.ndim for s in X]
        if set(subjects_dims) != set([2]):
            raise ValueError("Each subject must be 2D numpy.ndarray.\n You"
                             "provided arrays of dimensions "
                             "{0}".format(str(subjects_dims)))

        features_dims = [s.shape[1] for s in X]
        if len(set(features_dims)) > 1:
            raise ValueError("All subjects must have the same number of "
                             "features.\nYou provided: "
                             "{0}".format(str(features_dims)))

        if self.kind == 'tangent':
            covariances = [self.cov_estimator_.fit(x).covariance_ for x in X]
            self.mean_ = cache(
                _geometric_mean, memory=self.memory, func_memory_level=2,
                memory_level=self.memory_level)(
                    covariances, max_iter=30, tol=1e-7)
            self.whitening_ = _map_eigenvalues(lambda x: 1. / np.sqrt(x),
                                               self.mean_)

        return self
Example #16
def eddy_current_correction(img, file_bvals, file_bvecs, write_dir, mem,
                            acqp='b0_acquisition_params_AP.txt'):
    """ Perform Eddy current correction on diffusion data
    Todo: do topup + eddy in one single command 
    """
    import nibabel as nib
    bvals = np.loadtxt(file_bvals)
    mean_img = get_mean_unweighted_image(nib.load(img), bvals)
    mean_img.to_filename(os.path.join(write_dir, 'mean_unweighted.nii.gz'))
    mask = compute_epi_mask(mean_img)
    mask_file = os.path.join(write_dir, 'mask.nii.gz')
    nib.save(mask, mask_file)
    corrected = os.path.join(os.path.dirname(img),
                             'ed' + os.path.basename(img))
    index = np.ones(len(bvals), np.uint8)
    index_file = os.path.join(write_dir, 'index.txt')
    np.savetxt(index_file, index)
    # the fully parameterized eddy command is built first, then overridden by
    # the simpler eddy_correct call that is actually run below
    cmd = 'fsl5.0-eddy_correct --acqp=%s --bvals=%s --bvecs=%s --imain=%s --index=%s --mask=%s --out=%s' % (
        acqp, file_bvals, file_bvecs, img, index_file, mask_file, corrected)
    cmd = 'fsl5.0-eddy_correct %s %s %d' % (img, corrected, 0)
    print(cmd)
    # commands.getoutput is the Python 2 API (subprocess.getoutput in Python 3);
    # the cached call runs the command once and stores its output
    print(cache(commands.getoutput, mem)(cmd))
Example #17
def subject_covariance(
        estimator, niimgs, mask_img, parameters,
        confounds=None,
        ref_memory_level=0,
        memory=Memory(cachedir=None),
        connectivity=None,
        verbose=0,
        copy=True):
    data, affine = cache(
        filter_and_mask, memory=memory, ref_memory_level=ref_memory_level,
        memory_level=2,
        ignore=['verbose', 'memory', 'ref_memory_level', 'copy'])(
            niimgs, mask_img, parameters,
            ref_memory_level=ref_memory_level,
            memory=memory,
            verbose=verbose,
            confounds=confounds,
            copy=copy)
    estimator = clone(estimator)
    if connectivity is not None:
        estimator.fit(data, connectivity=connectivity)
    else:
        estimator.fit(data)
    return estimator.covariance_
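
The last lines above follow the usual scikit-learn pattern: clone the
estimator, fit it on the masked data, then read the fitted covariance_
attribute. A minimal illustration with EmpiricalCovariance on toy data (the
estimator and shapes here are placeholders, not the ones used above):

import numpy as np
from sklearn.base import clone
from sklearn.covariance import EmpiricalCovariance

data = np.random.RandomState(0).randn(100, 5)   # (n_samples, n_features)
estimator = clone(EmpiricalCovariance())
estimator.fit(data)
print(estimator.covariance_.shape)              # (5, 5)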
Example #18
def _build_parcellations(all_subjects_data, mask, n_parcellations=100,
                         n_parcels=1000, n_bootstrap_samples=None,
                         random_state=None, memory=Memory(cachedir=None),
                         n_jobs=1, verbose=False):
    """Build the parcellations for the RPBI framework.

    Parameters
    ----------
    all_subjects_data : array_like, shape=(n_samples, n_voxels)
      Masked subject images as an array.

    mask : ndarray of booleans
      Mask that has been applied on the initial images to obtain
      `all_subjects_data`.

    n_parcellations : int,
      The number of parcellations to be built and used to extract
      signal averages from the data.

    n_parcels : int,
      Number of parcels for the parcellations.

    n_bootstrap_samples : int,
      Number of subjects to be used to build the parcellations. The subjects
      are randomly drawn with replacement.
      If set to None, n_samples subjects are drawn, which corresponds to
      a bootstrap draw.

    random_state : int,
      Random numbers seed for reproducible results.

    memory : instance of joblib.Memory or string
      Used to cache the masking process.
      By default, no caching is done. If a string is given, it is the
      path to the caching directory.

    n_jobs : int,
      Number of parallel workers.
      If 0 is provided, all CPUs are used.
      A negative number indicates that all the CPUs except (|n_jobs| - 1) ones
      will be used.

    verbose : boolean,
      Activate verbose mode (default is False).

    Returns
    -------
    parcelled_data : np.ndarray, shape=(n_parcels_tot, n_subjs)
      Data for all subjects after mean signal extraction with all the
      parcellations that have been created.

    ward_labels : np.ndarray, shape=(n_vox * n_wards, )
      Voxel-to-parcel map for all the parcellations. Useful to perform
      inverse transforms.

    TODO
    ----
    - Deal with NaNs in the original data (FeatureAgglomeration cannot fit
      when NaNs are present in the data). Median imputation?

    """
    # initialize the seed of the random generator
    rng = check_random_state(random_state)

    # check n_jobs (number of CPUs)
    n_jobs = check_n_jobs(n_jobs)

    n_samples = all_subjects_data.shape[0]
    if n_bootstrap_samples is None:
        n_bootstrap_samples = n_samples

    # Compute connectivity
    shape = mask.shape
    connectivity = image.grid_to_graph(
        n_x=shape[0], n_y=shape[1], n_z=shape[2], mask=mask)

    # Build parcellations
    draw = rng.randint(n_samples, size=n_bootstrap_samples * n_parcellations)
    draw = draw.reshape((n_parcellations, -1))
    ret = joblib.Parallel(n_jobs=n_jobs)(joblib.delayed(
            cache(_ward_fit_transform, memory, verbose=verbose))
          (all_subjects_data, draw[i], connectivity, n_parcels, i * n_parcels)
          for i in range(n_parcellations))
    # reduce results
    parcelled_data_parts, ward_labels = zip(*ret)
    parcelled_data = np.hstack(parcelled_data_parts)
    ward_labels = np.ravel(ward_labels)

    return parcelled_data, ward_labels
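
Each parallel task above runs a cached _ward_fit_transform, which is not
reproduced in this listing. As a rough sketch of what one such parcellation
step does, here is Ward agglomeration on a voxel grid followed by per-parcel
mean signal extraction, using scikit-learn directly; the sizes are
illustrative.

import numpy as np
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_extraction import image

mask = np.ones((6, 6, 6), dtype=bool)
connectivity = image.grid_to_graph(n_x=6, n_y=6, n_z=6, mask=mask)
data = np.random.RandomState(0).randn(20, mask.sum())  # (n_samples, n_voxels)

ward = FeatureAgglomeration(n_clusters=50, connectivity=connectivity,
                            linkage='ward')
parcelled = ward.fit_transform(data)          # mean signal per parcel
print(parcelled.shape, ward.labels_.shape)    # (20, 50) (216,)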
Example #19
def _build_parcellations(all_subjects_data,
                         mask,
                         n_parcellations=100,
                         n_parcels=1000,
                         n_bootstrap_samples=None,
                         random_state=None,
                         memory=Memory(cachedir=None),
                         n_jobs=1,
                         verbose=False):
    """Build the parcellations for the RPBI framework.

    Parameters
    ----------
    all_subjects_data : array_like, shape=(n_samples, n_voxels)
      Masked subject images as an array.

    mask : ndarray of booleans
      Mask that has been applied on the initial images to obtain
      `all_subjects_data`.

    n_parcellations : int,
      The number of parcellations to be built and used to extract
      signal averages from the data.

    n_parcels : int,
      Number of parcels for the parcellations.

    n_bootstrap_samples : int,
      Number of subjects to be used to build the parcellations. The subjects
      are randomly drawn with replacement.
      If set to None, n_samples subjects are drawn, which corresponds to
      a bootstrap draw.

    random_state : int,
      Random numbers seed for reproducible results.

    memory : instance of joblib.Memory or string
      Used to cache the masking process.
      By default, no caching is done. If a string is given, it is the
      path to the caching directory.

    n_jobs : int,
      Number of parallel workers.
      If 0 is provided, all CPUs are used.
      A negative number indicates that all the CPUs except (|n_jobs| - 1) ones
      will be used.

    verbose : boolean,
      Activate verbose mode (default is False).

    Returns
    -------
    parcelled_data : np.ndarray, shape=(n_parcels_tot, n_subjs)
      Data for all subjects after mean signal extraction with all the
      parcellations that have been created.

    ward_labels : np.ndarray, shape=(n_vox * n_wards, )
      Voxel-to-parcel map for all the parcellations. Useful to perform
      inverse transforms.

    TODO
    ----
    - Deal with NaNs in the original data (WardAgglomeration cannot fit
      when NaNs are present in the data). Median imputation?

    """
    # initialize the seed of the random generator
    rng = check_random_state(random_state)

    # check n_jobs (number of CPUs)
    n_jobs = check_n_jobs(n_jobs)

    n_samples = all_subjects_data.shape[0]
    if n_bootstrap_samples is None:
        n_bootstrap_samples = n_samples

    # Compute connectivity
    shape = mask.shape
    connectivity = image.grid_to_graph(n_x=shape[0],
                                       n_y=shape[1],
                                       n_z=shape[2],
                                       mask=mask)

    # Build parcellations
    draw = rng.randint(n_samples, size=n_bootstrap_samples * n_parcellations)
    draw = draw.reshape((n_parcellations, -1))
    ret = joblib.Parallel(n_jobs=n_jobs)(
        joblib.delayed(cache(_ward_fit_transform, memory, verbose=verbose))(
            all_subjects_data, draw[i], connectivity, n_parcels, i * n_parcels)
        for i in range(n_parcellations))
    # reduce results
    parcelled_data_parts, ward_labels = zip(*ret)
    parcelled_data = np.hstack(parcelled_data_parts)
    ward_labels = np.ravel(ward_labels)

    return parcelled_data, ward_labels