Example #1
import numpy as np
import nibabel
from scipy import linalg, signal, stats

# mask_tools is assumed to be the surrounding project's mask utilities
# module (providing series_from_mask); adjust the import to your code base.

def high_variance_confounds(filenames):
    """ Return confounds time series extracted from voxels with high
        variance.

        Parameters
        ==========
        filenames: list of 3d nifti or analyze files
            The raw (non-preprocessed) dataset

        Notes
        =====
        This method is related to what has been published in the
        literature as 'CompCor' (Behzadi et al., NeuroImage 2007).
    """
    # Use a full-brain mask (every voxel) on the grid of the first image
    mask = np.ones(nibabel.load(filenames[0]).get_data().shape,
                   dtype=bool)
    series, _ = mask_tools.series_from_mask(filenames, mask)
    # Remove linear trends from each voxel time course, in place
    for serie in series:
        serie[:] = signal.detrend(serie)
    # Retrieve the 1% high variance voxels
    var = np.mean(series**2, axis=-1)
    var_thr = stats.scoreatpercentile(var, 99)
    series = series[var > var_thr]
    # SVD of the retained voxel time series: the rows of v are orthogonal
    # time courses; keep the 10 strongest as confound regressors
    u, s, v = linalg.svd(series, full_matrices=False)
    v = v[:10]
    return v
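
A minimal usage sketch (hypothetical, not part of the original snippet): the returned array has shape (10, n_timepoints), and the components can be regressed out of masked voxel time series with ordinary least squares. Here func_files and series are assumed to be a list of functional images and a (n_voxels, n_timepoints) array, respectively.

# Hedged usage sketch with hypothetical inputs 'func_files' and 'series'
confounds = high_variance_confounds(func_files)       # (10, n_timepoints)
beta, _, _, _ = linalg.lstsq(confounds.T, series.T)   # fit confounds to each voxel
cleaned = (series.T - np.dot(confounds.T, beta)).T    # residual time courses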
Example #2
import numpy as np
from scipy import linalg

# mask_utils is assumed to be the surrounding project's mask utilities
# module (providing series_from_mask); adjust the import to your code base.

def session_pca(raw_filenames, mask, smooth=False,
                two_levels=False, n_first_components=None):
    """ Do the preprocessing and calculate the PCA components for a
        single session.

        Parameters
        ----------
        raw_filenames: list of filenames (list of lists if two_levels is True)
            The raw (non-preprocessed) functional images.
        mask: 3d ndarray
            3D mask array: true where a voxel should be used.
        smooth: False or float, optional
            If smooth is not False, it gives the size, in voxels, of the
            spatial smoothing to apply to the signal.
        two_levels: boolean, optional
            If two_levels is True, the filenames are a list of filenames
            corresponding to various sessions, and a multi-level model is
            applied based on a first CCA.
        n_first_components: integer, optional
            The number of components to retain at the first level. This
            must be specified if two_levels is True.
    """
    # Data preprocessing and loading.
    if not two_levels:
        series, header = mask_utils.series_from_mask(raw_filenames, mask,
                                                     smooth=smooth)

        # XXX: this should go in series_from_mask
        series -= series.mean(axis=-1)[:, np.newaxis]
        std = series.std(axis=-1)
        std[std==0] = 1
        series /= std[:, np.newaxis]
        del std
        # PCA
        components, loadings, _ = linalg.svd(series, full_matrices=False)
    else:
        if n_first_components is None:
            raise ValueError('If two_levels is True, n_first_components '
                              'must be specified')
        components = list()
        for session_files in raw_filenames:
            these_components, _, header = session_pca(session_files, mask,
                                                       smooth=smooth)
            components.append(these_components[:, :n_first_components])
            del these_components
        # CCA
        components = np.hstack(components)
        components, loadings, _ = linalg.svd(components,
                                             full_matrices=False)
    return components, loadings, header
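
A brief usage sketch with hypothetical inputs: session_files would be a list of image filenames for one session, all_sessions_files a list of such lists, and mask a 3D boolean array.

# Single-session PCA, with optional spatial smoothing (in voxels)
components, loadings, header = session_pca(session_files, mask, smooth=2.0)

# Two-level variant: per-session PCAs are stacked, then decomposed again
group_components, group_loadings, header = session_pca(
    all_sessions_files, mask, two_levels=True, n_first_components=30)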
Example #3
import os

import numpy as np
from scipy import linalg, signal

# as_volume_img, mask_utils and preprocessing are assumed to come from the
# surrounding project; BASE_DIR, gm_file, epi_ref_file and epi_files are
# assumed to be defined earlier in the script.

gm_img = as_volume_img(gm_file)
epi_ref_img = as_volume_img(epi_ref_file)
# Resample tissue mask to grid of epi
gm_img = gm_img.resampled_to_img(epi_ref_img)
# Extract tissue mask
gm = gm_img.get_data().astype(float)  # cast to float so the in-place normalization below works
# Normalize the mask to [0,1]
gm -= gm.min()
gm /= gm.max()
# Threshold tissue mask
gm_mask = (gm > .5)
# Find largest connected component
gm_mask = mask_utils.largest_cc(gm_mask)

# Extract graymatter voxel timecourses
time_series_gm, header_gm = mask_utils.series_from_mask(epi_files, gm_mask)
time_series_gm = preprocessing.standardize(time_series_gm).T
n_tpts = time_series_gm.shape[0]
# Load motion regressors
motion_regressor = np.loadtxt(os.path.join(
    BASE_DIR, "fmri", "rp_fga070108233-0004-00002-000002-01.txt"))
motion_regressor = preprocessing.standardize(motion_regressor)
# Regress out the motion parameters
beta, _, _, _ = linalg.lstsq(motion_regressor, time_series_gm)
time_series_gm -= np.dot(motion_regressor, beta)
# Bandpass filter to remove the DC offset and low-frequency drifts
f_cut = np.array([0.01, 0.1])
tr = 2.4
samp_freq = 1 / tr
w_cut = f_cut * 2 / samp_freq
b, a = signal.butter(5, w_cut, btype='bandpass', analog=False, output='ba')
for tc in time_series_gm.T:
    tc[:] = signal.filtfilt(b, a, tc)  # modify time_series_gm in place
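
As a quick sanity check (not part of the original pipeline), one can verify that power outside the 0.01-0.1 Hz pass band has been attenuated, for instance with a Welch periodogram of one filtered voxel time course:

# Hedged check using only quantities defined above
freqs, pxx = signal.welch(time_series_gm[:, 0], fs=samp_freq,
                          nperseg=min(128, n_tpts))
low_power = pxx[freqs < f_cut[0]].sum()
band_power = pxx[(freqs >= f_cut[0]) & (freqs <= f_cut[1])].sum()
print("low-frequency / pass-band power ratio:", low_power / band_power)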