Example #1
def test_clean_frequencies():

    # Create signal
    sx = np.array([np.sin(np.linspace(0, 100, 100) * 1.5),
                   np.sin(np.linspace(0, 100, 100) * 3.),
                   np.sin(np.linspace(0, 100, 100) / 8.),
                   ]).T

    # Create confound
    _, _, confounds = generate_signals(
        n_features=10, n_confounds=10, length=100)

    # Apply low- and high-pass filter (separately)
    t_r = 1.0
    low_pass = 0.1
    high_pass = 0.4
    res_low = clean(sx, detrend=False, standardize=False, low_pass=low_pass,
                    high_pass=None, t_r=t_r)
    res_high = clean(sx, detrend=False, standardize=False, low_pass=None,
                     high_pass=high_pass, t_r=t_r)

    # Compute the power spectral density for both tests
    f, Pxx_den_low = scipy.signal.welch(np.mean(res_low.T, axis=0), fs=t_r)
    f, Pxx_den_high = scipy.signal.welch(np.mean(res_high.T, axis=0), fs=t_r)

    # Verify that the filtered frequencies are removed
    assert_true(np.sum(Pxx_den_low[f >= low_pass * 2.]) <= 1e-4)
    assert_true(np.sum(Pxx_den_high[f <= high_pass / 2.]) <= 1e-4)
Example #2
def test_clean_t_r():
    """Different TRs must produce different results after filtering"""
    rng = np.random.RandomState(0)
    n_samples = 34
    # n_features must be higher than 500
    n_features = 501
    x_orig = generate_signals_plus_trends(n_features=n_features,
                                          n_samples=n_samples)
    random_tr_list1 = np.round(rng.rand(3) * 10, decimals=2)
    random_tr_list2 = np.round(rng.rand(3) * 10, decimals=2)
    for tr1, tr2 in zip(random_tr_list1, random_tr_list2):
        low_pass_freq_list = tr1 * np.array([1.0 / 100, 1.0 / 110])
        high_pass_freq_list = tr1 * np.array([1.0 / 210, 1.0 / 190])
        for low_cutoff, high_cutoff in zip(low_pass_freq_list,
                                           high_pass_freq_list):
            det_one_tr = nisignal.clean(x_orig, t_r=tr1, low_pass=low_cutoff,
                                        high_pass=high_cutoff)
            det_diff_tr = nisignal.clean(x_orig, t_r=tr2, low_pass=low_cutoff,
                                         high_pass=high_cutoff)

            if not np.isclose(tr1, tr2, atol=0.3):
                msg = ('results do not differ for different TRs: {} and {} '
                       'at cutoffs: low_pass={}, high_pass={} '
                       'n_samples={}, n_features={}'.format(
                           tr1, tr2, low_cutoff, high_cutoff,
                           n_samples, n_features))
                np.testing.assert_(np.any(np.not_equal(det_one_tr, det_diff_tr)),
                                   msg)
                del det_one_tr, det_diff_tr
Example #3
def clean_confound(RS, COG, confmat):
    '''
    Regress the confounds out of the RS and COG matrices, z-scoring everything.
    '''

    # build the confound matrix: z-scored confounds plus their squares
    z_confound = zscore(confmat)
    # squared regressors help account for potentially nonlinear confound effects
    z2_confound = z_confound ** 2
    conf_mat = np.hstack((z_confound, z2_confound))

    # Handle nan in z scores
    conf_mat = np.nan_to_num(conf_mat)

    # clean signal
    RS_clean = clean(zscore(RS),
                     confounds=conf_mat,
                     detrend=False,
                     standardize=False)
    COG_clean = clean(zscore(COG),
                      confounds=conf_mat,
                      detrend=False,
                      standardize=False)

    return RS_clean, COG_clean, conf_mat
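
A hypothetical usage sketch (not from the original source), assuming clean_confound above is in scope together with its imports (numpy, scipy.stats.zscore, nilearn.signal.clean); the arrays are synthetic stand-ins:

import numpy as np

rng = np.random.RandomState(0)
RS = rng.randn(50, 10)       # 50 subjects x 10 resting-state features
COG = rng.randn(50, 3)       # 50 subjects x 3 cognitive scores
confmat = rng.randn(50, 4)   # 50 subjects x 4 confounds

RS_clean, COG_clean, conf_mat = clean_confound(RS, COG, confmat)
print(RS_clean.shape, COG_clean.shape, conf_mat.shape)  # (50, 10) (50, 3) (50, 8)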
Example #4
def test_clean_psc():
    rng = np.random.RandomState(0)
    n_samples = 500
    n_features = 5

    signals, _, _ = generate_signals(n_features=n_features, length=n_samples)

    # positive mean signal
    means = rng.randn(1, n_features)
    signals_pos_mean = signals + means

    # a mix of pos and neg mean signal
    signals_mixed_mean = signals + np.append(means[:, :-3], -1 * means[:, -3:])

    # both types should pass
    for s in [signals_pos_mean, signals_mixed_mean]:
        cleaned_signals = clean(s, standardize='psc')
        np.testing.assert_almost_equal(cleaned_signals.mean(0), 0)

        tmp = (s - s.mean(0)) / np.abs(s.mean(0))
        tmp *= 100
        np.testing.assert_almost_equal(cleaned_signals, tmp)

    # give the last 3 columns a mean of zero to trigger the user warning
    signals_w_zero = signals + np.append(means[:, :-3], np.zeros((1, 3)))
    with pytest.warns(UserWarning) as records:
        cleaned_w_zero = clean(signals_w_zero, standardize='psc')
    psc_warning = sum('psc standardization strategy' in str(r.message)
                      for r in records)
    assert psc_warning == 1
    np.testing.assert_equal(cleaned_w_zero[:, -3:].mean(0), 0)
Example #5
def test_clean_detrending():
    n_samples = 21
    n_features = 501  # Must be higher than 500
    signals, _, _ = generate_signals(n_features=n_features, length=n_samples)
    trends = generate_trends(n_features=n_features, length=n_samples)
    x = signals + trends

    # with ensure_finite=True, the cleaned output should contain no NaNs or infs
    y = signals + trends
    y[20, 150] = np.nan
    y[5, 500] = np.nan
    y[15, 14] = np.inf
    y = nisignal.clean(y, ensure_finite=True)
    assert_true(np.all(np.isfinite(y)))

    # a boolean passed to low_pass/high_pass should raise a TypeError
    assert_raises(TypeError, nisignal.clean, x, low_pass=False)
    assert_raises(TypeError, nisignal.clean, x, high_pass=False)

    # This should remove trends
    x_detrended = nisignal.clean(x,
                                 standardize=False,
                                 detrend=True,
                                 low_pass=None,
                                 high_pass=None)
    np.testing.assert_almost_equal(x_detrended, signals, decimal=13)

    # This should do nothing
    x_undetrended = nisignal.clean(x,
                                   standardize=False,
                                   detrend=False,
                                   low_pass=None,
                                   high_pass=None)
    assert_false(abs(x_undetrended - signals).max() < 0.06)
Example #6
def test_clean_t_r():
    """Different TRs must produce different results after filtering"""
    rng = np.random.RandomState(42)
    n_samples = 34
    # n_features must be higher than 500
    n_features = 501
    x_orig = generate_signals_plus_trends(n_features=n_features,
                                          n_samples=n_samples)
    random_tr_list1 = np.round(rng.uniform(size=3) * 10, decimals=2)
    random_tr_list2 = np.round(rng.uniform(size=3) * 10, decimals=2)
    for tr1, tr2 in zip(random_tr_list1, random_tr_list2):
        low_pass_freq_list = tr1 * np.array([1.0 / 100, 1.0 / 110])
        high_pass_freq_list = tr1 * np.array([1.0 / 210, 1.0 / 190])
        for low_cutoff, high_cutoff in zip(low_pass_freq_list,
                                           high_pass_freq_list):
            det_one_tr = nisignal.clean(x_orig,
                                        t_r=tr1,
                                        low_pass=low_cutoff,
                                        high_pass=high_cutoff)
            det_diff_tr = nisignal.clean(x_orig,
                                         t_r=tr2,
                                         low_pass=low_cutoff,
                                         high_pass=high_cutoff)

            if not np.isclose(tr1, tr2, atol=0.3):
                msg = ('results do not differ for different TRs: {} and {} '
                       'at cutoffs: low_pass={}, high_pass={} '
                       'n_samples={}, n_features={}'.format(
                           tr1, tr2, low_cutoff, high_cutoff, n_samples,
                           n_features))
                np.testing.assert_(
                    np.any(np.not_equal(det_one_tr, det_diff_tr)), msg)
                del det_one_tr, det_diff_tr
Example #7
    def __img_to_tseries(self, filename_img, mask_img, confounds_filename=None,
                         smoothing_fwhm=None, strategy=["minimal"],
                         chunksize=3000):

        img = nb.load(filename_img)
        img = resample_to_img(img, mask_img)

        if confounds_filename is not None:
            confounds = load_confounds(confounds_filename, strategy=strategy)

        masker = NiftiMasker(mask_img, smoothing_fwhm=smoothing_fwhm)
        masker.fit(img)
        masker.mask_img_ = mask_img
        tseries = masker.transform(img)

        if confounds_filename is not None:
            tseries = clean(tseries, confounds=confounds.values)
        else:
            tseries = clean(tseries)

        tseries = self.__normalize_data(tseries)

        # chunk the rows so dask can process the timeseries out of core
        darr_tseries = da.from_array(tseries, chunks=(chunksize, tseries.shape[1]))

        return darr_tseries
Example #8
def test_clean_detrending():
    n_samples = 21
    n_features = 501  # Must be higher than 500
    signals, _, _ = generate_signals(n_features=n_features,
                                     length=n_samples)
    trends = generate_trends(n_features=n_features,
                             length=n_samples)
    x = signals + trends

    # with ensure_finite=True, the cleaned output should contain no NaNs or infs
    y = signals + trends
    y[20, 150] = np.nan
    y[5, 500] = np.nan
    y[15, 14] = np.inf
    y = nisignal.clean(y, ensure_finite=True)
    assert_true(np.all(np.isfinite(y)))

    # a boolean passed to low_pass/high_pass should raise a TypeError
    assert_raises(TypeError, nisignal.clean, x, low_pass=False)
    assert_raises(TypeError, nisignal.clean, x, high_pass=False)

    # This should remove trends
    x_detrended = nisignal.clean(x, standardize=False, detrend=True,
                                 low_pass=None, high_pass=None)
    np.testing.assert_almost_equal(x_detrended, signals, decimal=13)

    # This should do nothing
    x_undetrended = nisignal.clean(x, standardize=False, detrend=False,
                                   low_pass=None, high_pass=None)
    assert_false(abs(x_undetrended - signals).max() < 0.06)
Example #9
def test_clean_frequencies_using_power_spectrum_density():

    # Create signal
    sx = np.array([np.sin(np.linspace(0, 100, 100) * 1.5),
                   np.sin(np.linspace(0, 100, 100) * 3.),
                   np.sin(np.linspace(0, 100, 100) / 8.),
                   ]).T

    # Create confound
    _, _, confounds = generate_signals(
        n_features=10, n_confounds=10, length=100)

    # Apply low- and high-pass filter (separately)
    t_r = 1.0
    low_pass = 0.1
    high_pass = 0.4
    res_low = clean(sx, detrend=False, standardize=False, low_pass=low_pass,
                    high_pass=None, t_r=t_r)
    res_high = clean(sx, detrend=False, standardize=False, low_pass=None,
                     high_pass=high_pass, t_r=t_r)

    # Compute the power spectral density for both tests
    f, Pxx_den_low = scipy.signal.welch(np.mean(res_low.T, axis=0), fs=t_r)
    f, Pxx_den_high = scipy.signal.welch(np.mean(res_high.T, axis=0), fs=t_r)

    # Verify that the filtered frequencies are removed
    assert np.sum(Pxx_den_low[f >= low_pass * 2.]) <= 1e-4
    assert np.sum(Pxx_den_high[f <= high_pass / 2.]) <= 1e-4
Example #10
def test_clean_frequencies():
    sx1 = np.sin(np.linspace(0, 100, 2000))
    sx2 = np.sin(np.linspace(0, 100, 2000))
    sx = np.vstack((sx1, sx2)).T
    assert_true(clean(sx, standardize=False, high_pass=0.002, low_pass=None)
                .max() > 0.1)
    assert_true(clean(sx, standardize=False, high_pass=0.2, low_pass=None)
                .max() < 0.01)
    assert_true(clean(sx, standardize=False, low_pass=0.01).max() > 0.9)
    assert_raises(ValueError, clean, sx, low_pass=0.4, high_pass=0.5)
Example #12
def custom_clean(X,
                 Y,
                 C,
                 tr,
                 ddict,
                 cfg,
                 high_pass=True,
                 clean_Y=True,
                 standardize=True):
    """ High-passes (optional) and removes confounds (C) from both
    design matrix (X) and data (Y).

    Parameters
    ----------
    X : pd.DataFrame
        Dataframe with design (timepoints x conditions)
    Y : np.ndarray
        2D numpy array (timepoints x voxels)
    C : pd.DataFrame
        Dataframe with confounds (timepoints x confound variables)
    tr : float
        Repetition time of the data, in seconds
    ddict : dict
        Data dictionary, passed through to hp_filter
    cfg : dict
        Config dictionary, passed through to hp_filter
    high_pass : bool
        Whether to high-pass the data or not
    clean_Y : bool
        Whether to also clean Y
    standardize : bool/str
        Whether to standardize the data after cleaning
    """

    if 'constant' in X.columns:
        X = X.drop('constant', axis=1)

    if high_pass:
        # Note to self: Y and C are, by definition, already high-pass filtered
        X.loc[:, :] = hp_filter(X.to_numpy(),
                                tr,
                                ddict,
                                cfg,
                                standardize=False)

    if C is not None:  # remove confounds from X
        X.loc[:, :] = signal.clean(X.to_numpy(),
                                   detrend=False,
                                   standardize=False,
                                   confounds=C)

    if clean_Y:
        Y = signal.clean(Y.copy(),
                         detrend=False,
                         confounds=C,
                         standardize=standardize)

    return X, Y
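
A minimal self-contained sketch of the same residualization step on synthetic data; hp_filter, ddict, and cfg are project-specific and omitted here, so only the confound-removal behavior is illustrated (an assumption, not the full pipeline):

import numpy as np
import pandas as pd
from nilearn import signal

rng = np.random.RandomState(42)
X = pd.DataFrame(rng.randn(100, 2), columns=['cond_a', 'cond_b'])  # design
Y = rng.randn(100, 30)                                             # data
C = pd.DataFrame(rng.randn(100, 6))                                # confounds

# remove confounds from the design and the data, as custom_clean does
X.loc[:, :] = signal.clean(X.to_numpy(), detrend=False, standardize=False,
                           confounds=C.to_numpy())
Y = signal.clean(Y, detrend=False, standardize=True, confounds=C.to_numpy())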
Example #13
def test_clean_frequencies():
    sx1 = np.sin(np.linspace(0, 100, 2000))
    sx2 = np.sin(np.linspace(0, 100, 2000))
    sx = np.vstack((sx1, sx2)).T
    sx_orig = sx.copy()
    assert clean(sx, standardize=False, high_pass=0.002, low_pass=None,
                 t_r=2.5).max() > 0.1
    assert clean(sx, standardize=False, high_pass=0.2, low_pass=None,
                 t_r=2.5).max() < 0.01
    assert clean(sx, standardize=False, low_pass=0.01, t_r=2.5).max() > 0.9
    pytest.raises(ValueError, clean, sx, low_pass=0.4, high_pass=0.5, t_r=2.5)

    # clean should not modify inputs
    sx_cleaned = clean(sx, standardize=False, detrend=False, low_pass=0.2, t_r=2.5)
    assert np.array_equal(sx_orig, sx)
Example #14
def load_fmri(filename, maskname='', sigma=3):
    """
		Reads 4D fmri data. 
		Smooths using 3D Gaussian filter
		Applies mask to data: 
			- If mask not provided, calculates binary mask
		returns fmri data matrix (Time x Voxels)
	"""
    img = nib.load(filename)
    print(img.shape)
    rep_time = img.header.get_zooms()[-1]
    img = image.smooth_img(img, sigma)
    if maskname != '':
        img_mask = nib.load(maskname)
    else:
        print('Mask not provided. Calculating mask ...')
        img_mask = masking.compute_background_mask(img)
    img = masking.apply_mask(img, img_mask)
    print('Mask applied!')
    print('Detrending data!')
    img = signal.clean(img,
                       detrend=True,
                       high_pass=0.01,
                       standardize=False,
                       t_r=rep_time)
    return img
Example #15
def proc_file(file, mapper, save_dr):

    # Get subject name
    name = file.split('/')[-1].split('_')[0].replace('sub-', '')

    # Gen save loc
    save_loc = os.path.join(save_dr, name + '.npy')

    # Check if already exists - if so skip
    if os.path.exists(save_loc):
        return

    # Load data
    data = nib.load(file).get_fdata()

    # Apply mapper
    data = mapper.fit_transform(data)

    # Run clean
    data = clean(signals=data,
                 detrend=True,
                 standardize='zscore')

    # Cast to float32
    data = data.astype('float32')
    
    # Save
    np.save(save_loc, data)
    print(f'saved {save_loc}', flush=True)
Example #16
def connnect_creation(df_int, kind='correlation'):
    """Create connectivity."""
    order = df_int[1]
    session_nb = str(df_int[2])
    filename_id = df_int[3][:-9]
    ts_dirty = load(filename_id + 'basc064' + '/' + 'rfMRI_REST' + session_nb +
                    '_' + order + '_raw')

    ts_ortho = np.loadtxt(filename_id + 'confounds' + '/' + 'rfMRI_REST' +
                          session_nb + '_' + order +
                          '_Movement_Regressors.txt')
    ts = signal.clean(ts_dirty,
                      detrend=True,
                      standardize=True,
                      confounds=ts_ortho,
                      low_pass=None,
                      high_pass=None,
                      t_r=0.72,
                      ensure_finite=False)

    conn_measure = connectome.ConnectivityMeasure(kind=kind)
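    # indiv_connect_mat and mean_connect_mat appear to be module-level dicts
    # in the original source (they are not defined in this excerpt)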
    indiv_connect_mat[kind] = conn_measure.fit_transform([ts])
    mean_connect_mat[kind] = indiv_connect_mat[kind].mean(axis=0)
    connectivity_coefs = connectome.sym_to_vec(indiv_connect_mat[kind],
                                               discard_diagonal=True)
    return connectivity_coefs, ts
Example #17
def test_clean_img():

    rng = np.random.RandomState(0)

    data = rng.randn(10, 10, 10, 100) + .5
    data_flat = data.T.reshape(100, -1)
    data_img = nibabel.Nifti1Image(data, np.eye(4))

    data_img_ = image.clean_img(data_img,
                                detrend=True,
                                standardize=False,
                                low_pass=0.1)
    data_flat_ = signal.clean(data_flat,
                              detrend=True,
                              standardize=False,
                              low_pass=0.1)

    np.testing.assert_almost_equal(data_img_.get_data().T.reshape(100, -1),
                                   data_flat_)
    # if NANs
    data[:, 9, 9] = np.nan
    # if infinity
    data[:, 5, 5] = np.inf
    nan_img = nibabel.Nifti1Image(data, np.eye(4))
    clean_im = image.clean_img(nan_img, ensure_finite=True)
    assert_true(np.all(np.isfinite(clean_im.get_data())))

    # test_clean_img_passing_nifti2image
    data_img_nifti2 = nibabel.Nifti2Image(data, np.eye(4))

    data_img_nifti2_ = image.clean_img(data_img_nifti2,
                                       detrend=True,
                                       standardize=False,
                                       low_pass=0.1)
Example #18
def test_clean_img():

    rng = np.random.RandomState(0)

    data = rng.randn(10, 10, 10, 100) + .5
    data_flat = data.T.reshape(100, -1)
    data_img = nibabel.Nifti1Image(data, np.eye(4))

    assert_raises(
        ValueError, image.clean_img, data_img, t_r=None, low_pass=0.1)

    data_img_ = image.clean_img(
        data_img, detrend=True, standardize=False, low_pass=0.1, t_r=1.0)
    data_flat_ = signal.clean(
        data_flat, detrend=True, standardize=False, low_pass=0.1, t_r=1.0)

    np.testing.assert_almost_equal(data_img_.get_data().T.reshape(100, -1),
                                   data_flat_)
    # if NANs
    data[:, 9, 9] = np.nan
    # if infinity
    data[:, 5, 5] = np.inf
    nan_img = nibabel.Nifti1Image(data, np.eye(4))
    clean_im = image.clean_img(nan_img, ensure_finite=True)
    assert_true(np.all(np.isfinite(clean_im.get_data())))

    # test_clean_img_passing_nifti2image
    data_img_nifti2 = nibabel.Nifti2Image(data, np.eye(4))

    data_img_nifti2_ = image.clean_img(
        data_img_nifti2, detrend=True, standardize=False, low_pass=0.1, t_r=1.0)
Example #19
def test_regressors(mock_data, tmpdir, basic_regressor_config):

    config_file = os.path.join(tmpdir, 'config.json')
    with open(config_file, 'w') as fp:
        json.dump(basic_regressor_config, fp)
    
    lh_label = os.path.join(mock_data, 'schaefer_hemi-L.label.gii')
    rh_label = os.path.join(mock_data, 'schaefer_hemi-R.label.gii')

    lh_func = os.path.join(mock_data, 'schaefer_hemi-L.func.gii')
    rh_func = os.path.join(mock_data, 'schaefer_hemi-R.func.gii')

    # check with all scans
    cmd = (f"nixtract-gifti {tmpdir} --lh_files {lh_func} --rh_files {rh_func} "
           f"--lh_roi_file {lh_label} --rh_roi_file {rh_label} "
           f"-c {config_file}")
    subprocess.run(cmd.split())

    tseries = os.path.join(tmpdir, 'schaefer_hemi-LR_timeseries.tsv')
    assert os.path.exists(tseries)
    actual = pd.read_table(tseries).values

    expected_hemi = np.tile(np.arange(1, 51), (10, 1))
    expected = np.concatenate([expected_hemi, expected_hemi], axis=1)
    
    regressors = pd.read_table(basic_regressor_config['regressor_files'], 
                               usecols=basic_regressor_config['regressors'])
    expected = signal.clean(expected, confounds=regressors, standardize=False, 
                            detrend=False)
    assert np.allclose(actual, expected)

    # check with discard scans
    cmd = (f"nixtract-gifti {tmpdir} --lh_files {lh_func} --rh_files {rh_func} "
        f"--lh_roi_file {lh_label} --rh_roi_file {rh_label} "
        f"-c {config_file} --discard_scans 3")
    subprocess.run(cmd.split())

    tseries = os.path.join(tmpdir, 'schaefer_hemi-LR_timeseries.tsv')
    assert os.path.exists(tseries)
    actual = pd.read_table(tseries).values

    expected_hemi = np.tile(np.arange(1, 51), (7, 1))
    expected = np.concatenate([expected_hemi, expected_hemi], axis=1)
    regressors = regressors.values[3:, :]
    expected = signal.clean(expected, confounds=regressors, standardize=False, 
                            detrend=False)
    assert np.allclose(actual, expected)
Example #20
    def test_detrend_from_nilearn(self):
        test_data = sio.loadmat(
            os.path.join(self.data_path, 'detrend',
                         'MyDetrend_normalize_nii.mat'))
        result = signal.clean(signals=np.transpose(test_data['TC']),
                              t_r=1,
                              detrend=False)
        self.assertEqual(test_data['TCN'].shape, result.shape)
        np.testing.assert_allclose(test_data['TCN'], result, rtol=0.01)
Example #21
def test_clean_detrending():
    n_samples = 21
    n_features = 501  # Must be higher than 500
    signals, _, _ = generate_signals(n_features=n_features,
                                     length=n_samples)
    trends = generate_trends(n_features=n_features,
                             length=n_samples)
    x = signals + trends

    # This should remove trends
    x_detrended = nisignal.clean(x, standardize=False, detrend=True,
                                 low_pass=None, high_pass=None)
    np.testing.assert_almost_equal(x_detrended, signals, decimal=13)

    # This should do nothing
    x_undetrended = nisignal.clean(x, standardize=False, detrend=False,
                                   low_pass=None, high_pass=None)
    assert_false(abs(x_undetrended - signals).max() < 0.06)
Example #23
def test_clean_detrending():
    n_samples = 21
    n_features = 501  # Must be higher than 500
    signals, _, _ = generate_signals(n_features=n_features, length=n_samples)
    trends = generate_trends(n_features=n_features, length=n_samples)
    x = signals + trends
    x_orig = x.copy()

    # with ensure_finite=True, the cleaned output should contain no NaNs or infs
    y = signals + trends
    y[20, 150] = np.nan
    y[5, 500] = np.nan
    y[15, 14] = np.inf
    y_orig = y.copy()

    y_clean = nisignal.clean(y, ensure_finite=True)
    assert np.all(np.isfinite(y_clean))
    # clean should not modify inputs
    # using assert_almost_equal instead of array_equal due to NaNs
    np.testing.assert_almost_equal(y_orig, y, decimal=13)

    # a boolean passed to low_pass/high_pass should raise a TypeError
    pytest.raises(TypeError, nisignal.clean, x, low_pass=False)
    pytest.raises(TypeError, nisignal.clean, x, high_pass=False)

    # This should remove trends
    x_detrended = nisignal.clean(x,
                                 standardize=False,
                                 detrend=True,
                                 low_pass=None,
                                 high_pass=None)
    np.testing.assert_almost_equal(x_detrended, signals, decimal=13)
    # clean should not modify inputs
    assert np.array_equal(x_orig, x)

    # This should do nothing
    x_undetrended = nisignal.clean(x,
                                   standardize=False,
                                   detrend=False,
                                   low_pass=None,
                                   high_pass=None)
    assert not abs(x_undetrended - signals).max() < 0.06
    # clean should not modify inputs
    assert np.array_equal(x_orig, x)
Example #24
def test_regressors(data_dir, mock_data, basic_regressor_config, tmpdir):

    dtseries = os.path.join(mock_data, 'gordon.dtseries.nii')
    roi_file = os.path.join(
        data_dir, 'Gordon333_FreesurferSubcortical.32k_fs_LR.dlabel.nii')

    # actual data (all scans = 10)
    config_file = os.path.join(tmpdir, 'config.json')
    with open(config_file, 'w') as fp:
        json.dump(basic_regressor_config, fp)
    cmd = (f"nixtract-cifti {tmpdir} --input_files {dtseries} "
           f"--roi_file {roi_file}  -c {config_file}")
    subprocess.run(cmd.split())
    actual = pd.read_table(os.path.join(tmpdir,
                                        'gordon_timeseries.tsv')).values

    # expected data (all scans = 10)
    regressors = pd.read_table(basic_regressor_config['regressor_files'],
                               usecols=basic_regressor_config['regressors'])
    expected = np.tile(np.arange(1, 353), (10, 1))
    expected = signal.clean(expected,
                            confounds=regressors,
                            standardize=False,
                            detrend=False)
    assert np.allclose(actual, expected)

    # actual data (discard 3 scans)
    cmd = (f"nixtract-cifti {tmpdir} --input_files {dtseries} "
           f"--roi_file {roi_file} --discard_scans 3 -c {config_file}")
    subprocess.run(cmd.split())
    actual = pd.read_table(os.path.join(tmpdir, 'gordon_timeseries.tsv'))

    # expected data (discard 3 scans)
    regressors = pd.read_table(basic_regressor_config['regressor_files'],
                               usecols=basic_regressor_config['regressors'])
    # discard first three rows to match up with discard scans
    regressors = regressors.values[3:, :]
    expected = np.tile(np.arange(1, 353), (7, 1))
    expected = signal.clean(expected,
                            confounds=regressors,
                            standardize=False,
                            detrend=False)

    assert np.allclose(actual.values, expected)
Example #25
    def shape_estimation(self, conf_vars=None, osf=30, slice_time_ref=0.5,
                         separate_conditions=False, **rf_args):
        
        if not self.preprocessed:
            raise ValueError("Data was not preprocessed yet!")

        self.logger.info(f"Starting HRF estimation for task {self.task}.")
        func_cleans, concat_events = [], []
        for run in range(self.n_runs):
            this_conf = self.preproc['confs'][run].values
            func_clean = signal.clean(
                signals=self.preproc['func_ts'][run],
                confounds=this_conf,
                detrend=False,
                standardize=True,
                t_r=self.tr[run]
            )
            func_cleans.append(func_clean)
            
            # copy the events; otherwise shape_estimation interferes with glm_detect
            event = self.preproc['events'][run].copy()
            event.loc[:, 'onset'] += run * this_conf.shape[0] * TR
            concat_events.append(event)

        func_concat = np.concatenate(func_cleans)
        event_concat = pd.concat(concat_events, axis=0)

        if not np.all(self.tr[0] == np.array(self.tr)):
            self.logger.warning("Not all TRs are the same, but running ResponseFitter on concat signal!")
        
        rf = ResponseFitter(
            input_signal=func_concat,
            sample_rate=1/self.tr[0],
            add_intercept=True,
            slice_time_ref=slice_time_ref,
            oversample_design_matrix=osf
        )
        
        if 'interval' not in rf_args.keys():
            rf_args['interval'] = [0, 20]
        
        if 'basis_set' not in rf_args.keys():
            rf_args['basis_set'] = 'fourier'
            
        if 'n_regressors' not in rf_args.keys():
            rf_args['n_regressors'] = 6
        
        if separate_conditions:
            for con in event_concat.trial_type.unique():
                onsets = event_concat.query('trial_type == @con').onset
                rf.add_event(con, onsets, **rf_args)
        else:
            rf.add_event('stim', event_concat.onset, **rf_args)

        rf.fit()
        self.rf = rf
Example #26
def clean_timeserie(ts, motion):
    """ Returns cleaned timeserie
    """
    return clean(ts,
                 detrend=False,
                 standardize=False,
                 high_pass=None,
                 low_pass=.1,
                 t_r=1.05,
                 confounds=motion)
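
A usage sketch with synthetic data (assuming clean_timeserie above and its clean import are in scope): 200 volumes at the function's hard-coded t_r of 1.05 s, with six motion regressors:

import numpy as np

rng = np.random.RandomState(0)
ts = rng.randn(200, 64)      # timepoints x regions
motion = rng.randn(200, 6)   # six rigid-body motion parameters

ts_clean = clean_timeserie(ts, motion)
print(ts_clean.shape)        # (200, 64)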
Example #27
def clean_volumes(volumes, y, by_col):
    cleaner = lambda col_name: np.atleast_2d(StandardScaler().fit_transform(
        np.nan_to_num(y[col_name].values[:, None])))
    conf_mat = np.hstack([cleaner(col) for col in by_col])

    volumes_deconf = signal.clean(volumes.values,
                                  confounds=conf_mat,
                                  detrend=False,
                                  standardize=False)
    return pd.DataFrame(volumes_deconf, index=volumes.index), y
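
A hypothetical usage sketch with synthetic data (assumes clean_volumes above is in scope along with numpy, pandas, sklearn.preprocessing.StandardScaler, and nilearn.signal): deconfound regional volumes by age and sex:

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
volumes = pd.DataFrame(rng.randn(40, 5),
                       columns=['roi_%d' % i for i in range(5)])
y = pd.DataFrame({'age': rng.rand(40) * 60 + 20,
                  'sex': rng.randint(0, 2, 40).astype(float)})

volumes_deconf, y = clean_volumes(volumes, y, by_col=['age', 'sex'])
print(volumes_deconf.shape)  # (40, 5)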
Example #28
def test_clean_zscore():
    rng = np.random.RandomState(42)
    n_samples = 500
    n_features = 5

    signals, _, _ = generate_signals(n_features=n_features, length=n_samples)

    signals += rng.standard_normal(size=(1, n_features))
    cleaned_signals = clean(signals, standardize='zscore')
    np.testing.assert_almost_equal(cleaned_signals.mean(0), 0)
    np.testing.assert_almost_equal(cleaned_signals.std(0), 1)
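
A quick equivalence sketch mirroring the assertions above (assumption: nilearn's 'zscore' strategy centers each column and scales by the population standard deviation, which is what the test asserts):

import numpy as np
from nilearn.signal import clean

rng = np.random.RandomState(0)
x = rng.randn(100, 4) * 5 + 3          # arbitrary scale and offset
z = clean(x, detrend=False, standardize='zscore')

np.testing.assert_almost_equal(z.mean(axis=0), 0)
np.testing.assert_almost_equal(z.std(axis=0), 1)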
Example #29
def hp_filter(data, tr, ddict, cfg, standardize=True):
    """ High-pass filter (DCT or Savitsky-Golay). """

    n_vol = data.shape[0]
    st_ref = cfg['slice_time_ref']  # offset frametimes by st_ref * tr
    ft = np.linspace(st_ref * tr, (n_vol + st_ref) * tr, n_vol, endpoint=False)

    # Create high-pass filter and clean
    if cfg['high_pass_type'] == 'dct':
        hp_set = dct_set(cfg['high_pass'], ft)
        data = signal.clean(data,
                            detrend=False,
                            standardize=standardize,
                            confounds=hp_set)
    else:  # savgol, hardcode polyorder (maybe make argument?)
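        # NOTE: scipy's savgol_filter has historically required an odd
        # window_length, so the rounding below can raise for even windows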
        window = int(np.round((1 / cfg['high_pass']) / tr))
        data -= savgol_filter(data, window_length=window, polyorder=2, axis=0)
        if standardize:
            data = signal.clean(data, detrend=False, standardize=standardize)

    return data
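
A self-contained sketch of the Savitzky-Golay branch above on synthetic drifting data; dct_set, ddict, and cfg are project-specific, so only the savgol path is reproduced, and the odd-window adjustment is an addition for scipy versions that require it:

import numpy as np
from scipy.signal import savgol_filter

tr, high_pass = 2.0, 0.01                     # seconds, Hz
rng = np.random.RandomState(0)
data = np.cumsum(rng.randn(200, 3), axis=0)   # slowly drifting signals

window = int(np.round((1 / high_pass) / tr))  # ~1/high_pass seconds, in volumes
if window % 2 == 0:                           # some scipy versions need it odd
    window += 1
data_hp = data - savgol_filter(data, window_length=window, polyorder=2, axis=0)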
Example #30
def test_clean_finite_no_inplace_mod():
    """
    Test for verifying that the passed in signal array is not modified.
    For PR #2125 . This test is failing on master, passing in this PR.
    """
    n_samples = 2
    # n_features must be higher than 500
    n_features = 501
    x_orig, _, _ = generate_signals(n_features=n_features, length=n_samples)
    x_orig_initial_copy = x_orig.copy()

    x_orig_with_nans = x_orig.copy()
    x_orig_with_nans[0, 0] = np.nan
    x_orig_with_nans_initial_copy = x_orig_with_nans.copy()

    cleaned_x_orig = clean(x_orig)
    assert np.array_equal(x_orig, x_orig_initial_copy)

    cleaned_x_orig_with_nans = clean(x_orig_with_nans, ensure_finite=True)
    assert np.isnan(x_orig_with_nans_initial_copy[0, 0])
    assert np.isnan(x_orig_with_nans[0, 0])
Example #31
def test_clean_img():

    rng = np.random.RandomState(0)

    data = rng.randn(10, 10, 10, 100) + .5
    data_flat = data.T.reshape(100, -1)
    data_img = nibabel.Nifti1Image(data, np.eye(4))

    assert_raises(ValueError,
                  image.clean_img,
                  data_img,
                  t_r=None,
                  low_pass=0.1)

    data_img_ = image.clean_img(data_img,
                                detrend=True,
                                standardize=False,
                                low_pass=0.1,
                                t_r=1.0)
    data_flat_ = signal.clean(data_flat,
                              detrend=True,
                              standardize=False,
                              low_pass=0.1,
                              t_r=1.0)

    np.testing.assert_almost_equal(data_img_.get_data().T.reshape(100, -1),
                                   data_flat_)
    # if NANs
    data[:, 9, 9] = np.nan
    # if infinity
    data[:, 5, 5] = np.inf
    nan_img = nibabel.Nifti1Image(data, np.eye(4))
    clean_im = image.clean_img(nan_img, ensure_finite=True)
    assert_true(np.all(np.isfinite(clean_im.get_data())))

    # test_clean_img_passing_nifti2image
    data_img_nifti2 = nibabel.Nifti2Image(data, np.eye(4))

    data_img_nifti2_ = image.clean_img(data_img_nifti2,
                                       detrend=True,
                                       standardize=False,
                                       low_pass=0.1,
                                       t_r=1.0)

    # if mask_img
    img, mask_img = data_gen.generate_fake_fmri(shape=(10, 10, 10), length=10)
    data_img_mask_ = image.clean_img(img, mask_img=mask_img)

    # Checks that output with full mask and without is equal
    data_img_ = image.clean_img(img)
    np.testing.assert_almost_equal(data_img_.get_data(),
                                   data_img_mask_.get_data())
Example #32
    def _run_interface(self, runtime):
        func_nii = nb.load(self.inputs.in_file)
        func_data = func_nii.get_data()
        func_shape = func_data.shape
        ntsteps = func_shape[-1]
        tr = func_nii.header.get_zooms()[-1]
        nskip = self.inputs.skip_frames

        if self.inputs.detrend:
            from nilearn.signal import clean

            data = func_data.reshape(-1, ntsteps)
            clean_data = clean(data[:, nskip:].T, t_r=tr, standardize=False).T
            new_shape = (
                func_shape[0],
                func_shape[1],
                func_shape[2],
                clean_data.shape[-1],
            )
            func_data = np.zeros(func_shape)
            func_data[..., nskip:] = clean_data.reshape(new_shape)

        if not isdefined(self.inputs.in_mask):
            _, mask_data, _ = auto_mask(func_data,
                                        nskip=self.inputs.skip_frames)
        else:
            mask_data = nb.load(self.inputs.in_mask).get_data()
            mask_data[..., :nskip] = 0
            mask_data = np.stack([mask_data] * ntsteps, axis=-1)

        if not self.inputs.invert_mask:
            brain = np.ma.array(func_data, mask=(mask_data != 1))
        else:
            mask_data[..., :self.inputs.skip_frames] = 1
            brain = np.ma.array(func_data, mask=(mask_data == 1))

        if self.inputs.no_zscore:
            ts_z = find_peaks(brain)
            total_spikes = []
        else:
            total_spikes, ts_z = find_spikes(brain, self.inputs.spike_thresh)
        total_spikes = list(set(total_spikes))

        out_tsz = op.abspath(self.inputs.out_tsz)
        self._results["out_tsz"] = out_tsz
        np.savetxt(out_tsz, ts_z)

        out_spikes = op.abspath(self.inputs.out_spikes)
        self._results["out_spikes"] = out_spikes
        np.savetxt(out_spikes, total_spikes)
        self._results["num_spikes"] = len(total_spikes)
        return runtime
Example #33
def residualize_group_data(signals, confounds):
    '''
    regresses out confounds from signals
    signals.shape: subjects x n_data_points
    confounds.shape: subjects x n_confounds
    returns residualized_signals.shape: subjects x n_data_points
    '''
    from nilearn.signal import clean
    import numpy as np
    confounds_plus_constant = np.concatenate(
        (confounds, np.ones((confounds.shape[0], 1))), axis=1)
    residualized_signals = clean(signals, detrend=False, standardize=False,
                                 confounds=confounds_plus_constant,
                                 low_pass=None, high_pass=None, t_r=None)
    return residualized_signals
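
A usage sketch with synthetic group data (assumes residualize_group_data above is in scope):

import numpy as np

rng = np.random.RandomState(0)
signals = rng.randn(30, 100)    # 30 subjects x 100 data points
confounds = rng.randn(30, 2)    # 30 subjects x 2 confounds

residuals = residualize_group_data(signals, confounds)
print(residuals.shape)          # (30, 100)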
Example #34
def compute_confounds(imgs, mask_img, n_confounds=5, get_randomized_svd=False,
                      compute_not_mask=False):
    """
    """
    confounds = []
    if not isinstance(imgs, collections.Iterable) or \
            isinstance(imgs, _basestring):
        imgs = [imgs, ]

    img = _utils.check_niimg_4d(imgs[0])
    shape = img.shape[:3]
    affine = get_affine(img)

    if isinstance(mask_img, _basestring):
        mask_img = _utils.check_niimg_3d(mask_img)

    if not _check_same_fov(img, mask_img):
        mask_img = resample_img(
            mask_img, target_shape=shape, target_affine=affine,
            interpolation='nearest')

    if compute_not_mask:
        print("Non mask based confounds extraction")
        not_mask_data = np.logical_not(mask_img.get_data().astype(np.int))
        whole_brain_mask = masking.compute_multi_epi_mask(imgs)
        not_mask = np.logical_and(not_mask_data, whole_brain_mask.get_data())
        mask_img = new_img_like(img, not_mask.astype(np.int), affine)

    for img in imgs:
        print("[Confounds Extraction] {0}".format(img))
        img = _utils.check_niimg_4d(img)
        print("[Confounds Extraction] high ariance confounds computation]")
        high_variance = high_variance_confounds(img, mask_img=mask_img,
                                                n_confounds=n_confounds)
        if compute_not_mask and get_randomized_svd:
            signals = masking.apply_mask(img, mask_img)
            non_constant = np.any(np.diff(signals, axis=0) != 0, axis=0)
            signals = signals[:, non_constant]
            signals = signal.clean(signals, detrend=True)
            print("[Confounds Extraction] Randomized SVD computation")
            U, s, V = randomized_svd(signals, n_components=n_confounds,
                                     random_state=0)
            if high_variance is not None:
                confound_ = np.hstack((U, high_variance))
            else:
                confound_ = U
        else:
            confound_ = high_variance
        confounds.append(confound_)

    return confounds
Example #35
def _load_clean_one(X, masker):
    LEN_FSAV = 163842
    X_data = nib.load(X).agg_data()
    X_ = signal.clean(np.asarray(X_data),
                      detrend=masker.detrend,
                      high_pass=masker.high_pass,
                      low_pass=masker.low_pass,
                      standardize=masker.standardize,
                      t_r=masker.t_r,
                      ensure_finite=True)
    if X_.shape[0] == LEN_FSAV:
        return X_.T
    else:
        return X_
Example #36
    def _merge_and_reduce(self, input_files):

        """

        Clean, temporally reduce, and concatenate resting state matrix files.

        :param input_files: list of input resting state matrix files
        :return signals: concatenated resting state arrays
        """

        signals = []

        for inp in input_files:

            print('Loading {:}'.format(inp.split('/')[-1]))

            matrix = loaded.load(inp)

            # initialize the zero-signal counter on the first iteration
            try:
                z
            except NameError:
                z = np.zeros((matrix.shape[0],))

            zinds = np.where(np.abs(matrix).sum(1) == 0)[0]
            # skip matrices with too many empty (zero-signal) rows
            if len(zinds) <= 3000:
                z[zinds] += 1

                matrix = clean(matrix, standardize=self.standardize,
                               low_pass=self.low_pass, high_pass=self.high_pass,
                               t_r=self.t_r)

                if self.pca_filter:
                    matrix = self._reduce(matrix)

                signals.append(matrix)

        self.mask = z.astype(bool)
        print(self.mask.sum())
        print(len(signals))
        signals = np.column_stack(signals)
        signals = signals[~self.mask, :]
        
        print(signals.shape)

        return signals.T
Example #37
def mask_data(darray,
              roi,
              regressors=None,
              as_vertices=False,
              pre_clean=False,
              **kwargs):
    """[summary]

    Parameters
    ----------
    darray : numpy.ndarray, (n_timepoints, n_vertices)
        Functional vertices
    roi : numpy.ndarray, (n_vertices,)
        Vertices with integer labels denoting the regions
    regressors : numpy.ndarray, optional
        Confound regressors to regress from timeseries, by default None
    as_vertices : bool, optional
        Extract all vertices belonging to a label in roi. Only possible when
        roi is a binary mask, by default False
    pre_clean : bool, optional
        Run nilearn.signal.clean on all vertices prior to masking, rather than
        after. By default False

    Returns
    -------
    numpy.ndarray
        Extracted timeseries
    """
    x = darray.copy()
    if pre_clean:
        x = signal.clean(x, confounds=regressors, **kwargs)
        return _mask(x, roi, as_vertices)
    else:
        timeseries = _mask(x, roi, as_vertices)
        out = signal.clean(timeseries, confounds=regressors, **kwargs)
        return out
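
The _mask helper is not shown in this excerpt; a minimal stand-in consistent with the docstring (a guess for illustration only, not the original implementation) might return raw vertices for a binary roi or the mean timeseries per labeled region:

import numpy as np

def _mask(x, roi, as_vertices=False):
    if as_vertices:                        # raw vertices of a binary mask
        return x[:, roi.astype(bool)]
    labels = np.unique(roi[roi != 0])      # mean timeseries per labeled region
    return np.column_stack([x[:, roi == lab].mean(axis=1) for lab in labels])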
Example #38
def test_clean_img():

    rng = np.random.RandomState(0)

    data = rng.randn(10, 10, 10, 100) + .5
    data_flat = data.T.reshape(100, -1)
    data_img = nibabel.Nifti1Image(data, np.eye(4))

    data_img_ = image.clean_img(
        data_img, detrend=True, standardize=False, low_pass=0.1)
    data_flat_ = signal.clean(
        data_flat, detrend=True, standardize=False, low_pass=0.1)

    np.testing.assert_almost_equal(data_img_.get_data().T.reshape(100, -1),
                                   data_flat_)
Example #39
def preprocess_bold_fmri(bold,
                         mask=None,
                         detrend=True,
                         standardize='zscore',
                         **kwargs):
    '''Preprocesses data and returns ndarray.'''
    if mask:
        data = apply_mask(bold, mask)
    else:
        if not isinstance(bold, Nifti1Image):
            data = load(bold).get_data()
        else:
            data = bold.get_data()
        data = np.reshape(data, (-1, data.shape[-1])).T
    return clean(data, detrend=detrend, standardize=standardize, **kwargs)
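
A usage sketch with an in-memory image, so no files are needed (assumes preprocess_bold_fmri above and its module imports are in scope; Nifti1Image comes from nibabel):

import numpy as np
from nibabel import Nifti1Image

rng = np.random.RandomState(0)
bold = Nifti1Image(rng.randn(4, 4, 4, 50), affine=np.eye(4))

data = preprocess_bold_fmri(bold, detrend=True, standardize='zscore')
print(data.shape)  # (50, 64): timepoints x voxels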
Example #40
def out_brain_confounds(epi_img, mask_img):
    """ Return the 5 principal components of the signal outside the
        brain.
    """
    mask_img = check_niimg(mask_img)
    mask_img = nibabel.Nifti1Image(
        np.logical_not(mask_img.get_data()).astype(np.int),
        mask_img.get_affine())
    sigs = masking.apply_mask(epi_img, mask_img)
    # Remove the constant signals
    non_constant = np.any(np.diff(sigs, axis=0) != 0, axis=0)
    sigs = sigs[:, non_constant]
    sigs = signal.clean(sigs, detrend=True)
    U, s, V = randomized_svd(sigs, 5, random_state=0)
    return U
Example #41
    def _run_interface(self, runtime):
        func_nii = nb.load(self.inputs.in_file)
        func_data = func_nii.get_data()
        func_shape = func_data.shape
        ntsteps = func_shape[-1]
        tr = func_nii.get_header().get_zooms()[-1]
        nskip = self.inputs.skip_frames

        if self.inputs.detrend:
            data = func_data.reshape(-1, ntsteps)
            clean_data = clean(data[:, nskip:].T, t_r=tr, standardize=False).T
            new_shape = (
                func_shape[0], func_shape[1], func_shape[2], clean_data.shape[-1])
            func_data = np.zeros(func_shape)
            func_data[..., nskip:] = clean_data.reshape(new_shape)

        if not isdefined(self.inputs.in_mask):
            _, mask_data, _ = auto_mask(
                func_data, nskip=self.inputs.skip_frames)
        else:
            mask_data = nb.load(self.inputs.in_mask).get_data()
            mask_data[..., :nskip] = 0
            mask_data = np.stack([mask_data] * ntsteps, axis=-1)

        if not self.inputs.invert_mask:
            brain = np.ma.array(func_data, mask=(mask_data != 1))
        else:
            mask_data[..., :self.inputs.skip_frames] = 1
            brain = np.ma.array(func_data, mask=(mask_data == 1))

        if self.inputs.no_zscore:
            ts_z = find_peaks(brain)
            total_spikes = []
        else:
            total_spikes, ts_z = find_spikes(
                brain, self.inputs.spike_thresh)
        total_spikes = list(set(total_spikes))

        out_tsz = op.abspath(self.inputs.out_tsz)
        self._results['out_tsz'] = out_tsz
        np.savetxt(out_tsz, ts_z)

        out_spikes = op.abspath(self.inputs.out_spikes)
        self._results['out_spikes'] = out_spikes
        np.savetxt(out_spikes, total_spikes)
        self._results['num_spikes'] = len(total_spikes)
        return runtime
Example #42
def sym_to_vec(symmetric, discard_diagonal=False, confounds=None):
    """Return the flattened lower triangular part of an array.

    If diagonal is kept, diagonal elements are divided by sqrt(2) to conserve
    the norm.

    Acts on the last two dimensions of the array if not 2-dimensional.

    .. versionadded:: 0.2

    Parameters
    ----------
    symmetric : numpy.ndarray, shape (..., n_features, n_features)
        Input array.

    discard_diagonal : boolean, optional
        If True, the values of the diagonal are not returned.
        Default is False.

    confounds: CSV file or array-like, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.
        shape: (number of scans, number of confounds)

    Returns
    -------
    output : numpy.ndarray
        The output flattened lower triangular part of symmetric. Shape is
        (..., n_features * (n_features + 1) / 2) if discard_diagonal is False and
        (..., (n_features - 1) * n_features / 2) otherwise.


    """
    if discard_diagonal:
        # No scaling, we directly return the values
        tril_mask = np.tril(np.ones(symmetric.shape[-2:]), k=-1).astype(bool)
        return symmetric[..., tril_mask]
    scaling = np.ones(symmetric.shape[-2:])
    np.fill_diagonal(scaling, sqrt(2.))
    tril_mask = np.tril(np.ones(symmetric.shape[-2:])).astype(bool)
    vec = symmetric[..., tril_mask] / scaling[tril_mask]

    if confounds is not None:
        vec = signal.clean(vec, confounds=confounds)

    return vec
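
A usage sketch on a small symmetric matrix (assumes sym_to_vec above is in scope with numpy and math.sqrt imported): with the diagonal kept, a 3 x 3 matrix flattens to 3 * 4 / 2 = 6 values with diagonal entries divided by sqrt(2); discarding the diagonal leaves 2 * 3 / 2 = 3 values:

import numpy as np

sym = np.array([[1., 2., 3.],
                [2., 4., 5.],
                [3., 5., 6.]])

vec = sym_to_vec(sym)
vec_nodiag = sym_to_vec(sym, discard_diagonal=True)
print(vec.shape, vec_nodiag.shape)  # (6,) (3,)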
Example #43
def test_clean_img():

    rng = np.random.RandomState(0)

    data = rng.randn(10, 10, 10, 100) + .5
    data_flat = data.T.reshape(100, -1)
    data_img = nibabel.Nifti1Image(data, np.eye(4))

    assert_raises(
        ValueError, image.clean_img, data_img, t_r=None, low_pass=0.1)

    data_img_ = image.clean_img(
        data_img, detrend=True, standardize=False, low_pass=0.1, t_r=1.0)
    data_flat_ = signal.clean(
        data_flat, detrend=True, standardize=False, low_pass=0.1, t_r=1.0)

    np.testing.assert_almost_equal(data_img_.get_data().T.reshape(100, -1),
                                   data_flat_)
    # if NANs
    data[:, 9, 9] = np.nan
    # if infinity
    data[:, 5, 5] = np.inf
    nan_img = nibabel.Nifti1Image(data, np.eye(4))
    clean_im = image.clean_img(nan_img, ensure_finite=True)
    assert_true(np.all(np.isfinite(clean_im.get_data())))

    # test_clean_img_passing_nifti2image
    data_img_nifti2 = nibabel.Nifti2Image(data, np.eye(4))

    data_img_nifti2_ = image.clean_img(
        data_img_nifti2, detrend=True, standardize=False, low_pass=0.1, t_r=1.0)

    # if mask_img
    img, mask_img = data_gen.generate_fake_fmri(shape=(10, 10, 10), length=10)
    data_img_mask_ = image.clean_img(img, mask_img=mask_img)

    # Checks that output with full mask and without is equal
    data_img_ = image.clean_img(img)
    np.testing.assert_almost_equal(data_img_.get_data(),
                                   data_img_mask_.get_data())
Example #44
def covariance_matrix(series, gm_index, confounds=None):
    series = load(series)
    if series.shape[1] == 0:
        # Empty series because the region is empty
        return np.zeros((1, 1))

    if confounds is not None and np.ndim(confounds) == 3:
        confounds_ = []
        for c in confounds:
            c = load(c)
            if isinstance(c, str) or np.isfinite(c).all():
                confounds_.append(c)
        confounds = confounds_
    series = signal.clean(series, confounds=confounds)
    estimator = covariance.LedoitWolf()
    # Keep only gm regions
    series = series[:, np.array(gm_index)]
    try:
        estimator.fit(series)
        return estimator.covariance_, estimator.precision_
    except Exception as e:
        print(e)
        return np.eye(series.shape[1]), np.eye(series.shape[1])
Example #45
def fmricarpetplot(func_data, segmentation, outer_gs, tr=None, nskip=4):
    """
    Plot "the plot"
    """
    from nilearn.signal import clean

    # Define TR and number of frames
    notr = False
    if tr is None:
        notr = True
        tr = 1.
    ntsteps = func_data.shape[-1]

    data = func_data[segmentation > 0].reshape(-1, ntsteps)
    # Detrend data
    detrended = clean(data.T, t_r=tr).T

    # Order following segmentation labels
    seg = segmentation[segmentation > 0].reshape(-1)
    seg_labels = np.unique(seg)

    # Labels meaning
    cort_gm = seg_labels[(seg_labels > 100) & (seg_labels < 200)].tolist()
    deep_gm = seg_labels[(seg_labels > 30) & (seg_labels < 100)].tolist()
    cerebellum = [255]
    wm_csf = seg_labels[seg_labels < 10].tolist()
    seg_labels = cort_gm + deep_gm + cerebellum + wm_csf

    label_id = 0
    newsegm = np.zeros_like(seg)
    for _lab in seg_labels:
        newsegm[seg == _lab] = label_id
        label_id += 1
    order = np.argsort(newsegm)

    # Define nested GridSpec
    gs = mgs.GridSpecFromSubplotSpec(1, 2, subplot_spec=outer_gs,
                                     width_ratios=[1, 100], wspace=0.0)

    # Segmentation colorbar
    ax0 = plt.subplot(gs[0])
    ax0.set_yticks([])
    ax0.set_xticks([])

    colors1 = plt.cm.summer(np.linspace(0., 1., len(cort_gm)))
    colors2 = plt.cm.autumn(np.linspace(0., 1., len(deep_gm) + 1))[::-1,...]
    colors3 = plt.cm.winter(np.linspace(0., .5, len(wm_csf)))[::-1,...]
    cmap = LinearSegmentedColormap.from_list('my_colormap', np.vstack((colors1, colors2, colors3)))

    ax0.imshow(newsegm[order, np.newaxis], interpolation='nearest', aspect='auto',
               cmap=cmap, vmax=len(seg_labels) - 1, vmin=0)
    ax0.grid(False)
    ax0.set_ylabel('voxels')

    # Carpet plot
    ax1 = plt.subplot(gs[1])
    theplot = ax1.imshow(detrended[order, :], interpolation='nearest',
                         aspect='auto', cmap='gray', vmin=-2, vmax=2)

    ax1.grid(False)
    ax1.set_yticks([])
    ax1.set_yticklabels([])

    # Set 10 frame markers in X axis
    interval = int(detrended.shape[-1] + 1) // 10
    xticks = list(
        range(0, detrended.shape[-1])[::interval]) + [detrended.shape[-1]-1]
    ax1.set_xticks(xticks)

    if notr:
        ax1.set_xlabel('time (frame #)')
    else:
        ax1.set_xlabel('time (s)')
        labels = tr * (np.array(xticks))
        ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()])

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        ax0.spines[side].set_color('none')
        ax0.spines[side].set_visible(False)
        ax1.spines[side].set_color('none')
        ax1.spines[side].set_visible(False)

    ax1.yaxis.set_ticks_position('left')
    ax1.xaxis.set_ticks_position('bottom')
    ax1.spines["bottom"].set_position(('outward', 20))
    ax1.spines["left"].set_color('none')
    ax1.spines["left"].set_visible(False)

    ax0.spines["left"].set_position(('outward', 20))
    ax0.spines["bottom"].set_color('none')
    ax0.spines["bottom"].set_visible(False)

    return [ax0, ax1], gs
               
             
Example #46
                plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                           ncol=3, mode="expand", borderaxespad=0., fontsize=10)

                pdf.savefig()
                plt.close()

                # create an array of all regressors and save it to a text file
                all_reg = np.hstack((art_dat, mv, mv2, dmv, dmv2, vent, surr,
                                     whit, beat, hv))
                np.savetxt(save_reg, all_reg)

                func_raw_dat = masker_epi.fit_transform(func_img)
                func_regressed_dat = signal.clean(func_raw_dat, confounds=all_reg)
                func_regressed = masker_epi.inverse_transform(func_regressed_dat)
                nibabel.save(func_regressed, save_func_reg)

'''
art_titles = []
for a in range(art_dat.shape[1]):
    art_titles.append('art' + str(a))
reg_titles = np.hstack((art_titles, ['Tx', 'Ty', 'Tz', 'Rx', 'Ry', 'Rz', 'dTx',
                                     'dTy', 'dTz', 'dRx', 'dRy', 'dRz', 'Tx2',
                                     'Ty2', 'Tz2', 'Rx2', 'Ry2', 'Rz2', 'dTx2',
                                     'dTy2', 'dTz2', 'dRx2', 'dRy2', 'dRz2',
                                     'ventricles', 'surroundings',
                                     'white matter', 'brain stem cistern',
                                     'hv1', 'hv2', 'hv3', 'hv4', 'hv5']))
'''
Example #47
def clean_timeserie(ts, motion):
    """ Returns the cleaned time series
    """
    return clean(ts, detrend=False, standardize=False,
                 high_pass=None, low_pass=.1, t_r=1.05,
                 confounds=motion)
Example #48
def plot_carpet(img, atlaslabels, detrend=True, nskip=0, size=(950, 800),
                subplot=None, title=None, output_file=None, legend=False,
                lut=None, tr=None):
    """
    Plot an image representation of voxel intensities across time, also known
    as the "carpet plot" or "Power plot". See Jonathan Power, NeuroImage
    2017 Jul 1; 154:150-158.

    Parameters
    ----------

        img : Niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
            4D input image
        atlaslabels: ndarray
            A 3D array of integer labels from an atlas, resampled into ``img`` space.
        detrend : boolean, optional
            Detrend and standardize the data prior to plotting.
        nskip : int
            Number of volumes at the beginning of the scan marked as nonsteady state.
        size : tuple, optional
            Maximum (voxels, timepoints) to plot; larger data are decimated
            to save memory.
        lut : ndarray, optional
            Lookup table mapping atlas labels onto the plotted tissue classes.
            A default table is built if None.
        subplot : matplotlib GridSpec subplot, optional
            The subplot used to display the plot. If None, the complete
            figure is used.
        title : string, optional
            The title displayed on the figure.
        output_file : string, or None, optional
            The name of an image file to export the plot to. Valid extensions
            are .png, .pdf, .svg. If output_file is not None, the plot
            is saved to a file, and the display is closed.
        legend : bool
            Whether to render the average functional series with ``atlaslabels`` as
            overlay.
        tr : float, optional
            Specify the TR; if given, the x axis is labeled in seconds.
            If left as None, frame numbers are plotted instead of time.
    """

    # Define TR and number of frames
    notr = False
    if tr is None:
        notr = True
        tr = 1.

    img_nii = check_niimg_4d(img, dtype='auto',)
    func_data = _safe_get_data(img_nii, ensure_finite=True)
    ntsteps = func_data.shape[-1]

    data = func_data[atlaslabels > 0].reshape(-1, ntsteps)
    seg = atlaslabels[atlaslabels > 0].reshape(-1)

    # Map segmentation
    if lut is None:
        lut = np.zeros((256, ), dtype='int')
        lut[1:11] = 1
        lut[255] = 2
        lut[30:99] = 3
        lut[100:201] = 4

    # Apply lookup table
    newsegm = lut[seg.astype(int)]

    # Decimate rows/columns so the plotted matrix stays within `size`
    p_dec = 1 + data.shape[0] // size[0]
    if p_dec > 1:
        data = data[::p_dec, :]
        newsegm = newsegm[::p_dec]

    t_dec = 1 + data.shape[1] // size[1]
    if t_dec > 1:
        data = data[:, ::t_dec]

    # Detrend data
    v = (None, None)
    if detrend:
        data = clean(data.T, t_r=tr).T
        v = (-2, 2)

    # Order following segmentation labels
    order = np.argsort(newsegm)[::-1]

    # If subplot is not defined
    if subplot is None:
        subplot = mgs.GridSpec(1, 1)[0]

    # Define nested GridSpec
    wratios = [1, 100, 20]
    gs = mgs.GridSpecFromSubplotSpec(1, 2 + int(legend), subplot_spec=subplot,
                                     width_ratios=wratios[:2 + int(legend)],
                                     wspace=0.0)

    mycolors = ListedColormap(cm.get_cmap('tab10').colors[:4][::-1])

    # Segmentation colorbar
    ax0 = plt.subplot(gs[0])
    ax0.set_yticks([])
    ax0.set_xticks([])
    ax0.imshow(newsegm[order, np.newaxis], interpolation='none', aspect='auto',
               cmap=mycolors, vmin=1, vmax=4)
    ax0.grid(False)
    ax0.spines["left"].set_visible(False)
    ax0.spines["bottom"].set_color('none')
    ax0.spines["bottom"].set_visible(False)

    # Carpet plot
    ax1 = plt.subplot(gs[1])
    ax1.imshow(data[order, ...], interpolation='nearest', aspect='auto', cmap='gray',
               vmin=v[0], vmax=v[1])

    ax1.grid(False)
    ax1.set_yticks([])
    ax1.set_yticklabels([])

    # Place a limited number of frame markers along the x axis
    interval = max(int(data.shape[-1] + 1) // 5, 1)
    xticks = list(range(0, data.shape[-1])[::interval])
    ax1.set_xticks(xticks)
    if notr:
        ax1.set_xlabel('time (frame #)')
    else:
        ax1.set_xlabel('time (s)')
    labels = tr * (np.array(xticks)) * t_dec
    ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=5)

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        ax0.spines[side].set_color('none')
        ax0.spines[side].set_visible(False)
        ax1.spines[side].set_color('none')
        ax1.spines[side].set_visible(False)

    ax1.yaxis.set_ticks_position('left')
    ax1.xaxis.set_ticks_position('bottom')
    ax1.spines["bottom"].set_visible(False)
    ax1.spines["left"].set_color('none')
    ax1.spines["left"].set_visible(False)

    if legend:
        gslegend = mgs.GridSpecFromSubplotSpec(
            5, 1, subplot_spec=gs[2], wspace=0.0, hspace=0.0)
        epiavg = func_data.mean(3)
        epinii = nb.Nifti1Image(epiavg, img_nii.affine, img_nii.header)
        segnii = nb.Nifti1Image(lut[atlaslabels.astype(int)], epinii.affine, epinii.header)
        segnii.set_data_dtype('uint8')

        nslices = epiavg.shape[-1]
        coords = np.linspace(int(0.10 * nslices), int(0.95 * nslices), 5).astype(np.uint8)
        for i, c in enumerate(coords.tolist()):
            ax2 = plt.subplot(gslegend[i])
            plot_img(segnii, bg_img=epinii, axes=ax2, display_mode='z',
                     annotate=False, cut_coords=[c], threshold=0.1, cmap=mycolors,
                     interpolation='nearest')

    if output_file is not None:
        figure = plt.gcf()
        figure.savefig(output_file, bbox_inches='tight')
        plt.close(figure)
        figure = None
        return output_file

    return [ax0, ax1], gs
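
A minimal call sketch for plot_carpet, assuming the definition above and its imports are available, and that a 4D BOLD image and an integer atlas already resampled into its space exist at the (placeholder) paths below:

import numpy as np
import nibabel as nb
import matplotlib.pyplot as plt

img = nb.load('bold.nii.gz')                        # placeholder path
atlas_img = nb.load('atlas_in_bold_space.nii.gz')   # placeholder path
atlaslabels = np.asanyarray(atlas_img.dataobj).astype(int)

plt.figure(figsize=(10, 8))
axes, gs = plot_carpet(img, atlaslabels, tr=2.0)
plt.show()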
Exemplo n.º 49
0
def test_clean_confounds():
    signals, noises, confounds = generate_signals(n_features=41,
                                                  n_confounds=5, length=45)
    # No signal: output must be zero.
    eps = np.finfo(np.float64).eps  # np.float is removed in recent NumPy
    noises1 = noises.copy()
    cleaned_signals = nisignal.clean(noises, confounds=confounds,
                                     detrend=True, standardize=False)
    assert_true(abs(cleaned_signals).max() < 100. * eps)
    np.testing.assert_almost_equal(noises, noises1, decimal=12)

    # With signal: output must be orthogonal to confounds
    cleaned_signals = nisignal.clean(signals + noises, confounds=confounds,
                                     detrend=False, standardize=True)
    assert_true(abs(np.dot(confounds.T, cleaned_signals)).max() < 1000. * eps)

    # Same output when a constant confound is added
    confounds1 = np.hstack((np.ones((45, 1)), confounds))
    cleaned_signals1 = nisignal.clean(signals + noises, confounds=confounds1,
                                      detrend=False, standardize=True)
    np.testing.assert_almost_equal(cleaned_signals1, cleaned_signals)

    # Test detrending. No trend should exist in the output.
    # Add a linear trend to the confounds, in place, through the transposed
    # view.
    temp = confounds.T
    temp += np.arange(confounds.shape[0])

    cleaned_signals = nisignal.clean(signals + noises, confounds=confounds,
                                     detrend=False, standardize=False)
    coeffs = np.polyfit(np.arange(cleaned_signals.shape[0]),
                        cleaned_signals, 1)
    assert_true((abs(coeffs) > 1e-3).any())   # trends remain

    cleaned_signals = nisignal.clean(signals + noises, confounds=confounds,
                                     detrend=True, standardize=False)
    coeffs = np.polyfit(np.arange(cleaned_signals.shape[0]),
                        cleaned_signals, 1)
    assert_true((abs(coeffs) < 100. * eps).all())  # trend removed

    # Test no-op
    input_signals = 10 * signals
    cleaned_signals = nisignal.clean(input_signals, detrend=False,
                                     standardize=False)
    np.testing.assert_almost_equal(cleaned_signals, input_signals)

    cleaned_signals = nisignal.clean(input_signals, detrend=False,
                                     standardize=True)
    np.testing.assert_almost_equal(cleaned_signals.var(axis=0),
                                   np.ones(cleaned_signals.shape[1]))

    # Test with confounds read from a file. Smoke test only (result has
    # no meaning).
    current_dir = os.path.split(__file__)[0]

    signals, _, confounds = generate_signals(n_features=41,
                                             n_confounds=3, length=20)
    filename1 = os.path.join(current_dir, "data", "spm_confounds.txt")
    filename2 = os.path.join(current_dir, "data",
                             "confounds_with_header.csv")

    nisignal.clean(signals, detrend=False, standardize=False,
                   confounds=filename1)
    nisignal.clean(signals, detrend=False, standardize=False,
                   confounds=filename2)
    nisignal.clean(signals, detrend=False, standardize=False,
                   confounds=confounds[:, 1])

    # Use a list containing two filenames, a 2D array and a 1D array
    nisignal.clean(signals, detrend=False, standardize=False,
                   confounds=[filename1, confounds[:, 0:2],
                              filename2, confounds[:, 2]])

    # Test error handling
    assert_raises(TypeError, nisignal.clean, signals, confounds=1)
    assert_raises(ValueError, nisignal.clean, signals, confounds=np.zeros(2))
    assert_raises(ValueError, nisignal.clean, signals,
                  confounds=np.zeros((2, 2)))
    assert_raises(ValueError, nisignal.clean, signals,
                  confounds=np.zeros((2, 3, 4)))
    assert_raises(ValueError, nisignal.clean, signals[:-1, :],
                  confounds=filename1)
    assert_raises(TypeError, nisignal.clean, signals,
                  confounds=[None])
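
Outside the test harness, the orthogonality property checked above is easy to see directly; a condensed sketch on synthetic data (shapes arbitrary):

import numpy as np
from nilearn import signal as nisignal

rng = np.random.RandomState(42)
sig = rng.randn(45, 41)       # synthetic signals
conf = rng.randn(45, 5)       # synthetic confounds

cleaned = nisignal.clean(sig, confounds=conf, detrend=False, standardize=True)
# The residuals are orthogonal to every confound column, up to float error.
print(np.abs(conf.T.dot(cleaned)).max())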