Example #1
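These snippets come from the nltools test suite and tutorials. They assume a common import preamble along these lines (a reconstruction from the names used below, not part of the original snippets):

import os
import glob
import numpy as np
import pandas as pd
import nibabel as nb
from nltools.simulator import Simulator
from nltools.data import Brain_Data, Adjacency, Groupby
from nltools.mask import create_sphere
from nltools.stats import threshold, align
from nltools.prefs import MNI_Template
from IPython.display import YouTubeVideo
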
def test_data(tmpdir):
    sim = Simulator()
    r = 10
    sigma = 1
    y = [0, 1]
    n_reps = 3
    output_dir = str(tmpdir)
    sim.create_data(y, sigma, reps=n_reps, output_dir=output_dir)

    shape_3d = (91, 109, 91)
    shape_2d = (6, 238955)
    y = pd.read_csv(str(tmpdir.join('y.csv')), header=None, index_col=None).T
    flist = glob.glob(str(tmpdir.join('centered*.nii.gz')))
    dat = Brain_Data(data=flist,Y=y)

    # Test shape
    assert dat.shape() == shape_2d

    # Test Mean
    assert dat.mean().shape()[0] == shape_2d[1]

    # Test Std
    assert dat.std().shape()[0] == shape_2d[1]

    # Test to_nifti
    d = dat.to_nifti()
    assert d.shape[0:3] == shape_3d

    # Test T-test
    out = dat.ttest()
    assert out['t'].shape()[0] == shape_2d[1]

    # Test Regress
    dat.X = pd.DataFrame({'Intercept': np.ones(len(dat.Y)),
                          'X1': np.array(dat.Y).flatten()}, index=None)
    out = dat.regress()
    assert out['beta'].shape() == (2, shape_2d[1])

    # Test indexing
    assert out['t'][1].shape()[0] == shape_2d[1]

    # Test threshold
    i = 1
    tt = threshold(out['t'][i], out['p'][i], .05)
    assert tt.shape()[0] == shape_2d[1]
Example #2
def test_data(tmpdir):
    sim = Simulator()
    r = 10
    sigma = 1
    y = [0, 1]
    n_reps = 3
    output_dir = str(tmpdir)
    sim.create_data(y, sigma, reps=n_reps, output_dir=output_dir)

    shape_3d = (91, 109, 91)
    shape_2d = (6, 238955)
    y = pd.read_csv(os.path.join(str(tmpdir.join('y.csv'))),
                    header=None,
                    index_col=None).T
    flist = glob.glob(str(tmpdir.join('centered*.nii.gz')))

    # Test load list
    dat = Brain_Data(data=flist, Y=y)

    # Test load file
    assert Brain_Data(flist[0])

    # Test to_nifti
    d = dat.to_nifti()
    assert d.shape[0:3] == shape_3d

    # Test load nibabel
    assert Brain_Data(d)

    # Test shape
    assert dat.shape() == shape_2d

    # Test Mean
    assert dat.mean().shape()[0] == shape_2d[1]

    # Test Std
    assert dat.std().shape()[0] == shape_2d[1]

    # Test add
    new = dat + dat
    assert new.shape() == shape_2d

    # Test subtract
    new = dat - dat
    assert new.shape() == shape_2d

    # Test multiply
    new = dat * dat
    assert new.shape() == shape_2d

    # Test Iterator
    x = [x for x in dat]
    assert len(x) == len(dat)
    assert len(x[0].data.shape) == 1

    # Test T-test
    out = dat.ttest()
    assert out['t'].shape()[0] == shape_2d[1]

    # Test T-test - permutation method
    # out = dat.ttest(threshold_dict={'permutation':'tfce','n_permutations':50,'n_jobs':1})
    # assert out['t'].shape()[0]==shape_2d[1]

    # Test Regress
    dat.X = pd.DataFrame(
        {
            'Intercept': np.ones(len(dat.Y)),
            'X1': np.array(dat.Y).flatten()
        },
        index=None)
    out = dat.regress()
    assert out['beta'].shape() == (2, shape_2d[1])

    # Test indexing
    assert out['t'][1].shape()[0] == shape_2d[1]

    # Test threshold
    i = 1
    tt = threshold(out['t'][i], out['p'][i], .05)
    assert isinstance(tt, Brain_Data)

    # Test write
    dat.write(os.path.join(str(tmpdir.join('test_write.nii'))))
    assert Brain_Data(os.path.join(str(tmpdir.join('test_write.nii'))))

    # Test append
    assert dat.append(dat).shape()[0] == shape_2d[0] * 2

    # Test distance
    distance = dat.distance(method='euclidean')
    assert distance.shape == (shape_2d[0], shape_2d[0])

    # Test predict
    stats = dat.predict(algorithm='svm',
                        cv_dict={
                            'type': 'kfolds',
                            'n_folds': 2,
                            'n': len(dat.Y)
                        },
                        plot=False,
                        **{'kernel': "linear"})

    # SVM with Platt scaling (probability=True), which outputs the probability
    # of each class; no cross-validation here (cv_dict=None).
    stats = dat.predict(algorithm='svm',
                        cv_dict=None,
                        plot=False,
                        **{
                            'kernel': 'linear',
                            'probability': True
                        })

    assert isinstance(stats['weight_map'], Brain_Data)
    # Logistic classification, with 5-fold cross-validation.

    stats = dat.predict(algorithm='logistic',
                        cv_dict={
                            'type': 'kfolds',
                            'n_folds': 5,
                            'n': len(dat.Y)
                        },
                        plot=False)
    assert isinstance(stats['weight_map'], Brain_Data)

    # Ridge classification (no cross-validation: cv_dict=None).
    stats = dat.predict(algorithm='ridgeClassifier', cv_dict=None, plot=False)
    assert isinstance(stats['weight_map'], Brain_Data)

    # Test Similarity
    r = dat.similarity(stats['weight_map'])
    assert len(r) == shape_2d[0]
    r2 = dat.similarity(stats['weight_map'].to_nifti())
    assert len(r2) == shape_2d[0]

    # Test apply_mask - might move part of this to test mask suite
    s1 = create_sphere([41, 64, 55], radius=10)
    assert isinstance(s1, nb.Nifti1Image)
    s2 = Brain_Data(s1)
    masked_dat = dat.apply_mask(s1)
    assert masked_dat.shape()[1] == np.sum(s2.data != 0)

    # Test extract_roi
    mask = create_sphere([41, 64, 55], radius=10)
    assert len(dat.extract_roi(mask)) == shape_2d[0]

    # Test r_to_z
    z = dat.r_to_z()
    assert z.shape() == dat.shape()

    # Test copy
    d_copy = dat.copy()
    assert d_copy.shape() == dat.shape()

    # Test detrend
    detrend = dat.detrend()
    assert detrend.shape() == dat.shape()
In the remainder of the tutorial, we will move from simulation to working with real data.

Let's watch another video by Tor Wager on how multiple comparison approaches are used in practice, highlighting some of the pitfalls of the different approaches.

YouTubeVideo('N7Iittt8HrU')

We will be exploring two simple and fast ways to threshold your group analyses.

First, we will simply threshold based on an arbitrary statistical cutoff. The value is completely arbitrary, but it is common to start with something like p < .001. We call this *uncorrected* because the threshold is applied independently at every voxel; we are not controlling for the number of tests.

con1_name = 'horizontal_checkerboard'
data_dir = '../data/localizer'
con1_file_list = glob.glob(os.path.join(data_dir, 'derivatives','fmriprep','*', 'func', f'sub*_{con1_name}*nii.gz'))
con1_file_list.sort()
con1_dat = Brain_Data(con1_file_list)
con1_stats = con1_dat.ttest(threshold_dict={'unc':.001})

con1_stats['thr_t'].plot()

We see some significant activations in visual cortex, but we also see strong t-statistics in auditory cortex.

Why do you think this is?



We can also easily run FDR correction by changing the `threshold_dict` input. We will use a q value of 0.05 to control our false discovery rate.

con1_stats = con1_dat.ttest(threshold_dict={'fdr':.05})
con1_stats['thr_t'].plot()

You can see that at least for this particular contrast, the FDR threshold appears to be more liberal than p < 0.001 uncorrected.
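
One quick way to check this (a sketch using only methods shown above; the variable names are ours) is to count the voxels surviving each threshold:

unc_stats = con1_dat.ttest(threshold_dict={'unc': .001})
fdr_stats = con1_dat.ttest(threshold_dict={'fdr': .05})
print('voxels surviving p < .001 uncorrected:', np.sum(unc_stats['thr_t'].data != 0))
print('voxels surviving FDR q < .05:', np.sum(fdr_stats['thr_t'].data != 0))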
Example #4
def test_brain_data(tmpdir):

    # Add '3mm' to the list to test that resolution as well
    for resolution in ['2mm']:

        MNI_Template["resolution"] = resolution

        sim = Simulator()
        r = 10
        sigma = 1
        y = [0, 1]
        n_reps = 3
        output_dir = str(tmpdir)
        dat = sim.create_data(y, sigma, reps=n_reps, output_dir=output_dir)

        if MNI_Template["resolution"] == '2mm':
            shape_3d = (91, 109, 91)
            shape_2d = (6, 238955)
        elif MNI_Template["resolution"] == '3mm':
            shape_3d = (60, 72, 60)
            shape_2d = (6, 71020)

        y = pd.read_csv(str(tmpdir.join('y.csv')), header=None, index_col=None)
        holdout = pd.read_csv(str(tmpdir.join('rep_id.csv')), header=None, index_col=None)

        # Test load list of 4D images
        file_list = [str(tmpdir.join('data.nii.gz')), str(tmpdir.join('data.nii.gz'))]
        dat = Brain_Data(file_list)
        dat = Brain_Data([nb.load(x) for x in file_list])

        # Test load list
        dat = Brain_Data(data=str(tmpdir.join('data.nii.gz')), Y=y)

        # Test concatenate
        out = Brain_Data([x for x in dat])
        assert isinstance(out, Brain_Data)
        assert len(out) == len(dat)

        # Test to_nifti
        d = dat.to_nifti()
        assert d.shape[0:3] == shape_3d

        # Test load nibabel
        assert Brain_Data(d)

        # Test shape
        assert dat.shape() == shape_2d

        # Test Mean
        assert dat.mean().shape()[0] == shape_2d[1]

        # Test Std
        assert dat.std().shape()[0] == shape_2d[1]

        # Test add
        new = dat + dat
        assert new.shape() == shape_2d

        # Test subtract
        new = dat - dat
        assert new.shape() == shape_2d

        # Test multiply
        new = dat * dat
        assert new.shape() == shape_2d

        # Test Indexing
        index = [0, 3, 1]
        assert len(dat[index]) == len(index)
        index = range(4)
        assert len(dat[index]) == len(index)
        index = dat.Y == 1
        assert len(dat[index.values.flatten()]) == index.values.sum()
        assert len(dat[index]) == index.values.sum()
        assert len(dat[:3]) == 3

        # Test Iterator
        x = [x for x in dat]
        assert len(x) == len(dat)
        assert len(x[0].data.shape) == 1

        # Test T-test
        out = dat.ttest()
        assert out['t'].shape()[0] == shape_2d[1]

        # Test T-test - permutation method
        # out = dat.ttest(threshold_dict={'permutation':'tfce','n_permutations':50,'n_jobs':1})
        # assert out['t'].shape()[0]==shape_2d[1]

        # Test Regress
        dat.X = pd.DataFrame({'Intercept': np.ones(len(dat.Y)),
                              'X1': np.array(dat.Y).flatten()}, index=None)

        # Standard OLS
        out = dat.regress()

        assert type(out['beta'].data) == np.ndarray
        assert type(out['t'].data) == np.ndarray
        assert type(out['p'].data) == np.ndarray
        assert type(out['residual'].data) == np.ndarray
        assert type(out['df'].data) == np.ndarray
        assert out['beta'].shape() == (2, shape_2d[1])
        assert out['t'][1].shape()[0] == shape_2d[1]

        # Robust OLS
        out = dat.regress(mode='robust')

        assert type(out['beta'].data) == np.ndarray
        assert type(out['t'].data) == np.ndarray
        assert type(out['p'].data) == np.ndarray
        assert type(out['residual'].data) == np.ndarray
        assert type(out['df'].data) == np.ndarray
        assert out['beta'].shape() == (2, shape_2d[1])
        assert out['t'][1].shape()[0] == shape_2d[1]

        # Test threshold
        i = 1
        tt = threshold(out['t'][i], out['p'][i], .05)
        assert isinstance(tt, Brain_Data)

        # Test write
        dat.write(os.path.join(str(tmpdir.join('test_write.nii'))))
        assert Brain_Data(os.path.join(str(tmpdir.join('test_write.nii'))))

        # Test append
        assert dat.append(dat).shape()[0] == shape_2d[0]*2

        # Test distance
        distance = dat.distance(method='euclidean')
        assert isinstance(distance, Adjacency)
        assert distance.square_shape()[0] == shape_2d[0]

        # Test predict
        stats = dat.predict(algorithm='svm',
                            cv_dict={'type': 'kfolds', 'n_folds': 2},
                            plot=False, **{'kernel':"linear"})

        # SVM with Platt scaling (probability=True), which outputs the probability
        # of each class; no cross-validation here (cv_dict=None).
        stats = dat.predict(algorithm='svm',
                            cv_dict=None, plot=False,
                            **{'kernel':'linear', 'probability':True})
        assert isinstance(stats['weight_map'], Brain_Data)

        # Logistic classification, with 2-fold cross-validation.
        stats = dat.predict(algorithm='logistic',
                            cv_dict={'type': 'kfolds', 'n_folds': 2},
                            plot=False)
        assert isinstance(stats['weight_map'], Brain_Data)

        # Ridge classification (no cross-validation)
        stats = dat.predict(algorithm='ridgeClassifier', cv_dict=None, plot=False)
        assert isinstance(stats['weight_map'], Brain_Data)

        # Ridge
        stats = dat.predict(algorithm='ridge',
                            cv_dict={'type': 'kfolds', 'n_folds': 2,
                                     'subject_id': holdout},
                            plot=False, **{'alpha': .1})

        # Lasso
        stats = dat.predict(algorithm='lasso',
                            cv_dict={'type': 'kfolds', 'n_folds': 2,
                                     'stratified': dat.Y},
                            plot=False, **{'alpha': .1})

        # PCR
        stats = dat.predict(algorithm='pcr', cv_dict=None, plot=False)

        # Test Similarity
        r = dat.similarity(stats['weight_map'])
        assert len(r) == shape_2d[0]
        r2 = dat.similarity(stats['weight_map'].to_nifti())
        assert len(r2) == shape_2d[0]
        r = dat.similarity(stats['weight_map'], method='dot_product')
        assert len(r) == shape_2d[0]
        r = dat.similarity(stats['weight_map'], method='cosine')
        assert len(r) == shape_2d[0]
        r = dat.similarity(dat, method='correlation')
        assert r.shape == (dat.shape()[0], dat.shape()[0])
        r = dat.similarity(dat, method='dot_product')
        assert r.shape == (dat.shape()[0], dat.shape()[0])
        r = dat.similarity(dat, method='cosine')
        assert r.shape == (dat.shape()[0], dat.shape()[0])

        # Test apply_mask - might move part of this to test mask suite
        s1 = create_sphere([12, 10, -8], radius=10)
        assert isinstance(s1, nb.Nifti1Image)
        masked_dat = dat.apply_mask(s1)
        assert masked_dat.shape()[1] == np.sum(s1.get_fdata() != 0)

        # Test extract_roi
        mask = create_sphere([12, 10, -8], radius=10)
        assert len(dat.extract_roi(mask)) == shape_2d[0]

        # Test r_to_z
        z = dat.r_to_z()
        assert z.shape() == dat.shape()

        # Test copy
        d_copy = dat.copy()
        assert d_copy.shape() == dat.shape()

        # Test detrend
        detrend = dat.detrend()
        assert detrend.shape() == dat.shape()

        # Test standardize
        s = dat.standardize()
        assert s.shape() == dat.shape()
        assert np.isclose(np.sum(s.mean().data), 0, atol=.1)
        s = dat.standardize(method='zscore')
        assert s.shape() == dat.shape()
        assert np.isclose(np.sum(s.mean().data), 0, atol=.1)

        # Test Sum
        s = dat.sum()
        assert s.shape() == dat[1].shape()

        # Test Groupby
        s1 = create_sphere([12, 10, -8], radius=10)
        s2 = create_sphere([22, -2, -22], radius=10)
        mask = Brain_Data([s1, s2])
        d = dat.groupby(mask)
        assert isinstance(d, Groupby)

        # Test Aggregate
        mn = dat.aggregate(mask, 'mean')
        assert isinstance(mn, Brain_Data)
        assert len(mn.shape()) == 1

        # Test Threshold
        s1 = create_sphere([12, 10, -8], radius=10)
        s2 = create_sphere([22, -2, -22], radius=10)
        mask = Brain_Data(s1)*5
        mask = mask + Brain_Data(s2)

        m1 = mask.threshold(upper=.5)
        m2 = mask.threshold(upper=3)
        m3 = mask.threshold(upper='98%')
        m4 = Brain_Data(s1) * 5 + Brain_Data(s2) * -.5
        m4 = m4.threshold(upper=.5, lower=-.3)
        assert np.sum(m1.data > 0) > np.sum(m2.data > 0)
        assert np.sum(m1.data > 0) == np.sum(m3.data > 0)
        assert np.sum(m4.data[(m4.data > -.3) & (m4.data < .5)]) == 0
        assert np.sum(m4.data[(m4.data < -.3) | (m4.data > .5)]) > 0

        # Test Regions
        r = mask.regions(min_region_size=10)
        m1 = Brain_Data(s1)
        m2 = r.threshold(1, binarize=True)
        # assert len(r)==2
        assert len(np.unique(r.to_nifti().get_fdata())) == 2
        diff = m2-m1
        assert np.sum(diff.data) == 0

        # Test Bootstrap
        masked = dat.apply_mask(create_sphere(radius=10, coordinates=[0, 0, 0]))
        n_samples = 3
        b = masked.bootstrap('mean', n_samples=n_samples)
        assert isinstance(b['Z'], Brain_Data)
        b = masked.bootstrap('std', n_samples=n_samples)
        assert isinstance(b['Z'], Brain_Data)
        b = masked.bootstrap('predict', n_samples=n_samples, plot=False)
        assert isinstance(b['Z'], Brain_Data)
        b = masked.bootstrap('predict', n_samples=n_samples,
                             plot=False, cv_dict={'type': 'kfolds', 'n_folds': 3})
        assert isinstance(b['Z'], Brain_Data)
        b = masked.bootstrap('predict', n_samples=n_samples,
                             save_weights=True, plot=False)
        assert len(b['samples']) == n_samples

        # Test decompose
        n_components = 3
        stats = dat.decompose(algorithm='pca', axis='voxels',
                              n_components=n_components)
        assert n_components == len(stats['components'])
        assert stats['weights'].shape == (len(dat), n_components)

        stats = dat.decompose(algorithm='ica', axis='voxels',
                              n_components=n_components)
        assert n_components == len(stats['components'])
        assert stats['weights'].shape == (len(dat), n_components)

        # Shift data to be nonnegative, as required by NNMF
        dat.data = dat.data + 2
        dat.data[dat.data < 0] = 0
        stats = dat.decompose(algorithm='nnmf', axis='voxels',
                              n_components=n_components)
        assert n_components == len(stats['components'])
        assert stats['weights'].shape == (len(dat), n_components)

        stats = dat.decompose(algorithm='fa', axis='voxels',
                              n_components=n_components)
        assert n_components == len(stats['components'])
        assert stats['weights'].shape == (len(dat), n_components)

        stats = dat.decompose(algorithm='pca', axis='images',
                              n_components=n_components)
        assert n_components == len(stats['components'])
        assert stats['weights'].shape == (len(dat), n_components)

        stats = dat.decompose(algorithm='ica', axis='images',
                              n_components=n_components)
        assert n_components == len(stats['components'])
        assert stats['weights'].shape == (len(dat), n_components)

        # Shift data to be nonnegative, as required by NNMF
        dat.data = dat.data + 2
        dat.data[dat.data < 0] = 0
        stats = dat.decompose(algorithm='nnmf', axis='images',
                              n_components=n_components)
        assert n_components == len(stats['components'])
        assert stats['weights'].shape == (len(dat), n_components)

        stats = dat.decompose(algorithm='fa', axis='images',
                              n_components=n_components)
        assert n_components == len(stats['components'])
        assert stats['weights'].shape == (len(dat), n_components)

        # Test Hyperalignment Method
        sim = Simulator()
        y = [0, 1]
        n_reps = 10
        s1 = create_sphere([0, 0, 0], radius=3)
        d1 = sim.create_data(y, 1, reps=n_reps, output_dir=None).apply_mask(s1)
        d2 = sim.create_data(y, 2, reps=n_reps, output_dir=None).apply_mask(s1)
        d3 = sim.create_data(y, 3, reps=n_reps, output_dir=None).apply_mask(s1)

        # Test procrustes using align
        data = [d1, d2, d3]
        out = align(data, method='procrustes')
        assert len(data) == len(out['transformed'])
        assert len(data) == len(out['transformation_matrix'])
        assert data[0].shape() == out['common_model'].shape()
        centered = d1.data - np.mean(d1.data, 0)
        transformed = (np.dot(centered/np.linalg.norm(centered), out['transformation_matrix'][0])*out['scale'][0])
        np.testing.assert_almost_equal(0, np.sum(out['transformed'][0].data - transformed), decimal=5)

        # Test deterministic brain_data
        bout = d1.align(out['common_model'], method='deterministic_srm')
        assert d1.shape() == bout['transformed'].shape()
        assert d1.shape() == bout['common_model'].shape()
        assert d1.shape()[1] == bout['transformation_matrix'].shape[0]
        btransformed = np.dot(d1.data, bout['transformation_matrix'])
        np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data - btransformed))

        # Test probabilistic brain_data
        bout = d1.align(out['common_model'], method='probabilistic_srm')
        assert d1.shape() == bout['transformed'].shape()
        assert d1.shape() == bout['common_model'].shape()
        assert d1.shape()[1] == bout['transformation_matrix'].shape[0]
        btransformed = np.dot(d1.data, bout['transformation_matrix'])
        np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data-btransformed))

        # Test procrustes brain_data
        bout = d1.align(out['common_model'], method='procrustes')
        assert d1.shape() == bout['transformed'].shape()
        assert d1.shape() == bout['common_model'].shape()
        assert d1.shape()[1] == bout['transformation_matrix'].shape[0]
        centered = d1.data - np.mean(d1.data, 0)
        btransformed = (np.dot(centered/np.linalg.norm(centered), bout['transformation_matrix'])*bout['scale'])
        np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data-btransformed), decimal=5)
        np.testing.assert_almost_equal(0, np.sum(out['transformed'][0].data - bout['transformed'].data))

        # Test hyperalignment on Brain_Data over time (axis=1)
        sim = Simulator()
        y = [0, 1]
        n_reps = 10
        s1 = create_sphere([0, 0, 0], radius=5)
        d1 = sim.create_data(y, 1, reps=n_reps, output_dir=None).apply_mask(s1)
        d2 = sim.create_data(y, 2, reps=n_reps, output_dir=None).apply_mask(s1)
        d3 = sim.create_data(y, 3, reps=n_reps, output_dir=None).apply_mask(s1)
        data = [d1, d2, d3]

        out = align(data, method='procrustes', axis=1)
        assert len(data) == len(out['transformed'])
        assert len(data) == len(out['transformation_matrix'])
        assert data[0].shape() == out['common_model'].shape()
        centered = data[0].data.T - np.mean(data[0].data.T, 0)
        transformed = (np.dot(centered / np.linalg.norm(centered), out['transformation_matrix'][0]) * out['scale'][0])
        np.testing.assert_almost_equal(0, np.sum(out['transformed'][0].data - transformed.T), decimal=5)

        bout = d1.align(out['common_model'], method='deterministic_srm', axis=1)
        assert d1.shape() == bout['transformed'].shape()
        assert d1.shape() == bout['common_model'].shape()
        assert d1.shape()[0] == bout['transformation_matrix'].shape[0]
        btransformed = np.dot(d1.data.T, bout['transformation_matrix'])
        np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data-btransformed.T))

        bout = d1.align(out['common_model'], method='probabilistic_srm', axis=1)
        assert d1.shape() == bout['transformed'].shape()
        assert d1.shape() == bout['common_model'].shape()
        assert d1.shape()[0] == bout['transformation_matrix'].shape[0]
        btransformed = np.dot(d1.data.T, bout['transformation_matrix'])
        np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data-btransformed.T))

        bout = d1.align(out['common_model'], method='procrustes', axis=1)
        assert d1.shape() == bout['transformed'].shape()
        assert d1.shape() == bout['common_model'].shape()
        assert d1.shape()[0] == bout['transformation_matrix'].shape[0]
        centered = d1.data.T - np.mean(d1.data.T, 0)
        btransformed = (np.dot(centered / np.linalg.norm(centered), bout['transformation_matrix']) * bout['scale'])
        np.testing.assert_almost_equal(0, np.sum(bout['transformed'].data - btransformed.T), decimal=5)
        np.testing.assert_almost_equal(0, np.sum(out['transformed'][0].data - bout['transformed'].data))
Example #5
con1_file_list.sort()
con1_dat = Brain_Data(con1_file_list)

# Now that we have the data loaded, we can quickly run operations such as: what is the mean activation in each voxel across participants? Or, what is the standard deviation of the voxel activity across participants?
#
# Notice how we can chain different commands like `.mean()` and `.plot()`. This makes it easy to quickly manipulate the data, much as we would with tools like pandas.

con1_dat.mean().plot()
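
# The same chaining works for the standard deviation of voxel activity across
# participants (a parallel example using the std() method shown in the tests above):

con1_dat.std().plot()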

# We can use the `ttest()` method to run a quick t-test across each voxel in the brain.

con1_stats = con1_dat.ttest()

print(con1_stats.keys())

# This returns a dictionary containing a map of the t-values and a separate map of the p-values for each voxel.
#
# For now, let's look at the results of the t-test and threshold them to something like t > 4.

con1_stats['t'].iplot()
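
# If you prefer a static figure, the same t > 4 cut can be applied with the
# threshold() method (a sketch; the upper/lower arguments follow the API used
# in the examples above):

con1_stats['t'].threshold(upper=4, lower=-4).plot()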

# As you can see, there is very clear activation in various parts of visual cortex, which we expected given the visual stimulation.
#
# However, if we wanted to test the hypothesis that specific areas of early visual cortex (e.g., V1) process edge orientations, we could run a contrast comparing vertical orientations with horizontal orientations, as sketched below.
#
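# A sketch of that contrast (assuming a 'vertical_checkerboard' condition in the
# same localizer dataset and the data_dir defined earlier; names are ours):

con2_file_list = glob.glob(os.path.join(data_dir, 'derivatives', 'fmriprep', '*', 'func', 'sub*_vertical_checkerboard*nii.gz'))
con2_file_list.sort()
con2_dat = Brain_Data(con2_file_list)
contrast = con2_dat - con1_dat  # vertical minus horizontal, per subject
contrast.ttest(threshold_dict={'fdr': .05})['thr_t'].plot()
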
import numpy as np
import pandas as pd

all_sub = Brain_Data()
for s in subject_id.unique():
    sdat = data[np.where(metadata['SubjectID'] == s)[0]]
    sdat.X = pd.DataFrame(data={'Intercept': np.ones(sdat.shape()[0]),
                                'Pain': sdat.X['PainLevel']})
    stats = sdat.regress()
    all_sub = all_sub.append(stats['beta'][1])

#########################################################################
# We can now run a one-sample t-test at every voxel to test whether it is 
# significantly different from zero across participants.  We will threshold
# the results using FDR correction, q < 0.001.

t_stats = all_sub.ttest(threshold_dict={'fdr':.001})
t_stats['thr_t'].plot()

#########################################################################
# Run Linear Contrast
# -------------------
# 
# Obviously, the univariate regression isn't a great idea when there are only
# three observations per subject.  As we predict a monotonic increase in pain
# across pain intensities, we can also calculate a linear contrast c=(-1,0,1).
# This is simple to compute using matrix multiplication on the centered pain
# intensity values.

all_sub = Brain_Data()
for s in subject_id.unique():
    sdat = data[np.where(metadata['SubjectID'] == s)[0]]
    sdat.X = pd.DataFrame(data={'Pain': sdat.X['PainLevel']})
    # Linear contrast c = (-1, 0, 1): the weighted sum reduces to the
    # high-pain minus the low-pain image (assumes ascending pain order).
    con = sdat[2] - sdat[0]
    all_sub = all_sub.append(con)
Example #7
def test_brain_data(tmpdir):
    sim = Simulator()
    r = 10
    sigma = 1
    y = [0, 1]
    n_reps = 3
    output_dir = str(tmpdir)
    sim.create_data(y, sigma, reps=n_reps, output_dir=output_dir)

    shape_3d = (91, 109, 91)
    shape_2d = (6, 238955)
    y = pd.read_csv(str(tmpdir.join('y.csv')), header=None, index_col=None).T
    holdout = pd.read_csv(str(tmpdir.join('rep_id.csv')), header=None, index_col=None).T
    flist = glob.glob(str(tmpdir.join('centered*.nii.gz')))

    # Test load list
    dat = Brain_Data(data=flist, Y=y)

    # Test load file
    assert Brain_Data(flist[0])

    # Test to_nifti
    d = dat.to_nifti()
    assert d.shape[0:3] == shape_3d

    # Test load nibabel
    assert Brain_Data(d)

    # Test shape
    assert dat.shape() == shape_2d

    # Test Mean
    assert dat.mean().shape()[0] == shape_2d[1]

    # Test Std
    assert dat.std().shape()[0] == shape_2d[1]

    # Test add
    new = dat + dat
    assert new.shape() == shape_2d

    # Test subtract
    new = dat - dat
    assert new.shape() == shape_2d

    # Test multiply
    new = dat * dat
    assert new.shape() == shape_2d

    # Test Iterator
    x = [x for x in dat]
    assert len(x) == len(dat)
    assert len(x[0].data.shape) == 1

    # Test T-test
    out = dat.ttest()
    assert out['t'].shape()[0] == shape_2d[1]

    # Test T-test - permutation method
    # out = dat.ttest(threshold_dict={'permutation':'tfce','n_permutations':50,'n_jobs':1})
    # assert out['t'].shape()[0]==shape_2d[1]

    # Test Regress
    dat.X = pd.DataFrame({'Intercept': np.ones(len(dat.Y)),
                          'X1': np.array(dat.Y).flatten()}, index=None)
    out = dat.regress()
    assert out['beta'].shape() == (2, shape_2d[1])

    # Test indexing
    assert out['t'][1].shape()[0] == shape_2d[1]

    # Test threshold
    i = 1
    tt = threshold(out['t'][i], out['p'][i], .05)
    assert isinstance(tt, Brain_Data)

    # Test write
    dat.write(os.path.join(str(tmpdir.join('test_write.nii'))))
    assert Brain_Data(os.path.join(str(tmpdir.join('test_write.nii'))))

    # Test append
    assert dat.append(dat).shape()[0] == shape_2d[0] * 2

    # Test distance
    distance = dat.distance(method='euclidean')
    assert isinstance(distance, Adjacency)
    assert distance.square_shape()[0] == shape_2d[0]

    # Test predict
    stats = dat.predict(algorithm='svm', cv_dict={'type': 'kfolds', 'n_folds': 2},
                        plot=False, **{'kernel': "linear"})

    # SVM with Platt scaling (probability=True), which outputs the probability
    # of each class; no cross-validation here (cv_dict=None).
    stats = dat.predict(algorithm='svm', cv_dict=None, plot=False,
                        **{'kernel': 'linear', 'probability': True})
    assert isinstance(stats['weight_map'], Brain_Data)

    # Logistic classification, with 2-fold cross-validation.
    stats = dat.predict(algorithm='logistic', cv_dict={'type': 'kfolds', 'n_folds': 2}, plot=False)
    assert isinstance(stats['weight_map'], Brain_Data)

    # Ridge classification (no cross-validation)
    stats = dat.predict(algorithm='ridgeClassifier', cv_dict=None, plot=False)
    assert isinstance(stats['weight_map'], Brain_Data)

    # Ridge
    stats = dat.predict(algorithm='ridge',
                        cv_dict={'type': 'kfolds', 'n_folds': 2, 'subject_id': holdout},
                        plot=False, **{'alpha': .1})

    # Lasso
    stats = dat.predict(algorithm='lasso',
                        cv_dict={'type': 'kfolds', 'n_folds': 2, 'stratified': dat.Y},
                        plot=False, **{'alpha': .1})

    # PCR
    stats = dat.predict(algorithm='pcr', cv_dict=None, plot=False)

    # Test Similarity
    r = dat.similarity(stats['weight_map'])
    assert len(r) == shape_2d[0]
    r2 = dat.similarity(stats['weight_map'].to_nifti())
    assert len(r2) == shape_2d[0]

    # Test apply_mask - might move part of this to test mask suite
    s1 = create_sphere([12, 10, -8], radius=10)
    assert isinstance(s1, nb.Nifti1Image)
    s2 = Brain_Data(s1)
    masked_dat = dat.apply_mask(s1)
    assert masked_dat.shape()[1] == np.sum(s2.data != 0)

    # Test extract_roi
    mask = create_sphere([12, 10, -8], radius=10)
    assert len(dat.extract_roi(mask)) == shape_2d[0]

    # Test r_to_z
    z = dat.r_to_z()
    assert z.shape() == dat.shape()

    # Test copy
    d_copy = dat.copy()
    assert d_copy.shape() == dat.shape()

    # Test detrend
    detrend = dat.detrend()
    assert detrend.shape() == dat.shape()

    # Test standardize
    s = dat.standardize()
    assert s.shape() == dat.shape()
    assert np.isclose(np.sum(s.mean().data), 0, atol=.1)
    s = dat.standardize(method='zscore')
    assert s.shape() == dat.shape()
    assert np.isclose(np.sum(s.mean().data), 0, atol=.1)

    # Test Sum
    s = dat.sum()
    assert s.shape() == dat[1].shape()

    # Test Groupby
    s1 = create_sphere([12, 10, -8], radius=10)
    s2 = create_sphere([22, -2, -22], radius=10)
    mask = Brain_Data([s1, s2])
    d = dat.groupby(mask)
    assert isinstance(d, Groupby)

    # Test Aggregate
    mn = dat.aggregate(mask, 'mean')
    assert isinstance(mn, Brain_Data)
    assert len(mn.shape()) == 1

    # Test Threshold
    s1 = create_sphere([12, 10, -8], radius=10)
    s2 = create_sphere([22, -2, -22], radius=10)
    mask = Brain_Data(s1)*5
    mask = mask + Brain_Data(s2)

    m1 = mask.threshold(upper=.5)
    m2 = mask.threshold(upper=3)
    m3 = mask.threshold(upper='98%')
    assert np.sum(m1.data > 0) > np.sum(m2.data > 0)
    assert np.sum(m1.data > 0) == np.sum(m3.data > 0)

    # Test Regions
    r = mask.regions(min_region_size=10)
    m1 = Brain_Data(s1)
    m2 = r.threshold(1, binarize=True)
    # assert len(r)==2
    assert len(np.unique(r.to_nifti().get_fdata())) == 2
    diff = m2-m1
    assert np.sum(diff.data) == 0