Example #1
import os
import time

import numpy as np
from nilearn.input_data import NiftiMapsMasker

from ICode.loader import load_dynacomp
# dict_to_list and PATH_TO_SAVE_DATA are project-specific helpers assumed to be
# defined elsewhere in the repository.


def extract_rois_signals(preprocessing_folder='pipeline_2',
                         prefix='resampled_wr'):
    dataset = load_dynacomp(preprocessing_folder=preprocessing_folder,
                            prefix=prefix)
    for idx, func in enumerate([dataset.func1, dataset.func2]):
        for i in range(len(dataset.subjects)):
            tic = time.clock()
            print func[i]
            output_path, _ = os.path.split(func[i])
            print dataset.subjects[i]
            maps_img = dict_to_list(dataset.rois[i])
            # Masking, smoothing, band-pass filtering and detrending
            print 'Nifti'
            masker = NiftiMapsMasker(maps_img=maps_img,
                                     mask_img=dataset.mask,
                                     low_pass=.1,
                                     high_pass=.01,
                                     smoothing_fwhm=6.,
                                     t_r=1.05,
                                     detrend=True,
                                     standardize=False,
                                     resampling_target='data',
                                     memory_level=0,
                                     verbose=5)

            # Extract the ROI time series into x (n_timepoints x n_rois)
            print 'masker'
            x = masker.fit_transform(func[i])
            print x
            np.save(os.path.join(PATH_TO_SAVE_DATA,
                                 'output' + str(i + 1) + '_rois_filter'), x)
            print time.clock() - tic

        # NOTE: as in the original snippet, this return ends the outer loop
        # after the first session (dataset.func1) has been processed.
        return x
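A minimal, hypothetical call to the function above (assuming PATH_TO_SAVE_DATA points to an existing output directory and the dynacomp data are available locally) could look like this:

signals = extract_rois_signals(preprocessing_folder='pipeline_2',
                               prefix='resampled_wr')
# `signals` is the (n_timepoints, n_rois) matrix of the last subject processed;
# one .npy file per subject is written to PATH_TO_SAVE_DATA along the way.
print signals.shape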
Example #2
import os
import numpy as np
from ICode.loader import load_dynacomp
from ICode.estimators.hurst_estimator import Hurst_Estimator
from statsmodels.sandbox.stats.multicomp import multipletests
from scipy.stats import ttest_ind, ttest_1samp
from mne.stats import permutation_t_test
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.datasets.base import Bunch
from matplotlib import rc as changefont

dataset = load_dynacomp(preprocessing_folder='pipeline_1', prefix='wr')
lr = LogisticRegression()
groups = [['av', 'v'], ['av', 'avn'], ['v', 'avn']]


def classify_group(group, fc):
    """Classification for a pair of groups
    """
    # Indices of the subjects belonging to the two groups being compared
    ind = np.hstack(
        (dataset.group_indices[group[0]], dataset.group_indices[group[1]]))
    # X = fc[ind, :]
    X = StandardScaler().fit_transform([fc[i] for i in ind])
    # Label the first group +1 and the second group -1
    y = np.array([1] * len(dataset.group_indices[group[0]]) +
                 [-1] * len(dataset.group_indices[group[1]]))
    sss = StratifiedShuffleSplit(y, n_iter=50, test_size=.25, random_state=42)
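    # NOTE: the original example stops at the split definition above. The lines
    # below are an assumed continuation (not part of the source) sketching how
    # the shuffle splits could be used to score the logistic regression.
    scores = []
    for train_index, test_index in sss:
        lr.fit(X[train_index], y[train_index])
        scores.append(lr.score(X[test_index], y[test_index]))
    return np.mean(scores)

# Hypothetical usage, assuming `fc` holds one feature vector per subject
# (e.g. Hurst exponents from Hurst_Estimator):
#     for group in groups:
#         print group, classify_group(group, fc)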