Example #1
    def _run_interface(self, runtime):

        beta_images = [nb.load(beta) for beta in self.inputs.betas]
        residuals = [nb.load(r).get_data() for r in self.inputs.residuals]
        design_matrices = [h5load(dm) for dm in self.inputs.design_matrices]

        mean_beta = np.mean([beta.get_data() for beta in beta_images], 0)
        nb.save(
            nb.Nifti1Image(mean_beta, beta_images[0].get_affine(), beta_images[0].get_header()),
            "sandwiched_beta.nii.gz",
        )

        V = np.zeros(mean_beta.shape + (mean_beta.shape[-1],))
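        # Pooled sum of squared residuals per voxel across replications, with an n-1 (unbiased) denominator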
        W = np.sum([bottleneck.ss(r, -1) for r in residuals], 0) / (len(residuals) - 1)

        for X, resid in zip(design_matrices, residuals):
            # W = resid.T.dot(resid)
            X_T_inv = np.linalg.pinv(np.dot(X.T, X))
            top_sandwich = np.outer(np.dot(X_T_inv, X.T), W).T.reshape((np.prod(W.shape), X_T_inv.shape[1], X.shape[0]))
            sandwich = np.dot(top_sandwich, np.dot(X, X_T_inv))
            V = V + sandwich.reshape(V.shape)

        V = V / len(design_matrices)

        nb.save(
            nb.Nifti1Image(V, beta_images[0].get_affine(), beta_images[0].get_header()), "sandwiched_variance.nii.gz"
        )

        return runtime
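
For orientation, a minimal single-voxel sketch of the sandwich expression that the np.outer/reshape above vectorizes over voxels; X and resid below are hypothetical toy inputs, not part of the interface:

    import numpy as np

    # hypothetical stand-ins: one run's design matrix (T x p) and one voxel's residual series (T,)
    X = np.random.randn(100, 3)
    resid = np.random.randn(100)

    W = np.sum(resid ** 2)                    # per-voxel sum of squared residuals
    XtX_inv = np.linalg.pinv(X.T.dot(X))
    # sandwich form: (X'X)^-1 X' W X (X'X)^-1, with W a scalar for this voxel
    V = XtX_inv.dot(X.T).dot(W * X).dot(XtX_inv)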
Example #2
    def _run_interface(self, runtime):

        image = nb.load(self.inputs.data)
        data = image.get_data()
        design_matrix = h5load(self.inputs.design_matrix)

        X = design_matrix

        print(X.shape, data.shape)

        X_T_inv = np.linalg.pinv(np.dot(X.T, X))
        calc_beta = np.dot(X_T_inv, X.T)

        beta = np.dot(calc_beta, data.reshape(np.prod(data.shape[:-1]), data.shape[-1]).T)

        predicted = np.dot(X, beta)
        predicted = predicted.T.reshape((data.shape[:-1] + (X.shape[0],)))

        beta = beta.T.reshape(data.shape[:-1] + (X.shape[1],))
        resid = data - predicted
        ss = bottleneck.ss(resid, -1)

        # ss = resid.T.dot(resid)

        ols_var = np.outer(X_T_inv, ss)

        ols_var = ols_var.T.reshape(data.shape[:-1] + (ols_var.shape[0],))

        nb.save(nb.Nifti1Image(beta, image.get_affine(), image.get_header()), os.path.abspath("betas.nii.gz"))
        nb.save(nb.Nifti1Image(ols_var, image.get_affine(), image.get_header()), os.path.abspath("variances.nii.gz"))
        nb.save(nb.Nifti1Image(resid, image.get_affine(), image.get_header()), os.path.abspath("residuals.nii.gz"))

        return runtime
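
As a cross-check, the same per-voxel OLS computation written out for a single time series; X and y below are hypothetical toy inputs:

    import numpy as np

    X = np.random.randn(100, 3)                 # design matrix (T x p)
    y = np.random.randn(100)                    # one voxel's time series

    XtX_inv = np.linalg.pinv(X.T.dot(X))
    beta = XtX_inv.dot(X.T).dot(y)              # OLS estimate (X'X)^-1 X'y
    resid = y - X.dot(beta)                     # residual time series
    ss = np.sum(resid ** 2)                     # sum of squares, as bottleneck.ss computes above
    ols_var = XtX_inv * ss                      # per-voxel values behind np.outer(X_T_inv, ss) above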
Example #3
def load_create_save_ds(ds_save_p, dataset_list, ref_space, warp_files, mask, **kwargs):
    detrending = kwargs.get('detrending', True)
    use_zscore = kwargs.get('use_zscore', True)

    use_events = kwargs.get('use_events', False)
    anno_dir = kwargs.get('anno_dir', None)
    use_glm_estimates = kwargs.get('use_glm_estimates', False)
    targets = kwargs.get('targets', None)
    event_offset = kwargs.get('event_offset', None)
    event_dur = kwargs.get('event_dur', None)
    save_disc_space = kwargs.get('save_disc_space', True)

    rois = kwargs.get('rois', None)

    if ds_save_p.exists():
        ds = mvpa.h5load(str(ds_save_p))
    else:
        ds = preprocess_datasets(dataset_list, ref_space, warp_files, mask, detrending=detrending,
                                 use_zscore=use_zscore, use_events=use_events, anno_dir=anno_dir,
                                 use_glm_estimates=use_glm_estimates, targets=targets,
                                 event_offset=event_offset, event_dur=event_dur, rois=rois,
                                 save_disc_space=save_disc_space)
        mvpa.h5save(str(ds_save_p), ds) # , compression=9
    return ds
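
A hypothetical call, assuming a pathlib.Path cache location; dataset_list, ref_space, warp_files, and mask_file are placeholders prepared by the surrounding pipeline, not defined here:

    from pathlib import Path

    cache = Path('derivatives/ds_sub01.hdf5')   # hypothetical cache path
    ds = load_create_save_ds(cache, dataset_list, ref_space, warp_files, mask_file,
                             detrending=True, use_zscore=True, save_disc_space=True)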
Example #4
    for i in range(len(cam_list)):
        this = cam_list[i]
        cam_list[i] = np.concatenate(
            (this[3:, :], this[2:-1, :], this[1:-2, :], this[:-3, :]), axis=1)

    train_stim = np.concatenate((cam_list[0], cam_list[1], cam_list[3]),
                                axis=0)
    test_stim = cam_list[2]

    print(train_stim.shape, test_stim.shape)

    for hemi in hemispheres:
        # print('\nLoading hyperaligned mappers...')
        mappers = mv.h5load(
            os.path.join(
                mvpa_dir,
                'search_hyper_mappers_life_mask_nofsel_{0}.hdf5'.format(hemi)))
        all_corrs = []
        for test_p in participants:
            train_p = [x for x in participants if x != test_p]
            print(
                '\nLoading fMRI GIFTI data and using {0} as test participant...'
                .format(test_p))
            train_resp = []
            for run in [1, 2, 4]:
                avg = []
                for participant in train_p:
                    if run == 4:
                        resp = mappers[participant].forward(
                            load_data(
                                os.path.join(
Example #5
                        help="If True, the results of the "
                        "glm will be plotted as a timeseries per run.",
                        default=False)
    parser.add_argument(
        '-ar',
        '--include_all_regressors',
        help="If you are plotting the "
        "time series, do you want the plot to contain all of the"
        " regressors?",
        default=False)

    args = parser.parse_args()

    # get the data
    ds_file = args.inputfile
    ds = mv.h5load(ds_file)

    # are there glm inputs?
    if args.eventdir:
        eventdir = args.eventdir
    if args.annotation:
        annot_dir = args.annotation

    results_dir = '/' + args.output + '/'
    # create the output dir if it doesn't exist
    if not os.path.isdir(results_dir):
        os.makedirs(results_dir)

    ds_type = args.dataset
    glm = args.glm
    bilateral = args.bilateral
Example #6
    '9': 'sid000009',
    '10': 'sid000012',
    '11': 'sid000034',
    '12': 'sid000007'
}

n_conditions = 90
n_vertices = 40962

base_dir = '/home/nastase/social_actions'
scripts_dir = join(base_dir, 'scripts')
data_dir = join(base_dir, 'fmri', '1021_actions', 'derivatives')
suma_dir = join(data_dir, 'freesurfer', 'fsaverage6', 'SUMA')
mvpa_dir = join(data_dir, 'pymvpa')

condition_order = mv.h5load(join(scripts_dir, 'condition_order.hdf5'))
reorder = condition_order['reorder']
sparse_ordered_labels = condition_order['sparse_ordered_labels']

# Load in searchlight RDMs
sl_rdms = {}
for hemi in ['lh', 'rh']:
    sl_rdms[hemi] = {}
    for participant in participants.keys():
        sl_result = mv.h5load(
            join(
                mvpa_dir, 'no_roi_ids',
                'search_RDMs_sq_zscore_p{0}_{1}.hdf5'.format(
                    participant, hemi)))
        sl_sq = sl_result.samples.reshape(n_conditions, n_conditions,
                                          n_vertices)
                        should be done on the full dataset or on the dataset \
                        with only ROIs: 'full' or 'stripped' (default: stripped)",
                        type=str, default='stripped')
    parser.add_argument('-o', '--output', help="Please specify an output directory"
                                               "name (absolute path) to store the analysis results", type=str)
    parser.add_argument('--classifier', help="Which classifier do you want to use? Options:"
                                             "linear Gaussian Naive Bayes ('gnb'), linear (binary) stochastic "
                                             "gradient descent (l-sgd)",
                        type=str, required=True)

    args = parser.parse_args()

    # get the data
    ds_movie_path = args.inputfile1
    ds_loc_path = args.inputfile2
    ds_movie = mv.h5load(ds_movie_path)
    ds_loc = mv.h5load(ds_loc_path)

    # prepare the output path
    results_dir = '/' + args.output + '/'
    # create the output dir if it doesn't exist
    if not os.path.isdir(results_dir):
        os.makedirs(results_dir)

    # get more information about what is being calculated
    ds_type = args.dataset  # stripped --> no brain, no overlap,

    if args.bilateral == 'True' or args.bilateral == True:
        bilateral = True
    elif args.bilateral == 'False' or args.bilateral == False:
        bilateral = False  # True or False
samples_size = 12  #Length of segments in sec

if align == 'nonlinear':
    maskfile = os.path.join(datapath, 'templates', 'grpbold7Tad', 'qa',
                            'dico7Tad2grpbold7Tad_nl',
                            'brain_mask_intersection.nii.gz')
elif align == 'linear':
    maskfile = os.path.join(datapath, 'templates', 'grpbold7Tad', 'qa',
                            'dico7Tad2grpbold7Tad7Tad',
                            'brain_mask_intersection.nii.gz')

ds = mvpa.fmri_dataset(maskfile, mask=maskfile)
dsfile = '_z' + str(zsc) + '_' + str(samples_size) + '_' + align

#Load dataset of two subjects and reorganise for univariate analysis
evds1 = mvpa.h5load(os.path.join('dataset', subj1 + dsfile + '.hdf5'))
evds1 = evds1.mapper.reverse(evds1)
evds2 = mvpa.h5load(os.path.join('dataset', subj2 + dsfile + '.hdf5'))
evds2 = evds1.mapper.reverse(evds2)
evds = mvpa.vstack([evds1, evds2])
del evds1, evds2


# Prepare inter-subject correlation measure
class Corr(mvpa.Measure):
    is_trained = True

    def __init__(self, subj1, subj2, **kwargs):
        mvpa.Measure.__init__(self, **kwargs)
        self._subj1 = subj1
        self._subj2 = subj2
else:
    train_stim, test_stim = get_narrative_stim_for_fold(
        '{0}_{1}'.format(model, stimfile), fold_shifted, included)

for test_p in participants:
    if align == 'ws':
        train_resp, test_resp = get_ws_data(test_p, fold_shifted, included,
                                            hemi)
    elif align == 'aa':
        train_resp, test_resp = get_aa_data(test_p, fold_shifted, included,
                                            hemi)
    else:
        print('\nLoading hyperaligned mappers...')
        mappers = mv.h5load(
            os.path.join(
                mvpa_dir,
                'search_hyper_mappers_life_mask_nofsel_{0}_leftout_{1}.hdf5'.
                format(hemi, fold_shifted)))
        if align == 'ha_common':
            train_resp, test_resp = get_ha_common_data(test_p, mappers,
                                                       fold_shifted, included,
                                                       hemi)
        elif align == 'ha_testsubj':
            train_resp, test_resp = get_ha_testsubj_data(
                test_p, mappers, fold_shifted, included, hemi)

    alphas = np.logspace(0, 3, 20)
    nboots = len(included)
    chunklen = 15
    nchunks = 15
from scipy.io import loadmat
from sklearn.linear_model import RidgeCV,BayesianRidge
from sklearn.preprocessing import StandardScaler
from sklearn.cross_decomposition import PLSRegression
from pandas import read_csv
from sklearn.externals import joblib
from encoding_helpers import *

from itertools import combinations

T3 = False


for subj in xrange(12,13):
    subj_preprocessed_path = os.path.join('/home','mboos','SpeechEncoding','PreProcessed','subj%02dnpp.gzipped.hdf5' % subj)
    s1ds = mvpa.h5load(subj_preprocessed_path)
    events = mvpa.find_events(targets=s1ds.sa.targets,chunks=s1ds.sa.chunks)
    rvstr_TS = rolling_window(s1ds.sa['targets'][::-1].copy(),4)

    s1ds.sa['targets'].value[(np.where(np.apply_along_axis(lambda x : len(np.unique(x)) == 1 and x[0] != 'rest',1,rvstr_TS)[::-1])[0]+3)] = 'rest'
    labelsTS = s1ds.sa['targets'].value.copy()
    	
    # <codecell>

    # unroll audio features
    # cut last 500ms
    featureTS = np.zeros((labelsTS.shape[0],20*ft_freq))
    featureTS[labelsTS!='rest',:] = np.reshape(np.vstack([feature_dict[ev['targets']][:60,:] for ev in events if ev['targets']!='rest']),(-1,ft_freq*20))

    # <codecell>
    	
datapath = os.path.join('/home','data','exppsy','forrest_gump','openfmri.org')

##Parameter
zsc = 1				#Voxelwise zscoring
samples_size = 12	#Length of segments in sec

if align=='nonlinear':
	maskfile = os.path.join(datapath,'templates', 'grpbold7Tad','qa', 'dico7Tad2grpbold7Tad_nl','brain_mask_intersection.nii.gz')
elif align=='linear':
	maskfile = os.path.join(datapath,'templates', 'grpbold7Tad','qa', 'dico7Tad2grpbold7Tad7Tad','brain_mask_intersection.nii.gz')

ds = mvpa.fmri_dataset(maskfile, mask=maskfile)
dsfile = '_z'+str(zsc)+'_'+str(samples_size)+'_'+align

#Load dataset of two subjects and reorganise for univariate analysis
evds1 = mvpa.h5load(os.path.join('dataset',subj1+dsfile+'.hdf5'))
evds1 = evds1.mapper.reverse(evds1)
evds2 = mvpa.h5load(os.path.join('dataset',subj2+dsfile+'.hdf5'))
evds2 = evds1.mapper.reverse(evds2)
evds = mvpa.vstack([evds1,evds2])
del evds1, evds2

# Prepare inter-subject correlation measure
class Corr(mvpa.Measure):
	is_trained = True
	def __init__(self,subj1,subj2, **kwargs):
		mvpa.Measure.__init__(self, **kwargs)
		self._subj1 = subj1
		self._subj2 = subj2
	def _call(self, evds):
		res = 1-sd.pdist(np.hstack((evds[evds.sa.subj==self._subj1].samples,evds[evds.sa.subj==self._subj2].samples)).T,'correlation')
Example #12
    def _run_interface(self, runtime):

        print(self.inputs.contrast)

        contrast = np.array(self.inputs.contrast)
        design_matrices = [mvpa.h5load(d) for d in self.inputs.design_matrices]
        residuals = [nb.load(r).get_data() for r in self.inputs.residuals]

        filename_W_var = os.path.join(mkdtemp(), "data.dat")
        # W_var = np.zeros(residuals[0].shape[:-1] + residuals[0].shape[-1:] + residuals[0].shape[-1:])
        W_var = np.memmap(
            filename_W_var,
            dtype="float32",
            mode="w+",
            shape=(residuals[0].shape[:-1] + residuals[0].shape[-1:] + residuals[0].shape[-1:]),
        )

        # Create Autocorrelation matrices, within voxels and across voxel and comparison voxel

        for fd in np.arange(W_var.shape[0]):
            for i, r in enumerate(residuals):
                W_var[fd] += np.einsum("...k,...h", r[fd], r[fd])

            # Normalize without bias (outside the loop over replications, as in the covariance block below)
            W_var[fd] = W_var[fd] / (len(residuals) - 1)

        # Set up covariance matrices, both within voxel and across voxels
        V_var = np.zeros(residuals[0].shape[:-1] + (design_matrices[0].shape[1],) * 2)

        # Fill in covariance matrix using sandwich

        for fd in np.arange(W_var.shape[0]):
            for i, X in enumerate(design_matrices):
                X_T_inv = np.linalg.pinv(np.dot(X.T, X))
                V_var[fd] += np.rollaxis(X_T_inv.dot(X.T).dot(W_var[fd]).dot(X).dot(X_T_inv), 0, -1)

        # Normalize covariance matrix by number of replications
        V_var = V_var / len(design_matrices)

        variance_contrast = contrast.dot(V_var).dot(contrast.T)

        nb.save(
            nb.Nifti1Image(
                variance_contrast,
                nb.load(self.inputs.residuals[0]).get_affine(),
                nb.load(self.inputs.residuals[0]).get_header(),
            ),
            "variance_contrast.nii.gz",
        )

        if isdefined(self.inputs.comparison_voxel):

            filename_W_covar = os.path.join(mkdtemp(), "data.dat")
            # W_covar = np.zeros(residuals[0].shape[:-1] + residuals[0].shape[-1:] + residuals[0].shape[-1:])
            W_covar = np.memmap(
                filename_W_covar,
                dtype="float32",
                mode="w+",
                shape=(residuals[0].shape[:-1] + residuals[0].shape[-1:] + residuals[0].shape[-1:]),
            )

            for fd in np.arange(W_var.shape[0]):
                for i, r in enumerate(residuals):
                    W_covar[fd] += np.einsum("...k,...h", r[fd], r[self.inputs.comparison_voxel])

                W_covar[fd] = W_covar[fd] / (len(residuals) - 1)

            V_covar = np.zeros(residuals[0].shape[:-1] + (design_matrices[0].shape[1],) * 2)

            for fd in np.arange(W_var.shape[0]):
                for i, X in enumerate(design_matrices):
                    X_T_inv = np.linalg.pinv(np.dot(X.T, X))
                    V_covar[fd] += np.rollaxis(X_T_inv.dot(X.T).dot(W_covar[fd]).dot(X).dot(X_T_inv), 0, -1)

            V_covar = V_covar / len(design_matrices)

            covariance_contrast = contrast.dot(V_covar).dot(contrast.T)

            covar_filename = "covariance_contrast_comp_voxel_%s.nii.gz" % "_".join(
                [str(e) for e in self.inputs.comparison_voxel]
            )

            nb.save(
                nb.Nifti1Image(
                    covariance_contrast,
                    nb.load(self.inputs.residuals[0]).get_affine(),
                    nb.load(self.inputs.residuals[0]).get_header(),
                ),
                covar_filename,
            )

        return runtime
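
The contrast projection used twice above, written out for a single voxel with toy values (illustration only):

    import numpy as np

    contrast = np.array([1.0, -1.0, 0.0])                 # toy contrast over three regressors
    V = np.array([[0.5, 0.1, 0.0],
                  [0.1, 0.4, 0.0],
                  [0.0, 0.0, 0.3]])                       # toy per-voxel parameter covariance
    variance_contrast = contrast.dot(V).dot(contrast.T)   # scalar Var(c'beta) = c V c'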
Example #13
    def _run_interface(self, runtime):

        clusters = nb.load(self.inputs.clusters).get_data()

        if np.sum(clusters) == 0:
            nb.save(nb.Nifti1Image(np.zeros(clusters.shape), np.identity(4)), "in_limbo.nii.gz")
            return runtime

        if self.inputs.global_threshold:
            clusters[clusters > 0] = 1

        # Get nearest-neighbors
        cluster_centers = [sp.ndimage.measurements.center_of_mass(clusters == c) for c in np.unique(clusters)[1:]]
        kdtree = sp.spatial.KDTree(cluster_centers)
        grid = np.mgrid[[slice(0, e) for e in clusters.shape]]
        r = kdtree.query(np.array([g.ravel() for g in grid]).T)
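        # kdtree.query returns (distances, indices); r[1] holds the index of the nearest cluster center per voxel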

        assigned_clusters = r[1].reshape(clusters.shape) + 1

        # get minimum value within cluster
        z = nb.load(self.inputs.zmap).get_data()
        X = h5load(self.inputs.design_matrix)

        # get index of minimum per cluster
        cl_min_z = {}
        for cl in np.unique(clusters[clusters > 0]):
            ind = z[clusters == cl].argmin()
            cl_min_z[cl] = tuple(np.array(np.where(clusters == cl))[:, ind])

        # get complete variances
        variances = nb.load(self.inputs.variance).get_data()
        residuals = nb.load(self.inputs.residuals).get_data()
        betas = nb.load(self.inputs.beta).get_data()

        # per cluster, get t-value
        ts = np.zeros(z.shape[:-1])
        covars = np.zeros(z.shape[:-1])

        for cl in np.unique(clusters[clusters > 0]):

            # Index of minimum in cluster
            ind_min = tuple([slice(None)] + [e for e in cl_min_z[1]] + [0])
            var_min = variances[cl_min_z[cl]][0]
            beta_min = betas[cl_min_z[cl]][0]
            current_ind = (assigned_clusters == cl) & (clusters == 0)

            # Get covariances
            current_covars = np.zeros(np.sum(current_ind))

            for i in np.arange(residuals.shape[-1]):
                print(residuals[:, current_ind, i].shape)
                print(residuals[ind_min].shape)
                current_covars += np.dot(residuals[:, current_ind, i].squeeze().T, residuals[ind_min])

            current_covars = current_covars / residuals.shape[-1]

            pinv = np.linalg.pinv(np.dot(X.T, X))

            current_covars = np.outer(np.dot(np.dot(pinv, X.T), np.dot(X, pinv)), current_covars)[0, :]

            current_vars = variances[current_ind][:, 0]
            current_betas = betas[current_ind][:, 0]

            contrast = beta_min - current_betas
            denom_var = np.sqrt(var_min + current_vars - 2 * current_covars)
            ts[current_ind] = contrast / denom_var
            covars[current_ind] = current_covars

        threshold = sp.stats.t.ppf(1.0 - self.inputs.alpha, self.inputs.ddof)

        self.in_limbo = (np.abs(ts) < threshold) & (clusters == 0)

        nb.save(nb.Nifti1Image(np.array(self.in_limbo, dtype=int), np.identity(4)), "in_limbo.nii.gz")
        nb.save(nb.Nifti1Image(np.array(ts, dtype=int), np.identity(4)), "t.nii.gz")

        return runtime
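
A single-voxel illustration of the "in limbo" test applied above, with hypothetical numbers:

    import numpy as np
    import scipy.stats as stats

    beta_min, var_min = 2.0, 0.5              # beta and variance at the cluster's minimum-z voxel
    beta_v, var_v, cov_v = 1.5, 0.4, 0.1      # candidate voxel outside the cluster
    t = (beta_min - beta_v) / np.sqrt(var_min + var_v - 2 * cov_v)
    threshold = stats.t.ppf(1.0 - 0.05, 100)  # alpha and ddof are interface inputs; 0.05 and 100 are placeholders
    in_limbo = abs(t) < threshold             # voxel cannot be distinguished from the cluster's minimum-z voxel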
Example #14
                                               "the second dataset "
                                               "(movie or localizer)")
    parser.add_argument('-c', '--classifier', help="Which classifier"
                                                   "to use: gnb or"
                                                   "l-sgd")
    parser.add_argument('-b', '--bilateral', help="Is the dataset"
                                                  "lateralized or do we"
                                                  "have combined ROIs?",
                        default=True)
    parser.add_argument('-o', '--output', help="Where to save fig?")
    args = parser.parse_args()

    # load the data
    ds_1_path = args.input1
    ds_2_path = args.input2
    ds_1 = mv.h5load(ds_1_path)
    ds_2 = mv.h5load(ds_2_path)

    # check ds status
    if args.bilateral:
        bilateral = True
    else:
        bilateral = False

    if args.output:
        output = args.output
    else:
        output = None

    # get the classifier information
    valid_clfs = ['gnb', 'l-sgd']
Example #15
    qe = mv.SurfaceQueryEngine(surf, 20.0, distance_metric='dijkstra')
    print("Finished creating surface-based searchlight query engine")

    rdm = mv.PDist(pairwise_metric='correlation', center_data=False)

    sl = mv.Searchlight(rdm,
                        queryengine=qe,
                        enable_ca=['roi_sizes'],
                        nproc=6,
                        roi_ids=cortical_vertices)
    mv.debug.active += ['SLC']
    for run in runs:
        if hyperalign:
            mappers = mv.h5load(
                join(
                    mvpa_dir,
                    'search_hyper_mappers_life_mask_nofsel_{0}_leftout_{1}.hdf5'
                    .format(hemi, run)))
        for participant in participants:

            # print(ds.sa['intents'])
            # to_avg = mean_group_feature()
            # averaged_response = to_avg(ds)

            print(
                "loading data for run {0}, hemisphere {1}, participant {2}...".
                format(run, hemi, participant))
            ds = mv.gifti_dataset(
                join(
                    sam_data_dir,
                    '{0}_task-life_acq-{1}vol_run-0{2}.{3}.tproject.gii'.
Example #16
# switch = int(sys.argv[1])
# run = (switch % 4) + 1
# if switch < 4:
#     model ='aa'
# else:
#     model = 'ha'
# print(run, model)

for model in ['ha']:
    for run in [3]:
        # Load in surface data sets
        if model == 'ha':
            lh_mappers = mv.h5load(
                os.path.join(
                    mvpa_dir,
                    'search_hyper_mappers_life_mask_nofsel_lh_leftout_{0}.hdf5'
                    .format(run)))
            rh_mappers = mv.h5load(
                os.path.join(
                    mvpa_dir,
                    'search_hyper_mappers_life_mask_nofsel_rh_leftout_{0}.hdf5'
                    .format(run)))
        fc = 0
        fmri = []
        for participant in participants:
            print('\nLoading fMRI GIFTI data for {0}'.format(participant))
            if model == 'ha':
                rh = rh_mappers[participant].forward(
                    load_data(
                        os.path.join(
Example #17
participants = ['sub-rid000001','sub-rid000005','sub-rid000006','sub-rid000009','sub-rid000012',\
                'sub-rid000014','sub-rid000017','sub-rid000019','sub-rid000024','sub-rid000027',\
                'sub-rid000031','sub-rid000032','sub-rid000033','sub-rid000034','sub-rid000036',\
                'sub-rid000037','sub-rid000038','sub-rid000041']

hemispheres = ['lh', 'rh']

for hemi in hemispheres:
    # Load surface and create searchlight query engine
    surf = mv.surf.read(join(suma_dir, '{0}.pial.gii'.format(hemi)))
    qe = mv.SurfaceQueryEngine(surf, 20.0, distance_metric='dijkstra')
    print("Finished creating surface-based searchlight query engine")

    # Optional load hyperalignment mappers
    if hyperalign:
        mappers1 = mv.h5load(join(mvpa_dir, 'search_hyper_mappers_life_mask_nofsel_{0}_leftout_1.hdf5'.format(hemi)))
        mappers2 = mv.h5load(join(mvpa_dir, 'search_hyper_mappers_life_mask_nofsel_{0}_leftout_2.hdf5'.format(hemi)))
        mappers3 = mv.h5load(join(mvpa_dir, 'search_hyper_mappers_life_mask_nofsel_{0}_leftout_3.hdf5'.format(hemi)))
        mappers4 = mv.h5load(join(mvpa_dir, 'search_hyper_mappers_life_mask_nofsel_{0}_leftout_4.hdf5'.format(hemi)))
        mappers = [mappers1, mappers2, mappers3, mappers4]
        print("Loaded hyperalignment mappers")

for participant in participants:

    # Load in functional data
    ds1 = mv.gifti_dataset(join(sam_data_dir, '{0}_task-life_acq-{1}vol_run-0{2}.{3}.tproject.gii'.format(participant, tr[1], 1, hemi)))
    ds2 = mv.gifti_dataset(join(sam_data_dir, '{0}_task-life_acq-{1}vol_run-0{2}.{3}.tproject.gii'.format(participant, tr[2], 2, hemi)))
    ds3 = mv.gifti_dataset(join(sam_data_dir, '{0}_task-life_acq-{1}vol_run-0{2}.{3}.tproject.gii'.format(participant, tr[3], 3, hemi)))
    ds4 = mv.gifti_dataset(join(sam_data_dir, '{0}_task-life_acq-{1}vol_run-0{2}.{3}.tproject.gii'.format(participant, tr[4], 4, hemi)))

    # Exclude medial wall
from __future__ import division
import mvpa2.suite as mvpa
from joblib import dump
import numpy as np
import sys
from sklearn.cross_validation import KFold

subj = sys.argv[1]
subj = int(subj)

spenc_dir = '/home/mboos/SpeechEncoding/'

subj_preprocessed_path = 'PreProcessed/FG_subj{}.gzipped.hdf5'.format(subj)
s1ds = mvpa.h5load(spenc_dir + subj_preprocessed_path)

duration = np.array([902,882,876,976,924,878,1084,676])

# i did not kick out the first/last 4 samples per run yet
slice_nr_per_run = [dur/2 for dur in duration]

# use broadcasting to get indices to delete around the borders
idx_borders = np.cumsum(slice_nr_per_run[:-1])[:,np.newaxis] + \
              np.arange(-4,4)[np.newaxis,:]
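
# A toy check of the broadcasting above (illustrative numbers only): for runs of 10 and 8 slices,
# np.cumsum([10])[:, np.newaxis] + np.arange(-4, 4)[np.newaxis, :] -> [[ 6  7  8  9 10 11 12 13]],
# i.e. four samples on each side of the run border are marked for deletion.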

fmri_data = np.delete(s1ds.samples, idx_borders, axis=0)

# and we're going to remove the last fmri slice
# since it does not correspond to a movie part anymore
fmri_data = fmri_data[:-1, :]

# shape of TR samples
Example #19
import os
import numpy as np
from mvpa2.suite import h5load

DATA_DIR = '/idata/DBIC/fma/id_studies/home/preprocess/aligned/raiders-32ch/'
NP_DATA_DIR = '/ihome/cara/cvu_thesis/hyper_raiders_brain_data/'

subjects = ['sub-rid000001', 'sub-rid000008', 'sub-rid000009', 'sub-rid000012', 'sub-rid000013', \
            'sub-rid000016', 'sub-rid000017', 'sub-rid000018', 'sub-rid000019', 'sub-rid000021', \
            'sub-rid000022', 'sub-rid000024', 'sub-rid000025', 'sub-rid000026', 'sub-rid000027', \
            'sub-rid000031', 'sub-rid000032', 'sub-rid000036', 'sub-rid000037', 'sub-rid000041']
hemispheres = ['lh', 'rh']
runs = ['1-2-3-4', '5-6-7-8']

for subj in subjects:
    for hemi in hemispheres:
        for run in runs:
            print('{0}_{1}_runs{2}_hyperalign'.format(subj, hemi, run))
            filename = DATA_DIR + '{0}_{1}_qhyper-to-raiders-8ch_ico32_z_r20.0_sl-avg_reflection_scaling_non-norm-row_runs{2}.hdf5.gz'.format(
                subj, hemi, run)
            # get pymvpa dataset
            ds = h5load(filename)
            # save numpy array
            np.save(
                NP_DATA_DIR +
                '{0}_{1}_runs{2}_hyperalign.npy'.format(subj, hemi, run),
                ds.samples)
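
A hypothetical downstream load of one converted array, using the same path pattern as written above:

    import numpy as np

    arr = np.load(NP_DATA_DIR + 'sub-rid000001_lh_runs1-2-3-4_hyperalign.npy')
    print(arr.shape)   # samples x features, exactly as stored from ds.samples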