def perform_analysis():
    # relies on module-level names defined elsewhere: mtk, subj_list, runs,
    # behav_dir, label, img_data, mask, estimator
    results = []

    for subj in subj_list:
        # behavioral labels for each run of this subject
        labels = [
            mtk.get_behavior_data(behav_dir, subj, run, label) for run in runs
        ]

        # masked data array (trials x voxels) for each run
        imgs = [
            mtk.masking_fmri_image(img_data[(subj, run)], mask) for run in runs
        ]

        data_xs = np.concatenate(imgs)
        data_ys = list(labels[0]['task_type']) + list(labels[1]['task_type'])

        # one group id per run so GroupKFold keeps the two runs in separate folds
        group = ([1 for _ in labels[0]['task_type']] +
                 [2 for _ in labels[1]['task_type']])
        cv = GroupKFold(n_splits=2)

        cv_scores = cross_val_score(estimator,
                                    data_xs,
                                    data_ys,
                                    cv=cv,
                                    groups=group)

        results.append(list(cv_scores))

    return results
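The group list built above tags every trial with its run number, so GroupKFold(n_splits=2) splits the data run-wise (train on one run, test on the other). A minimal, self-contained illustration of that splitting behaviour, with toy shapes and label names that are not taken from the dataset:

import numpy as np
from sklearn.model_selection import GroupKFold

X = np.random.rand(12, 20)             # 12 toy trials x 20 voxels
y = ['move', 'rest'] * 6               # toy task labels
groups = [1] * 6 + [2] * 6             # run 1 trials, then run 2 trials

for train_idx, test_idx in GroupKFold(n_splits=2).split(X, y, groups):
    # every fold tests on exactly one run and trains on the other
    print('test run:', set(np.asarray(groups)[test_idx]))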
def initial_images(standardize_trial=False):
    result = {}

    for run in runs:
        for subj in subj_list:
            labels = mtk.get_behavior_data(behav_dir, subj, run, 'color')

            # load per-trial t-value images and pick volumes in behavioral trial order
            img = mtk.load_5d_fmri_image(data_dir + 'tvalsLSA.%s.r0%d.nii.gz' %
                                         (subj, run))
            img = nilearn.image.index_img(img, labels['order'] - 1)

            if standardize_trial:
                # subtract each trial's mean across voxels, then map back to image space
                temp = mtk.masking_fmri_image(img, mask_img)
                temp = temp - np.tile(np.mean(temp, axis=1),
                                      (temp.shape[-1], 1)).T
                img = nilearn.masking.unmask(temp, mask_img)

            # resample onto the grid of the ROI masks
            img = nilearn.image.resample_img(img,
                                             target_affine=roi_masks[0].affine,
                                             target_shape=roi_masks[0].shape,
                                             interpolation='nearest')

            result[subj, run] = img

    return result
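The standardize_trial branch removes each trial's mean signal across voxels before unmasking. The np.tile(...).T expression is equivalent to subtracting the row means with keepdims=True; a quick stand-alone check on toy data (shapes are illustrative only):

import numpy as np

temp = np.random.rand(8, 100)          # (trials, voxels), toy data
tiled = temp - np.tile(np.mean(temp, axis=1), (temp.shape[-1], 1)).T
simple = temp - temp.mean(axis=1, keepdims=True)

assert np.allclose(tiled, simple)      # same per-trial demeaning
assert np.allclose(simple.mean(axis=1), 0.0)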
Example 3
def perform_analysis():
    results = []

    for subj in subj_list:
        # 'run', 'label', 'img_data', and 'mask' come from the enclosing module scope
        labels = mtk.get_behavior_data(behav_dir, subj, run, label)
        imgs = mtk.masking_fmri_image(img_data[subj], mask)

        data_xs = imgs
        data_ys = list(labels['task_type'])
        cv = mtk.BalancedShuffleSplit(n_splits=2)

        cv_scores = cross_val_score(estimator, data_xs, data_ys, cv=cv)

        results.append(list(cv_scores))

    return results
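BalancedShuffleSplit here comes from the project's mtk module, not from scikit-learn; cross_val_score accepts any splitter that implements the scikit-learn CV interface (split / get_n_splits). A sketch of the same call with the standard StratifiedShuffleSplit swapped in, on toy data:

import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit, cross_val_score
from sklearn.naive_bayes import GaussianNB

X = np.random.rand(40, 50)             # toy (trials x voxels) matrix
y = ['left', 'right'] * 20             # toy task labels

cv = StratifiedShuffleSplit(n_splits=2, test_size=0.5, random_state=0)
print(cross_val_score(GaussianNB(), X, y, cv=cv))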
Example 4
def _perform_analysis(subj, estimator, run, label, niter, standardize=True):
    # map the estimator name onto a scikit-learn estimator
    if estimator == 'gnb':
        estimator = GaussianNB()
    elif estimator == 'svc':
        estimator = Pipeline([('scale', StandardScaler()),
                              ('svc', LinearSVC())])

    # load behavioral data
    labels = get_behavior_data(behav_dir, subj, run, label)

    # load fmri data
    img = load_fmri_image(data_dir, subj, run, labels, standardize=standardize)
    y = labels['task_type']

    return run_roi_based_mvpa(estimator,
                              img,
                              y,
                              roi_masks,
                              'balanced',
                              n_iter=niter)
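run_roi_based_mvpa is a project helper; judging from its arguments it cross-validates the estimator separately inside each ROI mask, with class-balanced resampling repeated n_iter times. The loop below is only a rough sketch of that pattern built from public nilearn/scikit-learn calls; the helper's real behaviour (including its 'balanced' option) may differ:

import numpy as np
from nilearn.masking import apply_mask
from sklearn.model_selection import StratifiedShuffleSplit, cross_val_score

def roi_mvpa_sketch(estimator, img, y, roi_masks, n_iter=10):
    # cross-validate within each ROI: mask the image, then score the estimator
    all_scores = []
    for roi in roi_masks:
        X = apply_mask(img, roi)                      # (trials x voxels in ROI)
        cv = StratifiedShuffleSplit(n_splits=n_iter, test_size=0.5)
        all_scores.append(cross_val_score(estimator, X, np.asarray(y), cv=cv))
    return all_scores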
Example 5
def _perform_analysis(subj, estimator, run, label):
    if estimator == 'gnb':
        estimator = GaussianNB()
    elif estimator == 'svc':
        estimator = Pipeline([('scale', StandardScaler()),
                              ('svc', LinearSVC())])

    # load behavioral data
    labels = get_behavior_data(behav_dir,
                               subj,
                               run,
                               label,
                               contain_groups=(1, ))

    # load fmri data
    img = load_fmri_image(data_dir, subj, run, labels)
    y = labels['task_type']

    return labels['order'], run_decoding_time_series(estimator, img, y,
                                                     roi_masks)

    run_number_dict = {
        'color': [3, 4],
    }

    runs = run_number_dict[label]

    # load mask file
    mask_img = get_full_mask(data_dir)

    for subj in subj_list:
        print('starting run %s, %s label' % (subj, label))

        for run in runs:
            # load behavioral data
            labels = get_behavior_data(behav_dir,
                                       subj,
                                       run,
                                       label,
                                       stratified_group=True)

            # load fmri file
            img = load_fmri_image(data_dir, subj, run, labels)

            X = img
            y = list(labels['task_type'])
            group = list(labels['group'])

            searchlight_img = run_searchlight(mask_img,
                                              X,
                                              y,
                                              group,
                                              estimator,
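The run_searchlight call above (and the mtk.run_searchlight call in the next snippet) are project wrappers. nilearn ships a SearchLight estimator that covers the same grouped cross-validation idea; the sketch below shows how such a wrapper could be assembled, with the radius and CV choice as assumptions rather than the project's actual settings:

import nilearn.decoding
from sklearn.model_selection import GroupKFold

def searchlight_sketch(mask_img, X, y, groups, estimator, radius=6.0):
    # fit a whole-brain searchlight; GroupKFold keeps the run groups separate
    sl = nilearn.decoding.SearchLight(mask_img,
                                      radius=radius,
                                      estimator=estimator,
                                      cv=GroupKFold(n_splits=2),
                                      n_jobs=1)
    sl.fit(X, y, groups=groups)
    return sl.scores_            # voxel-wise cross-validated score map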
    run_number_dict = {
        'move': [3, 5],
        'plan': [3, 4],
        'color': [3, 4],
    }

    runs = run_number_dict[label]

    # load mask file
    mask_img = mtk.get_full_mask(mask_dir)

    for subj in subj_list:
        print('starting run %s, %s label' % (subj, label))

        labels = [
            mtk.get_behavior_data(behav_dir, subj, run, label)
            for run in runs
        ]

        # per-run trial images, reordered by each run's behavioral trial order
        imgs = [
            nilearn.image.index_img(
                mtk.load_5d_fmri_image(data_dir + 'tvalsLSA.%s.r0%d.nii.gz' % (subj, run)),
                lab['order'] - 1)
            for lab, run in zip(labels, runs)
        ]

        X = nilearn.image.concat_imgs(imgs)
        y = list(labels[0]['task_type']) + list(labels[1]['task_type'])
        # one group id per run so the searchlight CV splits across runs
        group = ([1 for _ in labels[0]['task_type']] +
                 [2 for _ in labels[1]['task_type']])

        searchlight_img = mtk.run_searchlight(mask_img, X, y, group, group_k=2,