Example #1
# Imports assumed for a standalone run (exact module locations are an assumption):
import warnings

import numpy as np
from nibabel import Nifti1Image
from numpy.testing import assert_array_equal, assert_raises

from nilearn.image import get_data
from nilearn.masking import MaskWarning, compute_multi_epi_mask


def test_compute_multi_epi_mask():
    # Check that an empty list of images creates a meaningful error
    assert_raises(TypeError, compute_multi_epi_mask, [])
    # As it calls intersect_masks, we only test resampling here.
    # Same masks as test_intersect_masks
    mask_a = np.zeros((4, 4, 1), dtype=bool)
    mask_a[2:4, 2:4] = 1
    mask_a_img = Nifti1Image(mask_a.astype(int), np.eye(4))

    mask_b = np.zeros((8, 8, 1), dtype=bool)
    mask_b[2:6, 2:6] = 1
    mask_b_img = Nifti1Image(mask_b.astype(int), np.eye(4) / 2.)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", MaskWarning)
        assert_raises(ValueError, compute_multi_epi_mask,
                      [mask_a_img, mask_b_img])
    mask_ab = np.zeros((4, 4, 1), dtype=bool)
    mask_ab[2, 2] = 1
    mask_ab_ = compute_multi_epi_mask([mask_a_img, mask_b_img],
                                      threshold=1.,
                                      opening=0,
                                      target_affine=np.eye(4),
                                      target_shape=(4, 4, 1))
    assert_array_equal(mask_ab, get_data(mask_ab_))
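Why only voxel (2, 2) survives: on the 0.5 mm grid of mask_b, the [2:6, 2:6] block sits at 1.0-2.5 mm in world space, while mask_a's [2:4, 2:4] block sits at 2-3 mm, so with threshold=1. only the voxel at 2 mm remains once everything is resampled onto the 1 mm target grid. A quick check of mask_b's resampled footprint (a sketch, using nilearn's public resample_img and get_data):

import numpy as np
from nibabel import Nifti1Image
from nilearn.image import get_data, resample_img

mask_b = np.zeros((8, 8, 1))
mask_b[2:6, 2:6] = 1
mask_b_img = Nifti1Image(mask_b, np.eye(4) / 2.)

# Resample onto the 1 mm target grid the test uses
resampled = resample_img(mask_b_img, target_affine=np.eye(4),
                         target_shape=(4, 4, 1), interpolation='nearest')
print(np.argwhere(get_data(resampled) > 0))  # voxels (1, 1), (1, 2), (2, 1), (2, 2)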
Example #2
def compute_confounds(imgs, mask_img, n_confounds=5, get_randomized_svd=False,
                      compute_not_mask=False):
    """
    """
    confounds = []
    if not isinstance(imgs, collections.Iterable) or \
            isinstance(imgs, _basestring):
        imgs = [imgs, ]

    img = _utils.check_niimg_4d(imgs[0])
    shape = img.shape[:3]
    affine = get_affine(img)

    if isinstance(mask_img, _basestring):
        mask_img = _utils.check_niimg_3d(mask_img)

    if not _check_same_fov(img, mask_img):
        mask_img = resample_img(
            mask_img, target_shape=shape, target_affine=affine,
            interpolation='nearest')

    if compute_not_mask:
        print("Non-mask-based confounds extraction")
        not_mask_data = np.logical_not(mask_img.get_fdata().astype(int))
        whole_brain_mask = masking.compute_multi_epi_mask(imgs)
        not_mask = np.logical_and(not_mask_data, whole_brain_mask.get_fdata())
        mask_img = new_img_like(img, not_mask.astype(int), affine)

    for img in imgs:
        print("[Confounds Extraction] {0}".format(img))
        img = _utils.check_niimg_4d(img)
        print("[Confounds Extraction] high ariance confounds computation]")
        high_variance = high_variance_confounds(img, mask_img=mask_img,
                                                n_confounds=n_confounds)
        if compute_not_mask and get_randomized_svd:
            signals = masking.apply_mask(img, mask_img)
            non_constant = np.any(np.diff(signals, axis=0) != 0, axis=0)
            signals = signals[:, non_constant]
            signals = signal.clean(signals, detrend=True)
            print("[Confounds Extraction] Randomized SVD computation")
            U, s, V = randomized_svd(signals, n_components=n_confounds,
                                     random_state=0)
            if high_variance is not None:
                confound_ = np.hstack((U, high_variance))
            else:
                confound_ = U
        else:
            confound_ = high_variance
        confounds.append(confound_)

    return confounds
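A hedged usage sketch for the helper above; the file names are hypothetical, and the per-run arrays it returns plug directly into nilearn.signal.clean's confounds argument:

from nilearn import masking, signal

# Hypothetical inputs; compute_confounds is the function defined above.
runs = ['run-1_bold.nii.gz', 'run-2_bold.nii.gz']
confounds = compute_confounds(runs, 'brain_mask.nii.gz', n_confounds=5)

# One (n_timepoints, n_confounds) array per run
for run, conf in zip(runs, confounds):
    signals = masking.apply_mask(run, 'brain_mask.nii.gz')
    cleaned = signal.clean(signals, confounds=conf, detrend=True)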
Example #3
def masking(func, output_dir):
    """compute the mask for all sessions"""
    # compute the mask for all sessions
    # save computed mask
    mask_img = compute_multi_epi_mask(func, upper_cutoff=.7, lower_cutoff=.4)
    vox_vol = np.abs(np.linalg.det(mask_img.affine[:3, :3]))
    full_vol = mask_img.get_fdata().sum() * vox_vol
    ref_vol = 1350000
    if full_vol < ref_vol:
        raise ValueError("wrong mask: volume is %f, should be larger than %f" %
                         (full_vol, ref_vol))
    mask_path = os.path.join(output_dir, "mask.nii.gz")
    print("Saving mask image %s" % mask_path)
    mask_img.to_filename(mask_path)
    # todo: cache this then add masking in pypreprocess
    return mask_img
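The volume check above works because the absolute determinant of the affine's 3x3 block is the volume of a single voxel in mm^3; the 1350000 mm^3 reference is therefore roughly 1.35 litres of brain. A minimal sketch with an illustrative 3 mm isotropic affine:

import numpy as np

affine = np.diag([3., 3., 3., 1.])  # illustrative 3 mm isotropic affine
vox_vol = np.abs(np.linalg.det(affine[:3, :3]))
print(vox_vol)  # 27.0 mm^3 per voxel; total volume = n_mask_voxels * vox_vol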
Example #4
def make_niimg_masks(i):
    tl.files.exists_or_mkdir(data_path + str(i))

    patient_path = path + 'training_' + str(i) + '/'
    pMris = [glob(os.path.join(patient_path,
                               '*.Brain.XX.O.MR_' + mri_type + '.*'))
             for mri_type in mri_types[1:]]
    niimg = [glob(os.path.join(pMri[0], '*' + '.nii'))[0] for pMri in pMris]
    affines = [nib.load(img).affine for img in niimg]
    niimg = [nib.load(img).get_fdata() for img in niimg]
    print(niimg[0].shape)
    
    niimg = [nib.Nifti1Image(niimg[j], affine=affines[j])
             for j in range(len(niimg))]
    niimg = [nilearn.image.smooth_img(img, fwhm=6) for img in niimg]

    brainmask = compute_multi_epi_mask(niimg)
    affine = brainmask.affine
    pMris1 = glob(os.path.join(patient_path, '*.Brain.XX.O.MR_' + '4DPWI' + '.*'))
    im = glob(os.path.join(pMris1[0], '*' + '.nii'))[0]
    im = nib.load(im).get_fdata()
    im = np.squeeze(im)
    im = np.mean(im, axis=3)  # average over time; added after runs without this step
    im = nib.Nifti1Image(im, affine=affine)
    niimg1 = nilearn.image.smooth_img(im, fwhm=6)
    
    brainmask1 = compute_epi_mask(niimg1)
    brainmask2 = nilearn.masking.intersect_masks([brainmask, brainmask1],
                                                 threshold=.8)
    path1 = data_path + str(i) + '/'
    nib.save(brainmask2, os.path.join(path1, 'brainmask.nii.gz'))
    print(patient_path, path1)
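A note on the final intersect_masks call: threshold is the fraction of the input masks in which a voxel must be set to be kept, so with two masks any threshold above 0.5 behaves as a strict intersection. A toy sketch (shapes and blocks are illustrative):

import numpy as np
from nibabel import Nifti1Image
from nilearn.masking import intersect_masks

a = np.zeros((5, 5, 5)); a[1:4, 1:4, 1:4] = 1
b = np.zeros((5, 5, 5)); b[2:5, 2:5, 2:5] = 1
imgs = [Nifti1Image(m, np.eye(4)) for m in (a, b)]

union = intersect_masks(imgs, threshold=0.)    # voxels set in any mask
strict = intersect_masks(imgs, threshold=.8)   # voxels set in both masks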
Example #5
def get_population_mask(path_fmri=dataset_path + '/datasets/01/fMRI/'):

    individuals = sorted([f for f in listdir(path_fmri)
                          if isdir(join(path_fmri, f))])

    individuals_images = []

    fmri_file = '3_nw_mepi_rest_with_cross.nii.gz'
    reference_img = image.load_img(join(path_fmri, individuals[0], fmri_file))
    target_affine = reference_img.affine
    target_shape = reference_img.shape[:3]

    for individual in individuals:
        individual_img = image.load_img(join(path_fmri, individual, fmri_file))

        if individual_img.affine[0][-1] != 0.0:
            fmri_image = image.resample_img(individual_img,
                                            target_affine=target_affine,
                                            target_shape=target_shape)
            individuals_images.append(fmri_image)

    concatenated_imgs = image.concat_imgs(individuals_images)

    return NiftiMasker(compute_multi_epi_mask(individuals_images),
                       standardize=True).fit(concatenated_imgs)
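The returned object is a fitted NiftiMasker whose mask is the population-level EPI mask, so any individual image can be projected into a common (timepoints x voxels) matrix. A hedged usage sketch (the path is hypothetical):

masker = get_population_mask()
X = masker.transform('some_subject/3_nw_mepi_rest_with_cross.nii.gz')
print(X.shape)  # (n_timepoints, n_voxels_in_population_mask)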
Example #6
def test_compute_multi_epi_mask():
    # Check that an empty list of images creates a meaningful error
    assert_raises(TypeError, compute_multi_epi_mask, [])
    # As it calls intersect_masks, we only test resampling here.
    # Same masks as test_intersect_masks
    mask_a = np.zeros((4, 4, 1), dtype=bool)
    mask_a[2:4, 2:4] = 1
    mask_a_img = Nifti1Image(mask_a.astype(int), np.eye(4))

    mask_b = np.zeros((8, 8, 1), dtype=bool)
    mask_b[2:6, 2:6] = 1
    mask_b_img = Nifti1Image(mask_b.astype(int), np.eye(4) / 2.)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", MaskWarning)
        assert_raises(ValueError, compute_multi_epi_mask,
                      [mask_a_img, mask_b_img])
    mask_ab = np.zeros((4, 4, 1), dtype=bool)
    mask_ab[2, 2] = 1
    mask_ab_ = compute_multi_epi_mask([mask_a_img, mask_b_img], threshold=1.,
                                      opening=0,
                                      target_affine=np.eye(4),
                                      target_shape=(4, 4, 1))
    assert_array_equal(mask_ab, np.asarray(mask_ab_.dataobj))
Example #7
def get_data(project_data, dataset, debug, project_wd, resamplefactor, raw,
             analysis):
    ''' Load the data files and return
    :param project_data: path to the project data
    :param dataset: name of the dataset to analyse
    :param debug: if True, save the computed brain mask to disk
    :param project_wd: project working directory
    :param resamplefactor: factor by which to resample the images
    :param raw: which type of freesurfer data should we analyse (the raw,
    where both datasets have not been matched, or the not raw, where the
    number of columns between datasets is the same)
    :param analysis: type of analysis (e.g. 'summary_data')
    :return: demographics: demographics dataframe
    :return: imgs: list of images (or None for the freesurfer datasets)
    :return: dataframe.values: just the numeric values of the dataframe
    '''

    if dataset == 'freesurf_combined' and raw == True:
        raise ValueError('The combined analysis cannot use the raw dataset')
    elif dataset == 'OASIS':
        # remove the file end and get list of all used subjects
        fileList = os.listdir(project_data)
        rawsubjectsId = [
            re.sub(r'^smwc1(.*?)\_mpr-1_anon.nii$', '\\1', file)
            for file in fileList if file.endswith('.nii')
        ]
        # TODO: Change this. For testing purpose select just the first 5 subjects
        #rawsubjectsId = rawsubjectsId[:25]

        # Load the demographics for each subject
        demographics, selectedSubId = get_data_covariates(
            project_data, rawsubjectsId, dataset)
        # print subjects mean age
        get_mean_age(demographics)
        # Load image proxies
        imgs = [
            nib.load(
                os.path.join(project_data, 'smwc1%s_mpr-1_anon.nii' % subject))
            for subject in tqdm(selectedSubId)
        ]

    elif dataset == 'BANC':
        # For now, performing analysis on White Matter.
        project_data_path = os.path.join(project_data, 'wm_data')
        # remove the file end and get list of all used subjects
        fileList = os.listdir(project_data_path)
        rawsubjectsId = [
            file[5:12] for file in fileList if file.endswith('.nii.gz')
        ]
        # TODO: select only a set of 5 subjects
        # rawsubjectsId = rawsubjectsId[:5]

        # Load the demographics for each subject
        demographics, selectedSubId = get_data_covariates(
            project_data, rawsubjectsId, dataset)
        # print subjects mean age
        get_mean_age(demographics)
        # Get the file path of the selected subjects
        subjectsFile = [
            os.path.join(project_data_path, file) for file in fileList
            if file[5:12] in selectedSubId
        ]

        # Load image proxies
        with Pool() as p:
            imgs = list(
                tqdm(p.imap(_load_nibabel, subjectsFile),
                     total=len(selectedSubId)))

    elif (dataset == 'BANC_freesurf' and raw == True):
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'original_dataset', 'BANC',
                                               'aparc_aseg_stats_BANC.csv'),
                                  delimiter=',',
                                  index_col=0)
        rawsubjectsId = freesurf_df.index

        # Load the demographics for each subject
        demographics, selectedSubId = get_data_covariates(
            project_data, rawsubjectsId, 'BANC')
        # Rename columns to maintain consistency with the ukbio dataset
        demographics.rename(index=str,
                            columns={
                                'ID': 'id',
                                'Age': 'age'
                            },
                            inplace=True)
        return demographics, None, freesurf_df

    elif (dataset == 'UKBIO_freesurf' and raw == False
          and not analysis == 'summary_data'):
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'matched_dataset',
                                               'aparc_aseg_UKBIO.csv'),
                                  delimiter=',')
        # Read the full matrix to get the demographics information
        ukbio_full_df = pd.read_csv(os.path.join(
            project_wd, 'BayOptPy', 'freesurfer_preprocess',
            'original_dataset', 'UKBIO', 'UKB_10k_FS_4844_combined.csv'),
                                    delimiter=',',
                                    index_col=False)
        demographics = ukbio_full_df[['age', 'sex', 'id']].copy()
        freesurf_df = freesurf_df.set_index('id')
        return demographics, None, freesurf_df
    elif (dataset == 'BANC_freesurf' and raw == False
          and not analysis == 'summary_data'):
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'matched_dataset',
                                               'aparc_aseg_BANC.csv'),
                                  delimiter=',',
                                  index_col=0)
        rawsubjectsId = freesurf_df.index

        # Load the demographics for each subject
        demographics, selectedSubId = get_data_covariates(
            project_data, rawsubjectsId, 'BANC')
        # Rename columns to maintain consistency with the ukbio dataset
        demographics.rename(index=str,
                            columns={
                                'ID': 'id',
                                'Age': 'age'
                            },
                            inplace=True)
        return demographics, None, freesurf_df

    elif (dataset == 'UKBIO_freesurf' and raw == True
          and not analysis == 'summary_data'):
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'original_dataset', 'UKBIO',
                                               'UKB_10k_FS_4844_combined.csv'),
                                  delimiter=',')
        freesurf_df = freesurf_df.drop(columns='id.4844')
        demographics = freesurf_df[['age', 'sex', 'id']].copy()
        freesurf_df = freesurf_df.set_index('id')
        return demographics, None, freesurf_df
    elif (dataset == 'UKBIO_freesurf' and raw == False
          and analysis == 'summary_data'):
        # This dataset contains only 21 features that represent summary metrics
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'matched_dataset',
                                               'aparc_aseg_UKBIO_summary.csv'),
                                  delimiter=',')
        # Read the full matrix to get the demographics information
        ukbio_full_df = pd.read_csv(os.path.join(
            project_wd, 'BayOptPy', 'freesurfer_preprocess',
            'original_dataset', 'UKBIO', 'UKB_10k_FS_4844_combined.csv'),
                                    delimiter=',')
        demographics = ukbio_full_df[['age', 'sex', 'id']].copy()
        return demographics, None, freesurf_df
    elif (dataset == 'BANC_freesurf' and raw == False
          and analysis == 'summary_data'):
        # This dataset contains only 21 features that represent summary metrics
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'matched_dataset',
                                               'aparc_aseg_BANC_summary.csv'),
                                  delimiter=',',
                                  index_col=0)
        rawsubjectsId = freesurf_df.index

        # Load the demographics for each subject
        demographics, selectedSubId = get_data_covariates(
            project_data, rawsubjectsId, 'BANC')
        # Rename columns to maintain consistency with the ukbio dataset
        demographics.rename(index=str,
                            columns={
                                'ID': 'id',
                                'Age': 'age'
                            },
                            inplace=True)
        return demographics, None, freesurf_df

    elif (dataset == 'freesurf_combined'):
        ukbio_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                            'freesurfer_preprocess',
                                            'matched_dataset',
                                            'aparc_aseg_UKBIO.csv'),
                               delimiter=',',
                               index_col=0)

        banc_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                           'freesurfer_preprocess',
                                           'matched_dataset',
                                           'aparc_aseg_BANC.csv'),
                              delimiter=',',
                              index_col=0)
        ukbio_full_df = pd.read_csv(os.path.join(
            project_wd, 'BayOptPy', 'freesurfer_preprocess',
            'original_dataset', 'UKBIO', 'UKB_10k_FS_4844_combined.csv'),
                                    delimiter=',')
        rawsubjectsId = banc_df.index
        # Load the demographics for each subject
        banc_demographics, selectedSubId = get_data_covariates(
            project_data, rawsubjectsId, 'BANC')
        ukbio_demographics = ukbio_full_df[['age', 'sex', 'id']].copy()
        # Concatenate both freesurfer datasets
        freesurfer_df = pd.concat([ukbio_df, banc_df])

        # Concatenate demographics information (Age and Sex)
        tmp = banc_demographics.drop('original_dataset', axis=1)
        tmp.rename(index=str, columns={'ID': 'id', 'Age': 'age'}, inplace=True)
        # transform M/F into male/female
        tmp['sex'] = tmp['sex'].map({'F': 'female', 'M': 'male'})
        # Add column to specify dataset
        tmp['dataset'] = 'banc'
        ukbio_demographics['dataset'] = 'ukbio'
        demographics = pd.concat([ukbio_demographics, tmp], sort=False)
        # TODO: For now assume that the index in the BIOBANK correspond to th
        # Stratify subjects. Divide them into classes <30, 30<40, 40<50, 50<60,
        # 60<70, 70<80, 80<90, 90<100. Each age will be then further stratified
        # into F/M.
        bins = (17, 30, 40, 50, 60, 70, 80, 90)
        group_labels = range(1, len(bins))
        demographics['age_band'] = pd.cut(demographics['age'],
                                          bins,
                                          labels=group_labels)
        sex_age_group = demographics.groupby(['sex', 'age_band'])
        # Note that the following groups are created:
        # ('female', 1), ('female', 2), ('female', 3), ('female', 4), ('female',  5),
        # ('female', 6), ('female', 7), ('male', 1), ('male', 2), ('male', 3),
        # ('male', 4), ('male', 5), ('male', 6), ('male', 7)]
        # This will label the groups cited above in ascending order. In total
        # you will have 14 groups (1-14), grouped according to their age and sex
        demographics['stratify'] = sex_age_group.grouper.group_info[0] + 1
        # same order between both files
        return demographics, None, freesurfer_df

    else:
        raise ValueError('Analysis for this dataset is not yet implemented!')

    print('Resample the dataset by a factor of %d' % resamplefactor)
    print('Original image size: %s' % (imgs[0].shape, ))
    # resample the dataset to a lower resolution: increase the voxel size by
    # the resample factor
    resampleby2affine = np.array([[resamplefactor, 1, 1, 1],
                                  [1, resamplefactor, 1, 1],
                                  [1, 1, resamplefactor, 1], [1, 1, 1, 1]])
    target_affine = np.multiply(imgs[0].affine, resampleby2affine)
    print('Resampling Images')
    with Pool() as p:
        args = partial(_multiprocessing_resample, target_affine=target_affine)
        resampledimgs = list(tqdm(p.imap(args, imgs), total=len(imgs)))
    print('Resampled image size: %s' % (resampledimgs[0].shape, ))

    # Use nilearn to mask only the brain voxels across subjects
    print('Compute brain mask')
    # The lower_cutoff and upper_cutoff represent the lower and upper
    # fractions of the histogram to be discarded
    MeanImgMask = masking.compute_multi_epi_mask(resampledimgs,
                                                 lower_cutoff=0.001,
                                                 upper_cutoff=.85,
                                                 opening=False)
    # Apply the group mask on all subjects.
    # Note: The apply_mask function returns the flattened data as a numpy array
    maskedData = [
        masking.apply_mask(img, MeanImgMask) for img in resampledimgs
    ]
    # If the debug option is set, save a NIfTI image of the mask.
    # Note: if you resampled the images you will not be able to overlay the
    # mask on the original brain
    if debug:
        mask_path = os.path.join(project_wd, 'BayOptPy', 'tpot')
        print('Saving brain mask: %s' % mask_path)
        nib.save(MeanImgMask,
                 os.path.join(mask_path, 'mask_%s.nii.gz' % dataset))
    print('Applied mask to the dataset')

    # Transform the imaging data into a np array (subjects x voxels)
    maskedData = np.array(maskedData)

    return demographics, imgs, maskedData
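Stripped of the dataset branches, the imaging part of this function is a resample, group-mask, apply-mask pipeline. A condensed, hedged sketch of that core (file names and the factor are illustrative, and scaling the affine diagonal is a simpler variant of the elementwise multiply used above):

import numpy as np
import nibabel as nib
from nilearn import masking
from nilearn.image import resample_img

imgs = [nib.load(f) for f in ('sub-01_wm.nii.gz', 'sub-02_wm.nii.gz')]

# Coarsen the grid: enlarge the voxel size by the resample factor
factor = 2
target_affine = imgs[0].affine.copy()
target_affine[:3, :3] *= factor

resampled = [resample_img(img, target_affine=target_affine) for img in imgs]

# One brain mask across subjects, then a (subjects x voxels) data matrix
group_mask = masking.compute_multi_epi_mask(resampled, lower_cutoff=0.001,
                                            upper_cutoff=.85, opening=False)
data = np.array([masking.apply_mask(img, group_mask) for img in resampled])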
Example #8
def do_one_sess(sess_curr, sub_curr, params, verbose=False):
    """
    launch runs processing for sess_curr

    parameters:
    -----------
    sess_curr: dict
            contains sess base directory
            contains sess index
    sub_curr: dict
            contains sub index
    params: dict
            parameters for layout, data and analysis
    """

    sess_idx = sess_curr['sess_idx']
    sess_dir = sess_curr['sess_dir']
    sub_idx = sub_curr['sub_idx']
    nb_runs = params['data']['nb_run'] 
    assert nb_runs == 4  # for debugging

    dlayo = params['layout']

    runs_dir = osp.join(sess_dir, dlayo['dir']['runs']) # should be preproc
    sess_curr['dir_runs'] = runs_dir

    dir_smooth_imgs = osp.join(runs_dir, dlayo['dir']['smooth'])
    sess_curr['dir_smooth_imgs'] = dir_smooth_imgs

    sess_curr['droi'] = osp.join(runs_dir, dlayo['atlas']['dir']) # 'registered_files'
    sess_curr['roi_prefix'] = dlayo['atlas']['prepat']            # 'rraal_*.nii'     
    sess_curr['dsig'] = osp.join(runs_dir, dlayo['out']['signals']['dir']) 

    save_is_true = params['analysis']['write_signals']
    if save_is_true: 
        # rm existing and recreate signal directory
        suf.rm_and_create(sess_curr['dsig'])

    sess_curr['dreal'] = osp.join(runs_dir, dlayo['dir']['realign'])

    #- csf dir and file
    sess_curr['csf_dir'] = osp.join(runs_dir, dlayo['csf']['dir'])
    csf_file = gb.glob(osp.join(sess_curr['csf_dir'], dlayo['csf']['roi_mask']))
    if not csf_file:
        print("glob empty: {} {}".format(
            sess_curr['csf_dir'], dlayo['csf']['roi_mask']))
    csf_file = suf._check_glob_res(csf_file, ensure=1, files_only=True)
    sess_curr['csf_filename'] = dlayo['csf']['roi_mask']

    #- wm dir and file
    sess_curr['wm_dir'] = osp.join(runs_dir, dlayo['wm']['dir'])
    wm_file = gb.glob(osp.join(sess_curr['wm_dir'], dlayo['wm']['roi_mask']))
    if not wm_file:
        print("glob empty: {} {}".format(
            sess_curr['wm_dir'], dlayo['wm']['roi_mask']))
    wm_file = suf._check_glob_res(wm_file, ensure=1, files_only=True)
    sess_curr['wm_filename'] = dlayo['wm']['roi_mask']

    #- Get runs' filenames
    #------------------------
    pat_imgs_files = dlayo['pat']['sub+sess+run+']+"*.nii*"
                                # requires idx for sub, sess and run
    runs_pat = [pat_imgs_files.format(sub_idx, sess_idx, run_idx) \
                                        for run_idx in range(1, nb_runs+1)]
                                # /!\  start idx at 1 requires nb_runs+1  /!\
    runs = [gb.glob(osp.join(dir_smooth_imgs, pat)) for pat in runs_pat]
    # /!\ ATTENTION /!\ must sort the files by filename; this should sort them in time
    for run in runs:
        run.sort()
    sess_curr['runs'] = runs
    
    # compute session wide mask
    #-----------------------------------------------------
    # compute_epi_mask(runs[0], opening=1, connected=True)
    dir_mask = osp.join(runs_dir, dlayo['out']['sess_mask']['dir'])
    sess_curr['mask_dir'] = dir_mask
    sess_curr['mask_filename'] = dlayo['out']['sess_mask']['roi_mask']

    sess_mask = None
    # TODO : separate compute mask and apply
    if params['analysis']['apply_sess_mask']:
        sess_mask = msk.compute_multi_epi_mask(runs, lower_cutoff=0.2, 
                    upper_cutoff=0.85, connected=True, opening=2, threshold=0.5)
        suf.rm_and_create(dir_mask)
        sess_mask.to_filename(osp.join(sess_curr['mask_dir'], sess_curr['mask_filename']))

    sess_curr['mask'] = sess_mask

    # TODO
    # check mask is reasonable - how ???

    # - mvt file
    # example : mvtfile = osp.join(dreal,'rp_asub01_sess01_run01-0006.txt')
    # /!\ will always be run01 for spm /!\
    mvtpat = dlayo['spm_mvt']['mvtrun1+'].format(sub_idx, sess_idx) 
    mvtfile = gb.glob(osp.join(sess_curr['dreal'], mvtpat))
    mvtfile = suf._check_glob_res(mvtfile, ensure=1, files_only=True)
    sess_curr['mvtfile'] = mvtfile

    # - parameter file for condition names
    param_pattern = (dlayo['pat']['sub+sess+']).format(sub_idx, sess_idx)
    paramfile = gb.glob(osp.join(sess_dir, param_pattern + "params"))
    paramfile = suf._check_glob_res(paramfile, ensure=1, files_only=True)
    with open(paramfile) as fparam:
        sess_param = json.load(fparam)

    runs_info = {}
    run_curr = {}
    for idx_run, run in enumerate(runs, 1): # /!\ starts at 1 /!\
        run_curr['run_idx'] = idx_run
        run_curr['file_names'] = run
        if verbose: print('\n' + '---'*9  + "\n" + "run{:02d}".format(idx_run))
        # TODO : fix this to have sess_param return motion['run1']='HIGH' etc
        run_curr['motion'] = sess_param['motion'][idx_run-1] # sess_param['motion'] is 0 based

        runs_info["run{:02d}".format(idx_run)] = \
                    do_one_run(run_curr, sess_curr, sub_curr, params, verbose=verbose)

    return runs_info
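Nothing in the snippet shows the params dictionary itself, so the following skeleton is reconstructed purely from the keys accessed above; every value is illustrative:

# Hypothetical skeleton of `params`, inferred from the keys used above.
params = {
    'data': {'nb_run': 4},
    'analysis': {'write_signals': True, 'apply_sess_mask': True},
    'layout': {
        'dir': {'runs': 'preproc', 'smooth': 'smoothed', 'realign': 'realign'},
        'atlas': {'dir': 'registered_files', 'prepat': 'rraal_*.nii'},
        'out': {
            'signals': {'dir': 'signals'},
            'sess_mask': {'dir': 'sess_mask', 'roi_mask': 'sess_mask.nii'},
        },
        'csf': {'dir': 'csf', 'roi_mask': 'csf_mask.nii'},
        'wm': {'dir': 'wm', 'roi_mask': 'wm_mask.nii'},
        'pat': {'sub+sess+run+': 'sub{:02d}_sess{:02d}_run{:02d}',
                'sub+sess+': 'sub{:02d}_sess{:02d}'},
        'spm_mvt': {'mvtrun1+': 'rp_asub{:02d}_sess{:02d}_run01*.txt'},
    },
}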
Example #9
def l2_two_sample_ttest(images, labels, outputdir, group_names=('group 1', 'group 2'), workingdir='/data/nipypes',
                        covariates=None, logging=False, autorun=False, multiproc=True, keep_cache=False):
    """

    :param images (mandatory): list of Nifti objects
    :param labels (mandatory): numpy array of 1's and 2's
    :param outputdir (mandatory'): output directory
    :param group_names (optional, default=('group 1', 'group 2')): tuple with group names
    :param workingdir (optional, default='/data/nipypes'): nipype working directory
    :param covariates (optional, default=None): list of covariate dictionaries
    :param logging (optional, default=False): boolean
    :param autorun (optional, default=False): run workflow
    :param keep_cache (optional, default=False): keep nipype cache
    :return: instance of nipype.pipeline.engine.Workflow
    """

    if not os.path.exists(outputdir):
        print("Creating output directory % s" % outputdir)
        os.mkdir(outputdir)

    # workflow
    wf = pe.Workflow(name=os.path.basename(os.path.normpath(workingdir)))
    wf.config['execution'] = {'hash_method': 'content',  # 'timestamp' or 'content'
                              'single_thread_matlab': 'False',
                              'poll_sleep_duration': '5',
                              'stop_on_first_crash': 'False',
                              'stop_on_first_rerun': 'False'}
    if logging:
        wf.config['logging'] = {'log_directory': outputdir,
                                'log_to_file': 'True'}
    wf.base_dir = os.path.dirname(os.path.normpath(workingdir))

    # create mask if it does not exist
    if not os.path.exists(os.path.join(outputdir, 'mask.nii')):
        print("Creating mask file% s" % os.path.join(outputdir, 'mask.nii'))
        from nilearn.masking import compute_multi_epi_mask
        import nibabel

        mask = compute_multi_epi_mask(images)
        mask.set_data_dtype(float)
        nibabel.save(mask, os.path.join(outputdir, 'mask.nii'))

    # model design
    ttest_design = pe.Node(interface=spm.TwoSampleTTestDesign(), name="ttest_design")
    ttest_design.inputs.group1_files = [img for i, img in enumerate(images) if labels[i] == 1]
    ttest_design.inputs.group2_files = [img for i, img in enumerate(images) if labels[i] == 2]
    ttest_design.inputs.explicit_mask_file = os.path.join(outputdir, 'mask.nii')
    if covariates is not None:
        ttest_design.inputs.covariates = covariates
    ttest_design.inputs.threshold_mask_none = True
    ttest_design.inputs.group_contrast = True

    # model estimation
    ttest_estimate = pe.Node(interface=spm.EstimateModel(), name="ttest_estimate")
    ttest_estimate.inputs.estimation_method = {'Classical': 1}

    # contrasts
    ttest_contrast = pe.Node(interface=spm.EstimateContrast(), name="ttest_contrast")
    con_1 = (group_names[0], 'T', ['Group_{1}'], [1])
    con_2 = (group_names[1], 'T', ['Group_{2}'], [1])
    con_3 = ('%s > %s' % tuple(group_names), 'T', ['Group_{1}', 'Group_{2}'], [1, -1])
    con_4 = ('%s < %s' % tuple(group_names), 'T', ['Group_{1}', 'Group_{2}'], [-1, 1])
    ttest_contrast.inputs.contrasts = [con_1, con_2, con_3, con_4]
    if covariates is not None:
        for cov in covariates:
            ttest_contrast.inputs.contrasts += [('+' + cov['name'], 'T', [cov['name']], [1])]
            ttest_contrast.inputs.contrasts += [('-' + cov['name'], 'T', [cov['name']], [-1])]

    # save data
    datasink = pe.Node(DataSink(base_directory=outputdir), name="datasink")

    # connect nodes
    wf.connect([(ttest_design, ttest_estimate, [('spm_mat_file', 'spm_mat_file')]),
                (ttest_estimate, ttest_contrast, [('spm_mat_file', 'spm_mat_file'),
                                                  ('beta_images', 'beta_images'),
                                                  ('residual_image', 'residual_image')]),
                (ttest_estimate, datasink, [('residual_image', '@res'),
                                            ('beta_images', '@beta'),
                                            ('RPVimage', '@RPV')]),
                (ttest_contrast, datasink, [('spm_mat_file', '@SPM'),
                                            ('spmT_images', '@T'),
                                            ('con_images', '@con')]),
                ])

    if autorun:
        if multiproc:
            import sys

            if not hasattr(sys.stdin, 'close'):
                def dummy_close():
                    pass

                sys.stdin.close = dummy_close

            wf.run('MultiProc')
        else:
            wf.run()
        print('Group level statistics saved to ' + outputdir)
        print("finished!")

    if not keep_cache:
        import shutil

        shutil.rmtree(workingdir)

    return wf
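A hedged usage sketch: labels assigns each image to group 1 or 2, and autorun=True executes the workflow immediately (all paths and file names are hypothetical):

import numpy as np
import nibabel as nib

# Hypothetical first-level contrast images, one per subject
images = [nib.load('sub-%02d_con_0001.nii' % i) for i in range(1, 21)]
labels = np.array([1] * 10 + [2] * 10)  # first ten in group 1, rest in group 2

wf = l2_two_sample_ttest(images, labels,
                         outputdir='/data/group_ttest',
                         group_names=('patients', 'controls'),
                         autorun=True)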