Example #1
def compare_ieeg_freq(file_A, file_B, frequency, baseline, merge_method, measure,
                      output_dir):
    """
    Parameters
    ----------
    baseline : bool
        whether to subtract the baseline
    merge_method : str
        how to merge the values in the frequency band of interest (e.g. "dh2012")
    measure : str
        one of "diff", "percent", "zstat", "dh2012_t", "dh2012_r2"
    """
    ieeg_A = file_Core(file_A)
    ieeg_B = file_Core(file_B)

    with file_A.open('rb') as f:
        dat_A = load(f)
    with file_B.open('rb') as f:
        dat_B = load(f)

    if baseline:
        dat_A, dat_B = correct_baseline(dat_A, dat_B, frequency)

    hfa_A = merge(dat_A, merge_method, frequency)
    hfa_B = merge(dat_B, merge_method, frequency)

    if measure == 'diff':
        ecog_stats = compute_diff(hfa_A, hfa_B)
    elif measure == 'percent':
        ecog_stats = compute_percent(hfa_A, hfa_B)
    elif measure in ('zstat', 'dh2012_t'):  # identical
        ecog_stats = compute_zstat(hfa_A, hfa_B)
        if measure == 'dh2012_t':
            ecog_stats.data[0] *= -1  # opposite sign in dh2012's script

    elif measure == 'dh2012_r2':
        ecog_stats = calc_dh2012_values(hfa_A, hfa_B, measure)

    # p-values are always computed for now (TODO: check them); the else
    # branch below is therefore unreachable
    if True:
        pvalues = calc_dh2012_values(hfa_A, hfa_B, 'dh2012_pv')
    else:
        pvalues = [NaN, ] * ecog_stats.number_of('chan')[0]

    output = file_Core(
        subject=ieeg_A.subject,
        session=ieeg_A.session,
        run=ieeg_A.run,
        acquisition=ieeg_A.acquisition,
        modality='compare',
        extension='.tsv',
        task=find_longest_match(ieeg_A.task, ieeg_B.task),
        )
    compare_file = output_dir / output.get_filename()
    with compare_file.open('w') as f:
        f.write('channel\tmeasure\tpvalue\n')
        for i, chan in enumerate(ecog_stats.chan[0]):
            f.write(f'{chan}\t{ecog_stats(trial=0, chan=chan)}\t{pvalues(trial=0, chan=chan)}\n')

    return compare_file
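
A minimal usage sketch, assuming the two pickled recordings and the output directory already exist; the paths and the frequency band below are made up for illustration and are not part of the original code.

from pathlib import Path

file_A = Path('analysis/sub-bert_ses-day02_task-motoractive_run-1_ieeg.pkl')    # hypothetical
file_B = Path('analysis/sub-bert_ses-day02_task-motorbaseline_run-1_ieeg.pkl')  # hypothetical
output_dir = Path('compare')

compare_file = compare_ieeg_freq(
    file_A,
    file_B,
    frequency=(65, 95),      # hypothetical high-frequency band
    baseline=True,
    merge_method='dh2012',
    measure='dh2012_r2',
    output_dir=output_dir,
    )
# compare_file points to the .tsv with one row per channel
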
Example #2
def xmain(analysis_dir,
          freesurfer_dir,
          output_dir,
          modality='compare',
          surface='white',
          surf_fwhm=0):
    """
    map feat values on freesurfer surface',

    Parameters
    ----------
    analysis_dir : path

    freesurfer_dir : path

    output_dir : path

    modality : str
        "compare"
    surface : str
        "white", "pial"
    surf_fwhm : float
        FWHM
    """
    p_all = []
    surfs = []
    for in_vol_file in find_in_bids(analysis_dir,
                                    generator=True,
                                    extension='.nii.gz',
                                    modality=modality):
        in_vol = file_Core(in_vol_file)
        feat_path = find_in_bids(analysis_dir,
                                 subject=in_vol.subject,
                                 extension='.feat')
        for hemi in ('lh', 'rh'):
            p, out_surf = vol2surf(in_vol, feat_path, freesurfer_dir, hemi,
                                   surface, surf_fwhm)
            p_all.append(p)
            surfs.append(out_surf)

    # wait for all processes to finish, check their exit status, then remove
    # the intermediate 'mri_nonan' files
    for p in p_all:
        p.wait()
    for p in p_all:
        check_subprocess(p)
    for info in surfs:
        info['mri_nonan'].unlink()

    img_dir = output_dir / SURF_DIR
    rmtree(img_dir, ignore_errors=True)
    img_dir.mkdir(exist_ok=True, parents=True)

    for one_surf in surfs:
        plot_surf(img_dir, freesurfer_dir, one_surf, surface)
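
A possible call of xmain with placeholder directories; the directory names are assumptions, not taken from the original code.

from pathlib import Path

analysis_dir = Path('data/analysis')      # hypothetical: holds the *_compare.nii.gz and .feat results
freesurfer_dir = Path('data/freesurfer')  # hypothetical: the FreeSurfer subjects directory
output_dir = Path('data/output')

xmain(analysis_dir, freesurfer_dir, output_dir,
      modality='compare',
      surface='pial',
      surf_fwhm=5)
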
Example #3
def import_df_regions(parameters):
    """Read all *_brainregions.tsv files into one dataframe, adding subject,
    session and acquisition from each BIDS file name."""
    regions_dir = name(parameters, 'brainregions_dir')

    all_df = []
    for tsv_file in regions_dir.glob('*_brainregions.tsv'):

        bids = file_Core(tsv_file.name)

        temp = read_csv(tsv_file, sep='\t')
        temp['subject'] = bids.subject
        temp['session'] = bids.session
        temp['acquisition'] = bids.acquisition
        all_df.append(temp)

    regions = concat(all_df)
    regions.drop(['x', 'y', 'z'], axis=1, inplace=True)
    return regions
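
A hedged usage sketch, assuming parameters is the same parameter mapping used by the rest of the project and that name() resolves 'brainregions_dir' to a directory containing the *_brainregions.tsv files.

regions = import_df_regions(parameters)
print(regions.groupby('subject').size())  # number of rows per subject
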
Example #4
def _compute_summary(in_files, output_dir):

    summary_file = output_dir / 'summary_per_subject.tsv'

    with summary_file.open('w') as f:

        f.write('subject\tsession\ttask\tacquisition\t'
                'size_at_peak\tr2_at_peak\tslope_at_peak\tintercept_at_peak\t'
                'size_at_concave\tr2_at_concave\tdiff_r2\n')

        for corr_file in in_files:

            corr_tsv = read_tsv(corr_file)
            # row where R squared is at its peak
            size_max, r2_max, slope, intercept = corr_tsv[argmax(corr_tsv['Rsquared'])]

            # second derivative of R squared; its minimum marks the most concave point
            deriv = gradient(gradient(corr_tsv['Rsquared']))
            size_deriv, r2_deriv, *dummy = corr_tsv[argmin(deriv)]

            file_info = file_Core(corr_file)

            f.write(f'{file_info.subject}\t{file_info.session}\t{file_info.task}\t{file_info.acquisition}\t{size_max}\t{r2_max}\t{slope}\t{intercept}\t{size_deriv}\t{r2_deriv}\t{r2_max - r2_deriv}\n')
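
A minimal sketch of calling _compute_summary; the glob pattern used to collect the per-subject correlation files is an assumption.

from pathlib import Path

output_dir = Path('data/output')
in_files = sorted(output_dir.glob('*_corr.tsv'))  # hypothetical naming of the input files
_compute_summary(in_files, output_dir)
# writes output_dir / 'summary_per_subject.tsv', one row per input file
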
Example #5
def import_df_ols(parameters):
    """Compute onset as well"""
    TSV_DIR = name(parameters, 'ols_tsv')

    all_ols = []
    for tsv_file in TSV_DIR.glob('*.tsv'):
        bids = file_Core(tsv_file.name)
        ols = read_csv(tsv_file, sep='\t')
        ols['subject'] = bids.subject
        ols['session'] = bids.session
        ols['run'] = bids.run
        ols['acquisition'] = bids.acquisition
        all_ols.append(ols)

    ols = concat(
        all_ols,
        sort=False)  # avoid the pandas warning raised when the files do not all share the same columns

    return ols
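
A short usage sketch, again assuming parameters is the project's parameter mapping and that 'ols_tsv' resolves to the directory of OLS result files.

ols = import_df_ols(parameters)
print(ols['subject'].unique())  # subjects found in the ols_tsv directory
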
Example #6
def make_name(filename, event_type, ext='.html'):
    """Create name based on the data file name

    Parameters
    ----------
    filename : str
        filename of the dataset of interest
    event_type : str
        event type used to identify the trials (one of 'cues', 'open', 'close',
        'movements', 'extension', 'flexion')
    ext : str
        extension of the file

    Returns
    -------
    str
        file name specific to this filename and event type
    """
    f = file_Core(filename)
    if f.acquisition is None:
        acq = ''
    else:
        acq = f'_{f.acquisition}'
    return f'{f.subject}_run-{f.run}{acq}_{event_type}{ext}'
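
A quick check of the expected output, using a made-up but BIDS-like file name; the exact parsing of the name depends on bidso's file_Core.

html_name = make_name('sub-bert_ses-day02_task-motor_acq-clinical_run-1_ieeg.eeg', 'cues')
# expected to be something like 'bert_run-1_clinical_cues.html'
print(html_name)
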
Example #7
from bidso import file_Core

subject = 'bert'

task_ieeg = file_Core(
    subject=subject,
    session='day02',
    modality='ieeg',
    task='motor',
    run='1',
    acquisition='clinical',
    extension='.eeg',
)

task_fmri = file_Core(
    subject=subject,
    session='day01',
    modality='bold',
    task='motor',
    run='1',
    extension='.nii.gz',
)

task_anat = file_Core(
    subject=subject,
    session='day01',
    acquisition='wholebrain',
    modality='T1w',
    extension='.nii.gz',
)
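
A hedged sketch of turning these objects into actual paths: get_filename() is used the same way as in Example #1, and the directory layout below is the standard BIDS sub-<label>/ses-<label>/<datatype>/ convention rather than something taken from this snippet.

from pathlib import Path

bids_dir = Path('data/bids')  # hypothetical dataset root
ieeg_path = (bids_dir / f'sub-{task_ieeg.subject}' / f'ses-{task_ieeg.session}'
             / 'ieeg' / task_ieeg.get_filename())
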
Example #8
TEST_PATH = Path(__file__).resolve().parent
DATA_PATH = TEST_PATH / 'data'

BIDS_PATH = DATA_PATH / 'bids'
FREESURFER_PATH = DATA_PATH / 'freesurfer'

ANALYSIS_PATH = DATA_PATH / 'analysis'
ANALYSIS_PATH.mkdir(parents=True, exist_ok=True)

subject = 'delft'
task_ieeg = file_Core(
    subject=subject,
    session='UMCUECOGday01',
    modality='ieeg',
    task='motorHandLeft',
    run='1',
    acquisition='clinical',
    extension='.eeg',
    )
task_fmri = file_Core(
    subject=subject,
    session='UMCU3Tdaym13',
    modality='bold',
    task='motorHandLeft',
    run='1',
    extension='.nii.gz',
    )
task_anat = file_Core(
    subject=subject,
    session='UMCU3Tdaym13',