Exemplo n.º 1
0
def _bids2nipypeinfo(in_file,
                     events_file,
                     regressors_file,
                     regressors_names=None,
                     motion_columns=None,
                     decimals=3,
                     amplitude=1.0,
                     remove_tr=4):
    """Convert BIDS-style events and confounds tables into nipype run info.

    Parameters
    ----------
    in_file : str
        Path to the functional scan this run info describes.
    events_file : str
        Whitespace-separated table with ``onset``, ``duration`` and
        ``trial_type_N`` columns.
    regressors_file : str
        Whitespace-separated confounds table (e.g. fMRIPrep output).
    regressors_names : list of str, optional
        Confound columns passed as nuisance regressors; defaults to every
        column that is not a motion column.
    motion_columns : list of str, optional
        Columns written to ``motion.par``; defaults to trans/rot x x/y/z.
    decimals : int
        Decimal places onsets/durations/amplitudes are rounded to
        (previously accepted but ignored; a hard-coded 3 was used).
    amplitude : float
        Constant amplitude used when the events table has no
        ``amplitudes`` column.
    remove_tr : int
        Number of initial TRs removed from the scan; onsets are shifted
        back and regressor rows dropped accordingly (previously a
        hard-coded local ``removeTR = 4``).

    Returns
    -------
    ([Bunch], str)
        One-element list with the run-info Bunch, and the path of the
        ``motion.par`` file written to the current working directory.
    """
    from pathlib import Path
    import numpy as np
    import pandas as pd
    from nipype.interfaces.base.support import Bunch

    # Process the events file (whitespace-separated table).
    events = pd.read_csv(events_file, sep=r'\s+')

    bunch_fields = ['onsets', 'durations', 'amplitudes']

    if not motion_columns:
        from itertools import product
        motion_columns = [
            '_'.join(v) for v in product(('trans', 'rot'), 'xyz')
        ]

    out_motion = Path('motion.par').resolve()

    regress_data = pd.read_csv(regressors_file, sep=r'\s+')
    # NOTE(review): motion parameters are written untrimmed while the
    # nuisance regressors below drop the first `remove_tr` rows — confirm
    # this asymmetry is intended (sibling versions trim both).
    np.savetxt(out_motion, regress_data[motion_columns].values, '%g')
    if regressors_names is None:
        regressors_names = sorted(
            set(regress_data.columns) - set(motion_columns))

    if regressors_names:
        bunch_fields += ['regressor_names']
        bunch_fields += ['regressors']

    runinfo = Bunch(scans=in_file,
                    conditions=list(set(events.trial_type_N.values)),
                    **{k: []
                       for k in bunch_fields})

    for condition in runinfo.conditions:
        event = events[events.trial_type_N.str.match(condition)]

        # Shift onsets back by remove_tr to align with the truncated scan.
        runinfo.onsets.append(
            np.round(event.onset.values - remove_tr, decimals).tolist())
        runinfo.durations.append(
            np.round(event.duration.values, decimals).tolist())
        if 'amplitudes' in events.columns:
            runinfo.amplitudes.append(
                np.round(event.amplitudes.values, decimals).tolist())
        else:
            runinfo.amplitudes.append([amplitude] * len(event))

    if 'regressor_names' in bunch_fields:
        runinfo.regressor_names = regressors_names
        # Drop the first remove_tr rows so regressors match the scan length.
        runinfo.regressors = regress_data[regressors_names].fillna(
            0.0).values[remove_tr:, ].T.tolist()

    return [runinfo], str(out_motion)
def _bids2nipypeinfo(in_file, events_file, regressors_file,
                     regressors_names=None,
                     motion_columns=None,
                     decimals=3, amplitude=1.0, del_scan=10):
    """Build nipype run info with parametric modulators from BIDS tables.

    Conditions are '<domain>_<trial_type>' (domain is the single value of the
    events ``condition`` column). Each condition gets a linear parametric
    modulator driven by ``reward_prob``; a final 'Resp' condition models
    button presses (``resp != 2``) with zero duration and no modulator.

    Parameters
    ----------
    in_file : str
        Path to the functional scan.
    events_file, regressors_file : str
        Whitespace-separated events / confounds tables.
    regressors_names : list of str, optional
        Nuisance-regressor columns; defaults to all non-motion columns.
    motion_columns : list of str, optional
        Columns written to ``motion.par``; defaults to trans/rot x x/y/z.
    decimals : int
        Decimal places for rounding (previously accepted but ignored;
        a hard-coded 3 was used).
    amplitude : float
        Constant amplitude when the events table has no ``amplitudes`` column.
    del_scan : int
        Number of initial scans deleted; onsets are shifted by
        ``- del_scan + 1`` and regressor rows trimmed to match.

    Returns
    -------
    (Bunch, str)
        The run-info Bunch and the path of the written ``motion.par``.
    """
    from pathlib import Path
    import numpy as np
    import pandas as pd
    from nipype.interfaces.base.support import Bunch

    # Process the events file (whitespace-separated table).
    events = pd.read_csv(events_file, sep=r'\s+')

    bunch_fields = ['onsets', 'durations', 'amplitudes']

    if not motion_columns:
        from itertools import product
        motion_columns = ['_'.join(v) for v in product(('trans', 'rot'), 'xyz')]

    out_motion = Path('motion.par').resolve()

    regress_data = pd.read_csv(regressors_file, sep=r'\s+')
    # Motion parameters are trimmed by del_scan rows to match the scan.
    np.savetxt(out_motion,
               regress_data[motion_columns].fillna(0.0).values[del_scan:, ],
               '%g')

    if regressors_names is None:
        regressors_names = sorted(set(regress_data.columns) - set(motion_columns))

    if regressors_names:
        bunch_fields += ['regressor_names']
        bunch_fields += ['regressors']

    # Domain of this task run; should be a single value, 'Mon' or 'Med'.
    domain = list(set(events.condition.values))[0]
    trial_types = list(set(events.trial_type.values))

    # Add the parametric-modulator field.
    bunch_fields += ['pmod']

    runinfo = Bunch(
        scans=in_file,
        conditions=[domain + '_' + trial_type for trial_type in trial_types],
        **{k: [] for k in bunch_fields})

    for condition in runinfo.conditions:
        # condition[4:] strips the 4-char '<Dom>_' prefix back to trial_type.
        event = events[events.trial_type.str.match(condition[4:])]
        # Shift onsets to account for the first del_scan deleted scans.
        runinfo.onsets.append(
            np.round(event.onset.values - del_scan + 1, decimals).tolist())
        runinfo.durations.append(
            np.round(event.duration.values, decimals).tolist())

        # Linear (poly=1) parametric modulation by reward probability.
        runinfo.pmod.append(Bunch(
            name=[condition + '_reward_prob'],
            param=[np.round(event.reward_prob.values, decimals).tolist()],
            poly=[1]))

        if 'amplitudes' in events.columns:
            runinfo.amplitudes.append(
                np.round(event.amplitudes.values, decimals).tolist())
        else:
            runinfo.amplitudes.append([amplitude] * len(event))

    # Response predictor regardless of condition.
    runinfo.conditions.append('Resp')

    # Only trials with a button press (resp != 2) contribute response onsets.
    resp_mask = events.resp != 2
    resp_onset = np.round(events.resp_onset.values[resp_mask] - del_scan + 1,
                          decimals).tolist()
    runinfo.onsets.append(resp_onset)
    runinfo.durations.append([0] * len(resp_onset))
    runinfo.amplitudes.append([amplitude] * len(resp_onset))

    # No parametric modulator for the response regressor.
    runinfo.pmod.append(None)

    if 'regressor_names' in bunch_fields:
        runinfo.regressor_names = regressors_names
        # Drop the first del_scan rows so regressors match the scan length.
        runinfo.regressors = regress_data[regressors_names].fillna(
            0.0).values[del_scan:, ].T.tolist()

    return runinfo, str(out_motion)
Exemplo n.º 3
0
        from itertools import product
        motion_columns = ['_'.join(v) for v in product(('trans', 'rot'), 'xyz')]

    out_motion = Path('motion.par').resolve()

    regress_data = pd.read_csv(regressors_file, sep=r'\s+')
    np.savetxt(out_motion, regress_data[motion_columns].values, '%g')
    if regressors_names is None:
        regressors_names = sorted(set(regress_data.columns) - set(motion_columns))

    if regressors_names:
        bunch_fields += ['regressor_names']
        bunch_fields += ['regressors']

    runinfo = Bunch(
        scans=in_file,
        conditions=list(set(events.trial_type.values)),
        **{k: [] for k in bunch_fields})

    for condition in runinfo.conditions:
        event = events[events.trial_type.str.match(condition)]

        runinfo.onsets.append(np.round(event.onset.values, 3).tolist())
        runinfo.durations.append(np.round(event.duration.values, 3).tolist())
        if 'amplitudes' in events.columns:
            runinfo.amplitudes.append(np.round(event.amplitudes.values, 3).tolist())
        else:
            runinfo.amplitudes.append([amplitude] * len(event))

    if 'regressor_names' in bunch_fields:
        runinfo.regressor_names = regressors_names
        runinfo.regressors = regress_data[regressors_names].fillna(0.0).values.T.tolist()
Exemplo n.º 4
0
def _bids2nipypeinfo(in_file,
                     events_file,
                     regressors_file,
                     regressors_names=None,
                     motion_columns=None,
                     decimals=3,
                     amplitude=1.0,
                     del_scan=10):
    """Build nipype run info split by trial type and outcome level.

    Conditions are '<domain>_<trial_type>_<level>' where level is a
    one-character code ('0'..'3') mapped to the events ``vals`` column
    (5, 8, 12, 25). Levels that never occur in this run are pruned, then a
    'Resp' condition models button presses (``resp != 2``).

    Parameters
    ----------
    in_file : str
        Path to the functional scan.
    events_file, regressors_file : str
        Whitespace-separated events / confounds tables.
    regressors_names : list of str, optional
        Nuisance-regressor columns; defaults to all non-motion columns.
    motion_columns : list of str, optional
        Columns written to ``motion.par``; defaults to trans/rot x x/y/z.
    decimals : int
        Decimal places for rounding (previously accepted but ignored;
        a hard-coded 3 was used).
    amplitude : float
        Constant amplitude when the events table has no ``amplitudes`` column.
    del_scan : int
        Number of initial scans deleted; onsets are shifted by
        ``- del_scan + 1`` and regressor rows trimmed to match.

    Returns
    -------
    (Bunch, str)
        The run-info Bunch and the path of the written ``motion.par``.
    """
    from pathlib import Path
    import numpy as np
    import pandas as pd
    from nipype.interfaces.base.support import Bunch

    # Process the events file (whitespace-separated table).
    events = pd.read_csv(events_file, sep=r'\s+')

    bunch_fields = ['onsets', 'durations', 'amplitudes']

    if not motion_columns:
        from itertools import product
        motion_columns = [
            '_'.join(v) for v in product(('trans', 'rot'), 'xyz')
        ]

    out_motion = Path('motion.par').resolve()

    regress_data = pd.read_csv(regressors_file, sep=r'\s+')
    # Motion parameters are trimmed by del_scan rows to match the scan.
    np.savetxt(out_motion,
               regress_data[motion_columns].fillna(0.0).values[del_scan:, ],
               '%g')

    if regressors_names is None:
        regressors_names = sorted(
            set(regress_data.columns) - set(motion_columns))

    if regressors_names:
        bunch_fields += ['regressor_names']
        bunch_fields += ['regressors']

    # Domain of this task run; should be a single value, 'Mon' or 'Med'.
    domain = list(set(events.condition.values))[0]
    trial_types = list(set(events.trial_type.values))
    # Outcome-level code (last char of a condition name) -> `vals` value.
    outcome_levels = {'0': 5, '1': 8, '2': 12, '3': 25}
    conds = [domain + '_' + trial_type
             for trial_type in trial_types]  # e.g. ['Med_risk', 'Med_ambig']
    conditions = []
    for cond in conds:
        for outcome_level in outcome_levels:
            conditions.append(cond + '_' + outcome_level)

    runinfo = Bunch(
        scans=in_file,
        conditions=conditions,  # 2 trial types x 4 levels = 8 conditions
        **{k: []
           for k in bunch_fields})

    for condition in runinfo.conditions:
        # condition is '<Dom>_<trial_type>_<level>': slice out the trial
        # type (chars 4..-2) and the one-char level code at the end.
        event = events[
            (events.trial_type.str.match(condition[4:len(condition) - 2]))
            & (events.vals == outcome_levels[condition[-1:]])]
        # Shift onsets to account for the first del_scan deleted scans.
        runinfo.onsets.append(
            np.round(event.onset.values - del_scan + 1, decimals).tolist())
        runinfo.durations.append(
            np.round(event.duration.values, decimals).tolist())
        if 'amplitudes' in events.columns:
            runinfo.amplitudes.append(
                np.round(event.amplitudes.values, decimals).tolist())
        else:
            runinfo.amplitudes.append([amplitude] * len(event))

    # Delete empty conditions, if any (a level may not occur in this run).
    cond_idx = 0
    while cond_idx < len(runinfo.conditions):
        if not runinfo.onsets[cond_idx]:
            runinfo.conditions.pop(cond_idx)
            runinfo.onsets.pop(cond_idx)
            runinfo.durations.pop(cond_idx)
            runinfo.amplitudes.pop(cond_idx)
        else:
            cond_idx += 1

    # Response predictor regardless of condition.
    runinfo.conditions.append('Resp')

    # Only trials with a button press (resp != 2) contribute response onsets.
    resp_mask = events.resp != 2
    resp_onset = np.round(events.resp_onset.values[resp_mask] - del_scan + 1,
                          decimals).tolist()
    runinfo.onsets.append(resp_onset)
    runinfo.durations.append([0] * len(resp_onset))
    runinfo.amplitudes.append([amplitude] * len(resp_onset))

    if 'regressor_names' in bunch_fields:
        runinfo.regressor_names = regressors_names
        # Drop the first del_scan rows so regressors match the scan length.
        runinfo.regressors = regress_data[regressors_names].fillna(0.0).values[
            del_scan:, ].T.tolist()

    return runinfo, str(out_motion)
Exemplo n.º 5
0
from nipype.interfaces.base.support import Bunch

from ..path import findpaths

A = "/tmp/a.txt"  # TODO make this more elegant with a tmp_dir
B = "/tmp/b.txt"


@pytest.mark.timeout(60)
@pytest.mark.parametrize(
    "obj",
    [
        [A, B],
        (A, B),
        {A, B},
        {"a": A, "b": B},
        {"x": {"y": [A, B]}},
        Bunch(a=A, b=B),
        Bunch(x=[A, B]),
    ],
)
def test_findpaths(tmp_path, obj):
    """findpaths must recover both file paths from any nesting of *obj*."""
    os.chdir(str(tmp_path))

    expected = (A, B)
    for fname in expected:
        Path(fname).touch()

    assert set(findpaths(obj)) == {A, B}

    for fname in expected:
        Path(fname).unlink()
Exemplo n.º 6
0
def _bids2nipypeinfo(in_file,
                     events_file,
                     regressors_file,
                     regressors_names=None,
                     motion_columns=None,
                     decimals=3,
                     amplitude=1.0,
                     del_scan=10):
    """Build nipype run info split by risk and ambiguity levels.

    Conditions are '<domain>_<trial_type>_<level>' where level is the
    percentage of risk (``probs`` * 100) or ambiguity (``ambigs`` * 100,
    excluding 0, which marks risky trials). A 'Resp' condition models
    button presses (``resp != 2``), and empty conditions are pruned.

    Parameters
    ----------
    in_file : str
        Path to the functional scan.
    events_file, regressors_file : str
        Whitespace-separated events / confounds tables.
    regressors_names : list of str, optional
        Nuisance-regressor columns; defaults to all non-motion columns.
    motion_columns : list of str, optional
        Columns written to ``motion.par``; defaults to trans/rot x x/y/z.
    decimals : int
        Decimal places for rounding (previously accepted but ignored;
        a hard-coded 3 was used).
    amplitude : float
        Constant amplitude when the events table has no ``amplitudes`` column.
    del_scan : int
        Number of initial scans deleted; onsets are shifted by
        ``- del_scan + 1`` and regressor rows trimmed to match.

    Returns
    -------
    (Bunch, str)
        The run-info Bunch and the path of the written ``motion.par``.
    """
    from pathlib import Path
    import numpy as np
    import pandas as pd
    from nipype.interfaces.base.support import Bunch

    # Process the events file (whitespace-separated table).
    events = pd.read_csv(events_file, sep=r'\s+')

    bunch_fields = ['onsets', 'durations', 'amplitudes']

    if not motion_columns:
        from itertools import product
        motion_columns = [
            '_'.join(v) for v in product(('trans', 'rot'), 'xyz')
        ]

    out_motion = Path('motion.par').resolve()

    regress_data = pd.read_csv(regressors_file, sep=r'\s+')
    # Motion parameters are trimmed by del_scan rows to match the scan.
    np.savetxt(out_motion,
               regress_data[motion_columns].fillna(0.0).values[del_scan:, ],
               '%g')

    if regressors_names is None:
        regressors_names = sorted(
            set(regress_data.columns) - set(motion_columns))

    if regressors_names:
        bunch_fields += ['regressor_names']
        bunch_fields += ['regressors']

    # Domain of this task run; should be a single value, 'Mon' or 'Med'.
    domain = list(set(events.condition.values))[0]
    trial_types = list(set(events.trial_type.values))
    trial_types.sort()  # stable order: separate trials by reward magnitude

    # Unique risk levels as integer percentages.
    risk_levels_percent = events.probs.values * 100
    risk_levels = list(set(risk_levels_percent.astype(int)))
    risk_levels.sort()

    # Unique ambiguity levels as integer percentages.
    amb_levels_percent = events.ambigs.values * 100
    amb_levels = list(set(amb_levels_percent.astype(int)))
    amb_levels.sort()
    # Drop ambiguity level 0 — those rows are the risky trials.
    amb_levels.pop(0)

    runinfo = Bunch(
        scans=in_file,
        # e.g. ['Med_amb_24', 'Med_amb_50', 'Med_amb_74',
        #       'Med_risk_25', 'Med_risk_50', 'Med_risk_75']
        conditions=[
            domain + '_' + str(trial_types[0]) + '_' + str(amb_level)
            for amb_level in amb_levels
        ] + [
            domain + '_' + str(trial_types[1]) + '_' + str(risk_level)
            for risk_level in risk_levels
        ],
        **{k: []
           for k in bunch_fields})

    for condition in runinfo.conditions:

        # condition is '<Dom>_<type>_<NN>'; chars 4..-3 are the trial type,
        # the last two chars the level.
        # NOTE(review): this slicing assumes every level is exactly two
        # digits — confirm no single- or three-digit levels can occur.
        if condition[4:-3] == 'amb':
            event = events[(events.trial_type == condition[4:-3])
                           & (events.ambigs * 100 == int(condition[-2:]))]
        else:
            event = events[(events.trial_type == condition[4:-3])
                           & (events.probs * 100 == int(condition[-2:]))]

        # Shift onsets to account for the first del_scan deleted scans.
        runinfo.onsets.append(
            np.round(event.onset.values - del_scan + 1, decimals).tolist())
        runinfo.durations.append(
            np.round(event.duration.values, decimals).tolist())

        if 'amplitudes' in events.columns:
            runinfo.amplitudes.append(
                np.round(event.amplitudes.values, decimals).tolist())
        else:
            runinfo.amplitudes.append([amplitude] * len(event))

    # Response predictor regardless of condition.
    runinfo.conditions.append('Resp')

    # Only trials with a button press (resp != 2) contribute response onsets.
    resp_mask = events.resp != 2
    resp_onset = np.round(events.resp_onset.values[resp_mask] - del_scan + 1,
                          decimals).tolist()
    runinfo.onsets.append(resp_onset)
    runinfo.durations.append([0] * len(resp_onset))
    runinfo.amplitudes.append([amplitude] * len(resp_onset))

    # Delete empty conditions (filter conditions against amplitudes first,
    # before the amplitudes list itself is filtered).
    runinfo.conditions = [
        x for (x, y) in zip(runinfo.conditions, runinfo.amplitudes) if y
    ]
    runinfo.amplitudes = [x for x in runinfo.amplitudes if x]
    runinfo.durations = [x for x in runinfo.durations if x]
    runinfo.onsets = [x for x in runinfo.onsets if x]

    if 'regressor_names' in bunch_fields:
        runinfo.regressor_names = regressors_names
        # Drop the first del_scan rows so regressors match the scan length.
        runinfo.regressors = regress_data[regressors_names].fillna(0.0).values[
            del_scan:, ].T.tolist()

    return runinfo, str(out_motion)