Example #1
import os
from tempfile import mkstemp

from nipype.interfaces import utility  # CSVReader lives here
from nose.tools import assert_equal    # nose-style generator test

def test_csvReader():
    header = "files,labels,erosion\n"
    lines = ["foo,hello,300.1\n", "bar,world,5\n", "baz,goodbye,0.3\n"]
    for x in range(2):
        fd, name = mkstemp(suffix=".csv")
        os.close(fd)  # close the raw descriptor; the file is reopened by name
        with open(name, 'w') as fid:
            reader = utility.CSVReader()
            if x % 2 == 0:
                fid.write(header)
                reader.inputs.header = True
            fid.writelines(lines)
            fid.flush()
            reader.inputs.in_file = name
            out = reader.run()
            if x % 2 == 0:
                yield assert_equal, out.outputs.files, ['foo', 'bar', 'baz']
                yield assert_equal, out.outputs.labels, [
                    'hello', 'world', 'goodbye'
                ]
                yield assert_equal, out.outputs.erosion, ['300.1', '5', '0.3']
            else:
                yield assert_equal, out.outputs.column_0, ['foo', 'bar', 'baz']
                yield assert_equal, out.outputs.column_1, [
                    'hello', 'world', 'goodbye'
                ]
                yield assert_equal, out.outputs.column_2, ['300.1', '5', '0.3']
        os.unlink(name)
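
For reference, a minimal standalone sketch of what these tests exercise, assuming a hypothetical scratch.csv that holds the same header and rows the test writes (CSVReader lives in nipype.interfaces.utility):

from nipype.interfaces import utility

reader = utility.CSVReader()
reader.inputs.in_file = 'scratch.csv'  # hypothetical path
reader.inputs.header = True            # expose each column as a named output
result = reader.run()
print(result.outputs.files)    # ['foo', 'bar', 'baz']
print(result.outputs.erosion)  # ['300.1', '5', '0.3'] -- values stay strings
# Without header=True the outputs are named column_0, column_1, column_2.
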
Example #2
from nipype.interfaces import utility

def test_csvReader(tmpdir):
    header = "files,labels,erosion\n"
    lines = ["foo,hello,300.1\n", "bar,world,5\n", "baz,goodbye,0.3\n"]
    for x in range(2):
        name = str(tmpdir.join("testfile.csv"))
        with open(name, 'w') as fid:
            reader = utility.CSVReader()
            if x % 2 == 0:
                fid.write(header)
                reader.inputs.header = True
            fid.writelines(lines)
            fid.flush()
            reader.inputs.in_file = name
            out = reader.run()
            if x % 2 == 0:
                assert out.outputs.files == ['foo', 'bar', 'baz']
                assert out.outputs.labels == ['hello', 'world', 'goodbye']
                assert out.outputs.erosion == ['300.1', '5', '0.3']
            else:
                assert out.outputs.column_0 == ['foo', 'bar', 'baz']
                assert out.outputs.column_1 == ['hello', 'world', 'goodbye']
                assert out.outputs.column_2 == ['300.1', '5', '0.3']
Example #3
from nipype.interfaces import utility

def test_csvReader(tmpdir):
    header = "files,labels,erosion\n"
    lines = ["foo,hello,300.1\n", "bar,world,5\n", "baz,goodbye,0.3\n"]
    for x in range(2):
        name = tmpdir.join("testfile.csv").strpath
        with open(name, "w") as fid:
            reader = utility.CSVReader()
            if x % 2 == 0:
                fid.write(header)
                reader.inputs.header = True
            fid.writelines(lines)
            fid.flush()
            reader.inputs.in_file = name
            out = reader.run()
            if x % 2 == 0:
                assert out.outputs.files == ["foo", "bar", "baz"]
                assert out.outputs.labels == ["hello", "world", "goodbye"]
                assert out.outputs.erosion == ["300.1", "5", "0.3"]
            else:
                assert out.outputs.column_0 == ["foo", "bar", "baz"]
                assert out.outputs.column_1 == ["hello", "world", "goodbye"]
                assert out.outputs.column_2 == ["300.1", "5", "0.3"]
Example #4
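# Examples #4 onward are nipype workflow scripts and assume their modules'
# top-level context, e.g.:
#   import os
#   import nipype.pipeline.engine as pe
#   import nipype.interfaces.io as nio
#   import nipype.interfaces.utility as niu
#   from nipype.pipeline.engine import Workflow, Node, MapNode
#   from nipype.interfaces.utility import IdentityInterface
# data_dir, create_workflow(), and similar names are defined elsewhere in
# those modules and are left as-is here.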
def run_workflow(csv_file, use_pbs, contrasts_name, template):
    workflow = pe.Workflow(name='run_level1flow')
    workflow.base_dir = os.path.abspath('./workingdirs')

    from nipype import config, logging
    config.update_config({
        'logging': {
            'log_directory': os.path.join(workflow.base_dir, 'logs'),
            'log_to_file': True,
            'workflow_level': 'DEBUG',
            'interface_level': 'DEBUG',
        }
    })
    logging.update_logging(config)

    config.enable_debug_mode()

    # redundant with enable_debug_mode() ...
    workflow.stop_on_first_crash = True
    workflow.remove_unnecessary_outputs = False
    workflow.keep_inputs = True
    workflow.hash_method = 'content'
    """
    Setup the contrast structure that needs to be evaluated. This is a list of
    lists. The inner list specifies the contrasts and has the following format:
    [Name,Stat,[list of condition names],[weights on those conditions]. The
    condition names must match the `names` listed in the `evt_info` function
    described above.
    """

    try:
        import importlib
        mod = importlib.import_module('contrasts.' + contrasts_name)
        contrasts = mod.contrasts
        # event_names = mod.event_names
    except ImportError:
        raise RuntimeError('Unknown contrasts: %s. Must exist as a Python'
                           ' module in contrasts directory!' % contrasts_name)

    modelfit = create_workflow(contrasts)

    import bids_templates as bt

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'run_id',
    ]),
                        name='input')

    assert csv_file is not None, "--csv argument must be defined!"

    reader = niu.CSVReader()
    reader.inputs.header = True
    reader.inputs.in_file = csv_file
    out = reader.run()
    subject_list = out.outputs.subject
    session_list = out.outputs.session
    run_list = out.outputs.run

    inputnode.iterables = [
        ('subject_id', subject_list),
        ('session_id', session_list),
        ('run_id', run_list),
    ]
    inputnode.synchronize = True  # pair the lists elementwise, not as a Cartesian product

    templates = {
        'funcs':
        'derivatives/featpreproc/highpassed_files/sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}*_bold_res-1x1x1_preproc_*.nii.gz',

        # 'funcmasks':
        # 'featpreproc/func_unwarp/sub-{subject_id}/ses-{session_id}/func/'
        #     'sub-{subject_id}_ses-{session_id}_*_run-{run_id}*_bold_res-1x1x1_preproc'
        #     '_mc_unwarped.nii.gz',
        'highpass':
        '******'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc_*.nii.gz',
        'motion_parameters':
        'derivatives/featpreproc/motion_corrected/sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc.param.1D',
        'motion_outlier_files':
        'derivatives/featpreproc/motion_outliers/sub-{subject_id}/ses-{session_id}/func/'
        'art.sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc_mc'
        '_maths_outliers.txt',
        'event_log':
        'sub-{subject_id}/ses-{session_id}/func/'
        # 'sub-{subject_id}_ses-{session_id}*_bold_res-1x1x1_preproc'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}*'
        # '.nii.gz',
        '_events.tsv',
        'ref_func':
        'derivatives/featpreproc/reference/func/*.nii.gz',
        'ref_funcmask':
        'derivatives/featpreproc/reference/func_mask/*.nii.gz',
    }
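    # SelectFiles formats each template with the node's inputs (subject_id,
    # session_id, run_id) and globs any remaining wildcards on disk.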

    inputfiles = pe.Node(nio.SelectFiles(templates, base_directory=data_dir),
                         name='in_files')

    workflow.connect([
        (inputnode, inputfiles, [
            ('subject_id', 'subject_id'),
            ('session_id', 'session_id'),
            ('run_id', 'run_id'),
        ]),
    ])

    join_input = pe.JoinNode(
        niu.IdentityInterface(fields=[
            # 'subject_id',
            # 'session_id',
            # 'run_id',
            'funcs',
            'highpass',
            'motion_parameters',
            'motion_outlier_files',
            'event_log',
            'ref_func',
            'ref_funcmask',
        ]),
        joinsource='input',
        joinfield=[
            'funcs',
            'highpass',
            'motion_parameters',
            'motion_outlier_files',
            'event_log',
        ],
        # unique=True,
        name='join_input')
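
    # The JoinNode gathers the outputs from every (subject, session, run)
    # iteration of the 'input' node back into lists, so modelfit receives
    # all runs together.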

    workflow.connect([
        (inputfiles, join_input, [
            ('funcs', 'funcs'),
            ('highpass', 'highpass'),
            ('motion_parameters', 'motion_parameters'),
            ('motion_outlier_files', 'motion_outlier_files'),
            ('event_log', 'event_log'),
            ('ref_func', 'ref_func'),
            ('ref_funcmask', 'ref_funcmask'),
        ]),
        (join_input, modelfit, [
            ('funcs', 'inputspec.funcs'),
            ('highpass', 'inputspec.highpass'),
            ('motion_parameters', 'inputspec.motion_parameters'),
            ('motion_outlier_files', 'inputspec.motion_outlier_files'),
            ('event_log', 'inputspec.event_log'),
            ('ref_func', 'inputspec.ref_func'),
            ('ref_funcmask', 'inputspec.ref_funcmask'),
        ]),
    ])

    modelfit.inputs.inputspec.fwhm = 2.0
    modelfit.inputs.inputspec.highpass = 50
    modelfit.write_graph(simple_form=True)
    modelfit.write_graph(graph2use='orig', format='png', simple_form=True)
    # modelfit.write_graph(graph2use='detailed', format='png', simple_form=False)

    workflow.stop_on_first_crash = True
    workflow.keep_inputs = True
    workflow.remove_unnecessary_outputs = False
    workflow.write_graph(simple_form=True)
    workflow.write_graph(graph2use='colored', format='png', simple_form=True)
    # workflow.write_graph(graph2use='detailed', format='png', simple_form=False)
    if use_pbs:
        workflow.run(plugin='PBS',
                     plugin_args={'template': os.path.expanduser(template)})
    else:
        workflow.run()
Example #5
def run_workflow(run_num=None, session=None, csv_file=None, use_pbs=False):
    # Using the name "level1flow" should allow the workingdirs file to be used
    #  by the fmri_workflow pipeline.
    workflow = pe.Workflow(name='level1flow')
    workflow.base_dir = os.path.abspath('./workingdirs')

    featpreproc = create_workflow()

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'run_id',
    ]),
                        name="input")
    import bids_templates as bt

    if csv_file is not None:
        reader = niu.CSVReader()
        reader.inputs.header = True
        reader.inputs.in_file = csv_file
        out = reader.run()
        subject_list = out.outputs.subject
        session_list = out.outputs.session
        run_list = out.outputs.run

        inputnode.iterables = [
            ('subject_id', subject_list),
            ('session_id', session_list),
            ('run_id', run_list),
        ]
        inputnode.synchronize = True
    else:
        subject_list = bt.subject_list
        session_list = [session] if session is not None else bt.session_list
        assert run_num is not None
        run_list = ['%02d' % run_num]

        inputnode.iterables = [
            ('subject_id', subject_list),
            ('session_id', session_list),
            ('run_id', run_list),
        ]

    templates = {
        'funcs':
        'derivatives/resampled-isotropic-1mm/sub-{subject_id}/ses-{session_id}/func/'
        # 'sub-{subject_id}_ses-{session_id}*_bold_res-1x1x1_preproc'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}_bold_res-1x1x1_preproc'
        #'_nvol10'
        '.nii.gz',
    }
    inputfiles = pe.Node(nio.SelectFiles(templates, base_directory=data_dir),
                         name="input_files")

    workflow.connect([(inputnode, inputfiles, [
        ('subject_id', 'subject_id'),
        ('session_id', 'session_id'),
        ('run_id', 'run_id'),
    ]),
                      (inputnode, featpreproc, [
                          ('subject_id', 'inputspec.subject_id'),
                          ('session_id', 'inputspec.session_id'),
                      ]),
                      (inputfiles, featpreproc, [
                          ('funcs', 'inputspec.funcs'),
                      ])])

    featpreproc.inputs.inputspec.fwhm = 2.0
    featpreproc.inputs.inputspec.highpass = 50  # FWHM in seconds
    #workflow.stop_on_first_crash = True
    workflow.keep_inputs = True
    workflow.remove_unnecessary_outputs = False
    workflow.write_graph()
    if use_pbs:
        workflow.run(plugin='PBS',
                     plugin_args={
                         'template':
                         '/home/jonathan/NHP-BIDS/code/pbs/template.sh'
                     })
    else:
        workflow.run()
Example #6
def run_workflow(session=None, csv_file=None, use_pbs=False):
    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'subject_id',
        'session_id',
    ]), name="input")
    import bids_templates as bt

    from nipype import config
    config.enable_debug_mode()

    method = 'fs'  # freesurfer's mri_convert is faster
    if method == 'fs':
        import nipype.interfaces.freesurfer as fs    # freesurfer
    else:
        assert method == 'fsl'
        import nipype.interfaces.fsl as fsl          # fsl

    # ------------------ Specify variables
    ds_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    data_dir = ds_root
    output_dir = 'derivatives/resampled-isotropic-06mm'
    working_dir = 'workingdirs'

    # ------------------ Input Files
    infosource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
    ]), name="infosource")

    if csv_file is not None:
        reader = niu.CSVReader()
        reader.inputs.header = True  
        reader.inputs.in_file = csv_file
        out = reader.run()

        infosource.iterables = [
            ('session_id', out.outputs.session),
            ('subject_id', out.outputs.subject),
        ]

        infosource.synchronize = True
    else:  # legacy fallback when no CSV file is given
        if session is not None:
            session_list = [session]  # ['20170511']
        else:
            session_list = bt.session_list  # ['20170511']

        infosource.iterables = [
            ('session_id', session_list),
            ('subject_id', bt.subject_list),
        ]

    # SelectFiles
    templates = {
        # 'image': 'sub-{subject_id}/ses-{session_id}/{datatype}/'
        'image': 'sub-{subject_id}/ses-{session_id}/anat/'
                'sub-{subject_id}_ses-{session_id}_*.nii.gz',
    }
    inputfiles = Node(
        nio.SelectFiles(templates,
                        base_directory=data_dir), name="input_files")

    # ------------------ Output Files
    # Datasink
    outputfiles = Node(nio.DataSink(
        base_directory=ds_root,
        container=output_dir,
        parameterization=True),
        name="output_files")

    # Use the following DataSink output substitutions
    outputfiles.inputs.substitutions = [
        ('subject_id_', 'sub-'),
        ('session_id_', 'ses-'),
        # BIDS Extension Proposal: BEP003
        ('_resample.nii.gz', '_res-06x06x06_preproc.nii.gz'),
        # remove subdirectories:
        ('resampled-isotropic-06mm/isoxfm-06mm', 'resampled-isotropic-06mm'),
        ('resampled-isotropic-06mm/mriconv-06mm', 'resampled-isotropic-06mm'),
    ]
    # Put result into a BIDS-like format
    outputfiles.inputs.regexp_substitutions = [
        # this works only if datatype is specified in input
        (r'_datatype_([a-z]*)_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)',
         r'sub-\3/ses-\2/\1'),
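        # e.g. '_datatype_anat_ses-20170511_sub-S01' -> 'sub-S01/ses-20170511/anat'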
        (r'_fs_iso06mm[0-9]*/', r''),
        (r'/_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)',
         r'/sub-\2/ses-\1/'),
        # stupid hacks for when datatype is not specified
        (r'//(sub-[^/]*_bold_res-.*)', r'/func/\1'),
        (r'//(sub-[^/]*_phasediff_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_magnitude1_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_epi_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_T1w_res-.*.nii.gz)', r'/anat/\1'),
        (r'//(sub-[^/]*_dwi_res-.*.nii.gz)', r'/dwi/\1'),
    ]

    # -------------------------------------------- Create Pipeline
    isotropic_flow = Workflow(
        name='resample_isotropic06mm',
        base_dir=os.path.join(ds_root, working_dir))

    isotropic_flow.connect([
        (infosource, inputfiles,
         [('subject_id', 'subject_id'),
          ('session_id', 'session_id'),
          ])])

    # --- Convert to 0.6 mm isotropic voxels

    if method == 'fs':
        fs_iso06mm = MapNode(
            fs.Resample(
                voxel_size=(0.6, 0.6, 0.6),
                # suffix is not accepted by fs.Resample
                # suffix='_res-1x1x1_preproc',  # BIDS Extension Proposal: BEP003
            ),
            name='fs_iso06mm',
            iterfield=['in_file'],
        )

        isotropic_flow.connect(inputfiles, 'image',
                               fs_iso06mm, 'in_file')
        isotropic_flow.connect(fs_iso06mm, 'resampled_file',
                               outputfiles, 'mriconv-06mm')
    elif method == 'fsl':
        # in_file --> out_file
        isoxfm = Node(fsl.FLIRT(
            apply_isoxfm=0.6,
        ),
            name='isoxfm')

        isotropic_flow.connect(inputfiles, 'image',
                               isoxfm, 'in_file')
        isotropic_flow.connect(inputfiles, 'image',
                               isoxfm, 'reference')
        isotropic_flow.connect(isoxfm, 'out_file',
                               outputfiles, 'isoxfm-06mm')

    isotropic_flow.stop_on_first_crash = False  # set True to halt on the first error
    isotropic_flow.keep_inputs = True
    isotropic_flow.remove_unnecessary_outputs = False
    isotropic_flow.write_graph()
    outgraph = isotropic_flow.run()
Example #7
def run_workflow(session, csv_file, use_pbs, stop_on_first_crash,
                 ignore_events):
    import bids_templates as bt

    from nipype import config
    config.enable_debug_mode()

    # ------------------ Specify variables
    ds_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    data_dir = ds_root
    output_dir = ''
    working_dir = 'workingdirs/minimal_processing'

    # ------------------ Input Files
    infosource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
    ]),
                      name="infosource")

    if csv_file is not None:
        reader = niu.CSVReader()
        reader.inputs.header = True
        reader.inputs.in_file = csv_file
        out = reader.run()
        subject_list = out.outputs.subject
        session_list = out.outputs.session
        infosource.iterables = [
            ('session_id', session_list),
            ('subject_id', subject_list),
        ]
        if 'run' in out.outputs.traits().keys():
            print('Ignoring the "run" field of %s.' % csv_file)

        infosource.synchronize = True
    else:  # legacy fallback when no CSV file is given
        if session is not None:
            session_list = [session]  # ['20170511']
        else:
            session_list = bt.session_list  # ['20170511']

        infosource.iterables = [
            ('session_id', session_list),
            ('subject_id', bt.subject_list),
        ]

    process_images = True

    if process_images:
        datatype_list = bt.datatype_list

        imgsource = Node(IdentityInterface(fields=[
            'subject_id',
            'session_id',
            'datatype',
        ]),
                         name="imgsource")
        imgsource.iterables = [
            ('session_id', session_list),
            ('subject_id', subject_list),
            ('datatype', datatype_list),
        ]

        # SelectFiles
        imgfiles = Node(nio.SelectFiles(
            {
                'images': 'sourcedata/%s' % bt.templates['images'],
            },
            base_directory=data_dir),
                        name="img_files")

    evsource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
    ]),
                    name="evsource")
    evsource.iterables = [
        ('session_id', session_list),
        ('subject_id', subject_list),
    ]
    evfiles = Node(nio.SelectFiles(
        {
            'csv_eventlogs':
            'sourcedata/sub-{subject_id}/ses-{session_id}/func/'
            'sub-{subject_id}_ses-{session_id}_*events/Log_*_eventlog.csv',
            'stim_dir':
            'sourcedata/sub-{subject_id}/ses-{session_id}/func/'
            'sub-{subject_id}_ses-{session_id}_*events/',
        },
        base_directory=data_dir),
                   name="evfiles")

    # ------------------ Output Files
    # Datasink
    outputfiles = Node(nio.DataSink(base_directory=ds_root,
                                    container=output_dir,
                                    parameterization=True),
                       name="output_files")

    # Use the following DataSink output substitutions
    outputfiles.inputs.substitutions = [('subject_id_', 'sub-'),
                                        ('session_id_', 'ses-'),
                                        ('/minimal_processing/', '/'),
                                        ('_out_reoriented.nii.gz', '.nii.gz')]
    # Put result into a BIDS-like format
    outputfiles.inputs.regexp_substitutions = [
        (r'_datatype_([a-z]*)_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)',
         r'sub-\3/ses-\2/\1'),
        (r'/_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)', r'/sub-\2/ses-\1/'),
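        # e.g. '/_ses-20170511_sub-S01' -> '/sub-S01/ses-20170511/'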
        (r'/_ro[0-9]+/', r'/'),
        (r'/_csv2tsv[0-9]+/', r'/func/'),
    ]

    # -------------------------------------------- Create Pipeline
    workflow = Workflow(name='wrapper',
                        base_dir=os.path.join(ds_root, working_dir))

    if process_images:
        workflow.connect([(imgsource, imgfiles, [
            ('subject_id', 'subject_id'),
            ('session_id', 'session_id'),
            ('datatype', 'datatype'),
        ])])

    workflow.connect([
        (evsource, evfiles, [
            ('subject_id', 'subject_id'),
            ('session_id', 'session_id'),
        ]),
    ])

    if process_images:
        minproc = create_images_workflow()
        workflow.connect(imgfiles, 'images', minproc, 'in.images')
        workflow.connect(minproc, 'out.images', outputfiles,
                         'minimal_processing.@images')

    if not ignore_events:
        csv2tsv = MapNode(ConvertCSVEventLog(),
                          iterfield=['in_file', 'stim_dir'],
                          name='csv2tsv')
        workflow.connect(evfiles, 'csv_eventlogs', csv2tsv, 'in_file')
        workflow.connect(evfiles, 'stim_dir', csv2tsv, 'stim_dir')
        workflow.connect(csv2tsv, 'out_file', outputfiles,
                         'minimal_processing.@eventlogs')

    workflow.stop_on_first_crash = stop_on_first_crash
    workflow.keep_inputs = True
    workflow.remove_unnecessary_outputs = False
    workflow.write_graph()
    #workflow.run(plugin='MultiProc', plugin_args={'n_procs' : 10})
    workflow.run()