Example 1
def create_conversion(name, subject, scans, working_dir, out_dir, folder,
                      xnat_server, xnat_user, xnat_pass, project_id, exp_id):

    convert = Workflow(name=name)
    convert.base_dir = working_dir
    convert.config['execution'][
        'crashdump_dir'] = convert.base_dir + "/crash_files"

    # infosource to iterate over scans
    scan_infosource = Node(
        util.IdentityInterface(fields=['scan_key', 'scan_val']),
        name='scan_infosource')
    scan_infosource.iterables = [('scan_key', scans.keys()),
                                 ('scan_val', scans.values())]
    scan_infosource.synchronize = True

    # xnat source
    xnatsource = Node(nio.XNATSource(
        infields=['project_id', 'subject_id', 'exp_id', 'scan_id'],
        outfields=['dicom'],
        server=xnat_server,
        user=xnat_user,
        pwd=xnat_pass,
        cache_dir=working_dir),
                      name='xnatsource')

    xnatsource.inputs.query_template = (
        '/projects/%s/subjects/%s/experiments/%s/scans/%d/resources/DICOM/files'
    )
    xnatsource.inputs.query_template_args['dicom'] = [[
        'project_id', 'subject_id', 'exp_id', 'scan_id'
    ]]
    xnatsource.inputs.project_id = project_id
    xnatsource.inputs.subject_id = subject
    xnatsource.inputs.exp_id = exp_id
    convert.connect([(scan_infosource, xnatsource, [('scan_val', 'scan_id')])])

    # workflow to convert dicoms
    dcmconvert = create_dcmconvert_pipeline()
    convert.connect([
        (scan_infosource, dcmconvert, [('scan_key', 'inputnode.filename')]),
        (xnatsource, dcmconvert, [('dicom', 'inputnode.dicoms')])
    ])

    # xnat sink
    sink = Node(nio.DataSink(base_directory=out_dir, parameterization=False),
                name='sink')

    convert.connect([(dcmconvert, sink, [('outputnode.nifti', folder)])])

    convert.run()
Example 2
def create_conversion(
    name, subject, scans, working_dir, out_dir, folder, xnat_server, xnat_user, xnat_pass, project_id, exp_id
):

    convert = Workflow(name=name)
    convert.base_dir = working_dir
    convert.config["execution"]["crashdump_dir"] = convert.base_dir + "/crash_files"

    # infosource to iterate over scans
    scan_infosource = Node(util.IdentityInterface(fields=["scan_key", "scan_val"]), name="scan_infosource")
    scan_infosource.iterables = [("scan_key", scans.keys()), ("scan_val", scans.values())]
    scan_infosource.synchronize = True

    # xnat source
    xnatsource = Node(
        nio.XNATSource(
            infields=["project_id", "subject_id", "exp_id", "scan_id"],
            outfields=["dicom"],
            server=xnat_server,
            user=xnat_user,
            pwd=xnat_pass,
            cache_dir=working_dir,
        ),
        name="xnatsource",
    )

    xnatsource.inputs.query_template = (
        "/projects/%s/subjects/%s/experiments/%s/scans/%d/resources/DICOM/files"
    )
    xnatsource.inputs.query_template_args["dicom"] = [["project_id", "subject_id", "exp_id", "scan_id"]]
    xnatsource.inputs.project_id = project_id
    xnatsource.inputs.subject_id = subject
    xnatsource.inputs.exp_id = exp_id
    convert.connect([(scan_infosource, xnatsource, [("scan_val", "scan_id")])])

    # workflow to convert dicoms
    dcmconvert = create_dcmconvert_pipeline()
    convert.connect(
        [
            (scan_infosource, dcmconvert, [("scan_key", "inputnode.filename")]),
            (xnatsource, dcmconvert, [("dicom", "inputnode.dicoms")]),
        ]
    )

    # xnat sink
    sink = Node(nio.DataSink(base_directory=out_dir, parameterization=False), name="sink")

    convert.connect([(dcmconvert, sink, [("outputnode.nifti", folder)])])

    convert.run()
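A minimal call sketch for the create_conversion function above. Every value below (server URL, credentials, IDs, and the scan mapping) is a placeholder, not taken from the original example; the scans dictionary maps the desired output file name to the XNAT scan identifier that fills the scan_id slot of the query template.

# placeholder values only; a real call needs a reachable XNAT server and valid IDs
scans = {'t1': 11, 't2': 12}  # output filename -> XNAT scan id (integers, matching the %d slot)
create_conversion(
    name='convert_sub001',
    subject='sub001',
    scans=scans,
    working_dir='/tmp/xnat_work',
    out_dir='/data/nifti',
    folder='anat',
    xnat_server='https://xnat.example.org',
    xnat_user='xnat_user',
    xnat_pass='xnat_password',
    project_id='PROJ01',
    exp_id='EXP01',
)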
def run_workflow(session=None, csv_file=None):
    from nipype import config
    #config.enable_debug_mode()

    method = 'fs'  # freesurfer's mri_convert is faster
    if method == 'fs':
        import nipype.interfaces.freesurfer as fs  # freesurfer
    else:
        assert method == 'fsl'
        import nipype.interfaces.fsl as fsl  # fsl

    # ------------------ Specify variables
    ds_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    data_dir = ds_root
    output_dir = 'derivatives/resampled-isotropic-06mm'
    working_dir = 'workingdirs'

    # ------------------ Input Files
    infosource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'datatype',
    ]),
                      name="infosource")

    if csv_file is not None:
        # Read csv and use pandas to set-up image and ev-processing
        df = pd.read_csv(csv_file)
        # init lists
        sub_img = []
        ses_img = []
        dt_img = []

        # fill lists to iterate mapnodes
        for index, row in df.iterrows():
            for dt in row.datatype.strip("[]").split(" "):
                if dt in ['anat']:  # only for anatomicals
                    sub_img.append(row.subject)
                    ses_img.append(row.session)
                    dt_img.append(dt)

        # check if the file definitions are ok
        if len(dt_img) > 0:
            print('There are images to process. Will continue.')
        else:
            print('No images specified. Check your csv-file.')

        infosource.iterables = [('session_id', ses_img),
                                ('subject_id', sub_img), ('datatype', dt_img)]
        infosource.synchronize = True
    else:
        print('No csv-file specified. Cannot continue.')

    # SelectFiles
    templates = {
        'image':
        'sub-{subject_id}/ses-{session_id}/{datatype}/'
        'sub-{subject_id}_ses-{session_id}_*.nii.gz',
    }
    inputfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),
                      name="input_files")

    # ------------------ Output Files
    # Datasink
    outputfiles = Node(nio.DataSink(base_directory=ds_root,
                                    container=output_dir,
                                    parameterization=True),
                       name="output_files")

    # Use the following DataSink output substitutions
    outputfiles.inputs.substitutions = [
        ('subject_id_', 'sub-'),
        ('session_id_', 'ses-'),
        # BIDS Extension Proposal: BEP003
        ('_resample.nii.gz', '_res-06x06x06_preproc.nii.gz'),
        # remove subdirectories:
        ('resampled-isotropic-06mm/isoxfm-06mm', 'resampled-isotropic-06mm'),
        ('resampled-isotropic-06mm/mriconv-06mm', 'resampled-isotropic-06mm'),
    ]
    # Put result into a BIDS-like format
    outputfiles.inputs.regexp_substitutions = [
        # this works only if datatype is specified in input
        (r'_datatype_([a-z]*)_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)',
         r'sub-\3/ses-\2/\1'),
        (r'_fs_iso06mm[0-9]*/', r''),
        (r'/_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)', r'/sub-\2/ses-\1/'),
        # stupid hacks for when datatype is not specified
        (r'//(sub-[^/]*_bold_res-.*)', r'/func/\1'),
        (r'//(sub-[^/]*_phasediff_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_magnitude1_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_epi_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_T1w_res-.*.nii.gz)', r'/anat/\1'),
        (r'//(sub-[^/]*_T2w_res-.*.nii.gz)', r'/anat/\1'),
        (r'//(sub-[^/]*_dwi_res-.*.nii.gz)', r'/dwi/\1'),
    ]

    # -------------------------------------------- Create Pipeline
    isotropic_flow = Workflow(name='resample_isotropic06mm',
                              base_dir=os.path.join(ds_root, working_dir))

    isotropic_flow.connect([(infosource, inputfiles, [
        ('subject_id', 'subject_id'),
        ('session_id', 'session_id'),
        ('datatype', 'datatype'),
    ])])

    # --- Convert to 0.6 mm isotropic voxels

    if method == 'fs':
        fs_iso06mm = MapNode(
            fs.Resample(
                voxel_size=(0.6, 0.6, 0.6),
                # suffix is not accepted by fs.Resample
                # suffix='_res-1x1x1_preproc',
                # BIDS Extension Proposal: BEP003
            ),
            name='fs_iso06mm',
            iterfield=['in_file'],
        )

        isotropic_flow.connect(inputfiles, 'image', fs_iso06mm, 'in_file')
        isotropic_flow.connect(fs_iso06mm, 'resampled_file', outputfiles,
                               'mriconv-06mm')
    elif method == 'fsl':
        # in_file --> out_file
        isoxfm = Node(fsl.FLIRT(apply_isoxfm=0.6, ), name='isoxfm')

        isotropic_flow.connect(inputfiles, 'image', isoxfm, 'in_file')
        isotropic_flow.connect(inputfiles, 'image', isoxfm, 'reference')
        isotropic_flow.connect(isoxfm, 'out_file', outputfiles, 'isoxfm-06mm')

    isotropic_flow.stop_on_first_crash = False  # True
    isotropic_flow.keep_inputs = True
    isotropic_flow.remove_unnecessary_outputs = False
    isotropic_flow.write_graph()
    outgraph = isotropic_flow.run()
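A minimal driver sketch for the run_workflow function above; the CSV file name and its contents are placeholders. The function expects columns named subject, session and datatype, with datatype written in brackets (e.g. "[anat]") because the code strips "[]" and splits on spaces, and only rows whose datatype includes anat are processed.

# hypothetical invocation; 'resample_anat.csv' is a placeholder path with rows like
#   subject,session,datatype
#   sub01,20230101,[anat]
if __name__ == '__main__':
    run_workflow(csv_file='resample_anat.csv')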
    def make_neuromet1_workflow(self):

        # Infosource: Iterate through subject names
        infosource = Node(interface=IdentityInterface(fields=['subject_id']),
                          name="infosource")
        infosource.iterables = ('subject_id', self.subject_list)

        # unidensource: for every subject, return both uni and den
        unidensource = Node(interface=IdentityInterface(
            fields=['uniden_prefix', 'uniden_suffix']),
                            name="unidensource")
        unidensource.iterables = [
            ('uniden_prefix', ['', 'derivatives/Siemens/']),
            ('uniden_suffix', ['T1w', 'desc-UNIDEN_MP2RAGE'])
        ]
        unidensource.synchronize = True

        split_sub_str = Node(Function(['subject_str'],
                                      ['subject_id', 'session_id'],
                                      self.split_subject_ses),
                             name='split_sub_str')

        info = dict(T1w=[[
            'uniden_prefix', 'subject_id', 'session_id', 'anat', 'subject_id',
            'session_id', 'uniden_suffix'
        ]])

        datasource = Node(interface=DataGrabber(infields=[
            'subject_id', 'session_id', 'uniden_prefix', 'uniden_suffix'
        ],
                                                outfields=['T1w']),
                          name='datasource')
        datasource.inputs.base_directory = self.bids_root
        datasource.inputs.template = '%ssub-NeuroMET%s/ses-0%s/%s/sub-NeuroMET%s_ses-0%s_%s.nii.gz'
        datasource.inputs.template_args = info
        datasource.inputs.sort_filelist = False

        sink = self.make_sink()
        segment = self.make_segment()
        mask = self.make_mask()

        neuromet = Workflow(name='NeuroMET', base_dir=self.temp_dir)
        neuromet.connect(infosource, 'subject_id', split_sub_str,
                         'subject_str')
        neuromet.connect(split_sub_str, 'subject_id', datasource, 'subject_id')
        neuromet.connect(split_sub_str, 'session_id', datasource, 'session_id')
        neuromet.connect(unidensource, 'uniden_prefix', datasource,
                         'uniden_prefix')
        neuromet.connect(unidensource, 'uniden_suffix', datasource,
                         'uniden_suffix')
        neuromet.connect(datasource, 'T1w', segment, 'ro.in_file')

        # neuromet.connect()
        neuromet.connect(segment, 'spm_tissues_split.gm', mask,
                         'sum_tissues1.in_file')
        neuromet.connect(segment, 'spm_tissues_split.wm', mask,
                         'sum_tissues1.operand_files')
        neuromet.connect(segment, 'spm_tissues_split.csf', mask,
                         'sum_tissues2.operand_files')
        neuromet.connect(segment, 'spm_tissues_split.gm', sink, '@gm')
        neuromet.connect(segment, 'spm_tissues_split.wm', sink, '@wm')
        neuromet.connect(segment, 'spm_tissues_split.csf', sink, '@csf')
        neuromet.connect(segment, 'seg.bias_corrected_images', sink,
                         '@biascorr')
        # neuromet.connect(comb_imgs, 'uni_brain_den_surr_add.out_file', sink, '@img')
        neuromet.connect(mask, 'gen_mask.out_file', sink, '@mask')
        neuromet.connect(segment, 'ro.out_file', sink, '@ro')

        return neuromet
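To illustrate how the DataGrabber template above is filled: each entry in info['T1w'] supplies, in order, a value for one %s slot, with infield names replaced by their runtime values and the literal 'anat' passed through unchanged. A quick stand-alone check with plain string formatting, using made-up subject and session values:

# stand-alone illustration of the template above; '042' and '1' are made-up values
template = '%ssub-NeuroMET%s/ses-0%s/%s/sub-NeuroMET%s_ses-0%s_%s.nii.gz'
# argument order follows info['T1w']: uniden_prefix, subject_id, session_id,
# 'anat', subject_id, session_id, uniden_suffix
print(template % ('', '042', '1', 'anat', '042', '1', 'T1w'))
# -> sub-NeuroMET042/ses-01/anat/sub-NeuroMET042_ses-01_T1w.nii.gz
print(template % ('derivatives/Siemens/', '042', '1', 'anat', '042', '1',
                  'desc-UNIDEN_MP2RAGE'))
# -> derivatives/Siemens/sub-NeuroMET042/ses-01/anat/sub-NeuroMET042_ses-01_desc-UNIDEN_MP2RAGE.nii.gz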
Example 5
def run_workflow(session=None, csv_file=None, undist=True):
    from nipype import config
    #config.enable_debug_mode()

    # ------------------ Specify variables
    ds_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    data_dir = ds_root
    output_dir = 'derivatives/featpreproc/warp2nmt/highpassed_files'
    working_dir = 'workingdirs'

    # ------------------ Input Files
    infosource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'run_id',
        'refsubject_id',
    ]), name="infosource")

    if csv_file is not None:
        print('=== reading csv ===')
        # Read csv and use pandas to set-up image and ev-processing
        df = pd.read_csv(csv_file)
        # init lists
        sub_img = []
        ses_img = []
        run_img = []
        ref_img = []

        # fill lists to iterate mapnodes
        for index, row in df.iterrows():
            for r in row.run.strip("[]").split(" "):
                sub_img.append(row.subject)
                ses_img.append(row.session)
                run_img.append(r)
                if 'refsubject' in df.columns:
                    if row.refsubject == 'nan':
                        # empty field
                        ref_img.append(row.subject)
                    else:
                        # non-empty field
                        ref_img.append(row.refsubject)
                else:
                    ref_img.append(row.subject)

        infosource.iterables = [
            ('subject_id', sub_img),
            ('session_id', ses_img),
            ('run_id', run_img),
            ('refsubject_id', ref_img),
        ]
        infosource.synchronize = True
    else:
        print("No csv-file specified. Don't know what data to process.")

    # use undistorted EPIs if requested (these need to be generated with the undistort workflow)
    if undist:
        func_flag = 'preproc_undistort'
    else:
        func_flag = 'preproc'    
    
    # SelectFiles
    templates = {
        'image': 
        'derivatives/featpreproc/highpassed_files/'
        'sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}_bold_res-1x1x1_' + func_flag + '_mc_smooth_mask_gms_tempfilt_maths.nii.gz',

        'imagewarp': 
        'reference-vols/sub-{refsubject_id}/transforms/'
        'sub-{subject_id}_func2nmt_WARP.nii.gz',

        'ref_image': 
        'reference-vols/sub-{refsubject_id}/transforms/'
        'sub-{subject_id}_func2nmt_res-1x1x1.nii.gz',
    }

    inputfiles = Node(
        nio.SelectFiles(templates,
                        base_directory=data_dir), 
                        name="input_files")


    # ------------------ Output Files
    # Datasink
    outputfiles = Node(nio.DataSink(
        base_directory=ds_root,
        container=output_dir,
        parameterization=True),
        name="output_files")

    # Use the following DataSink output substitutions
    outputfiles.inputs.substitutions = [
        ('refsubject_id_', 'ref-'),
        ('subject_id_', 'sub-'),
        ('session_id_', 'ses-'),
        ('_Nwarp.nii.gz', '_NMTv2.nii.gz'),
        # remove subdirectories:
        ('highpassed_files/reg_func', 'highpassed_files'),
    ]  
       
    # Put result into a BIDS-like format
    outputfiles.inputs.regexp_substitutions = [
        (r'_ses-([a-zA-Z0-9]+)_sub-([a-zA-Z0-9]+)', r'sub-\2/ses-\1/func'),
        (r'_ref-([a-zA-Z0-9]+)_run_id_[0-9][0-9]', r''),
    ]


    # -------------------------------------------- Create Pipeline
    warp2nmt = Workflow(
        name='warp2nmt',
        base_dir=os.path.join(ds_root, working_dir))

    warp2nmt.connect([
        (infosource, inputfiles,
         [('subject_id', 'subject_id'),
          ('session_id', 'session_id'),
          ('run_id', 'run_id'),
          ('refsubject_id', 'refsubject_id'),
          ])])
       
    nwarp = Node(afni.NwarpApply(out_file='%s_Nwarp.nii.gz'),name='nwarp')       
    warp2nmt.connect(inputfiles, 'image',
                        nwarp, 'in_file')
    warp2nmt.connect(inputfiles, 'imagewarp',
                        nwarp, 'warp')
    warp2nmt.connect(inputfiles, 'ref_image',
                        nwarp, 'master')
    warp2nmt.connect(nwarp, 'out_file',
                        outputfiles, 'reg_func')

    warp2nmt.stop_on_first_crash = False  # True
    warp2nmt.keep_inputs = True
    warp2nmt.remove_unnecessary_outputs = False
    warp2nmt.write_graph()
    warp2nmt.run()
scans = dict()
scans['inv1'] = 'MP2RAGE_INV1'
scans['inv2'] = 'MP2RAGE_INV2'
scans['t1map'] = 'MP2RAGE_T1'
scans['uni'] = 'MP2RAGE_UNI'

pull = Workflow(name='pull')
pull.base_dir = base_dir
pull.config['execution']['crashdump_dir'] = pull.base_dir + "/crash_files"

# infosource to iterate over scans
scan_infosource = Node(util.IdentityInterface(fields=['scan_key', 'scan_val']),
                       name='scan_infosource')
scan_infosource.iterables = [('scan_key', scans.keys()),
                             ('scan_val', scans.values())]
scan_infosource.synchronize = True

# xnat source
xnatsource = Node(nio.XNATSource(
    infields=['project_id', 'subject_id', 'exp_id', 'scan_id'],
    outfields=['nifti'],
    server=xnat_server,
    user=xnat_user,
    pwd=xnat_pass,
    cache_dir=base_dir),
                  name='xnatsource')

xnatsource.inputs.query_template = (
    '/projects/%s/subjects/%s/experiments/%s/scans/%s/resources/NIFTI/files'
)
xnatsource.inputs.query_template_args['nifti'] = [[
    'project_id', 'subject_id', 'exp_id', 'scan_id'
]]


def palm_corr(in_file, mask, design, contrast):
    # collect the output maps written by PALM into the working directory
    # (the PALM invocation itself is not shown in this excerpt)
    tstat1 = os.path.abspath('palm_corr_dti_vox_tstat_c1.nii.gz')
    tstat2 = os.path.abspath('palm_corr_dti_vox_tstat_c2.nii.gz')
    P_value1 = os.path.abspath('palm_corr_dti_tfce_tstat_fwep_c1.nii.gz')
    P_value2 = os.path.abspath('palm_corr_dti_tfce_tstat_fwep_c2.nii.gz')

    return tstat1, tstat2, P_value1, P_value2


palm_corr = Node(name='palm_corr',
                 interface=Function(
                     input_names=['in_file', 'mask', 'design', 'contrast'],
                     output_names=['tstat1', 'tstat2', 'P_value1', 'P_value2'],
                     function=palm_corr))

palm_corr.iterables = [("design", designs), ("contrast", contrasts)]
palm_corr.synchronize = True  # synchronize ensures design and contrast are used in pairs
# rather than iterating over all possible combinations
#-----------------------------------------------------------------------------------------------------
# use the tstat maps to calculate the Pearson correlation coefficient (r)
# >>> fslmaths tstat.nii.gz -sqr tstat2.nii.gz
# >>> fslmaths tstat.nii.gz -abs -div tstat.nii.gz sign.nii.gz
# >>> fslmaths tstat2.nii.gz -add DF denominator.nii.gz
# >>> fslmaths tstat2.nii.gz -div denominator.nii.gz -sqrt -mul sign.nii.gz correlation.nii.gz
square1 = Node(fsl.UnaryMaths(), name='square1')
square1.inputs.operation = 'sqr'
square1.inputs.out_file = 'tstat1_squared.nii.gz'

sign_t1 = Node(fsl.ImageMaths(), name='sign_t1')
sign_t1.inputs.op_string = '-abs -div'
sign_t1.inputs.out_file = 'sign_tstat1.nii.gz'
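A hedged sketch of how the rest of the fslmaths recipe above could be wired up in nipype; the original snippet stops after sign_t1, so the node names (add_df1, merge_ops1, corr1, corr_wf), the degrees-of-freedom value and the workflow wiring below are assumptions made for illustration only.

from nipype import Node, Workflow
from nipype.interfaces.utility import Merge

# tstat^2 + DF (the denominator); DF=28 is a placeholder and depends on the design
add_df1 = Node(fsl.BinaryMaths(operation='add', operand_value=28,
                               out_file='denominator1.nii.gz'),
               name='add_df1')

# collect [denominator, sign] for the two %s slots of MultiImageMaths
merge_ops1 = Node(Merge(2), name='merge_ops1')

# r = sign * sqrt(tstat^2 / (tstat^2 + DF))
corr1 = Node(fsl.MultiImageMaths(op_string='-div %s -sqrt -mul %s',
                                 out_file='correlation1.nii.gz'),
             name='corr1')

corr_wf = Workflow(name='tstat_to_r')  # hypothetical container workflow
corr_wf.connect([
    (palm_corr, square1, [('tstat1', 'in_file')]),
    (palm_corr, sign_t1, [('tstat1', 'in_file'), ('tstat1', 'in_file2')]),
    (square1, add_df1, [('out_file', 'in_file')]),
    (square1, corr1, [('out_file', 'in_file')]),
    (add_df1, merge_ops1, [('out_file', 'in1')]),
    (sign_t1, merge_ops1, [('out_file', 'in2')]),
    (merge_ops1, corr1, [('out', 'operand_files')]),
])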
def run_workflow(csv_file, stop_on_first_crash, ignore_events):

    from nipype import config
    #config.enable_debug_mode()

    # ------------------ Specify variables
    ds_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    data_dir = ds_root
    output_dir = ''
    working_dir = 'workingdirs/minimal_processing'

    # ------------------ Input Files
    # Read csv and use pandas to set-up image and ev-processing
    df = pd.read_csv(csv_file)
    # init lists
    sub_img = []
    ses_img = []
    dt_img = []
    sub_ev = []
    ses_ev = []
    run_ev = []

    # fill lists to iterate mapnodes
    for index, row in df.iterrows():
        for dt in row.datatype.strip("[]").split(" "):
            sub_img.append(row.subject)
            ses_img.append(row.session)
            dt_img.append(dt)
        for r in row.run.strip("[]").split(" "):
            sub_ev.append(row.subject)
            ses_ev.append(row.session)
            run_ev.append(r)

    # check if the file definitions are ok
    if len(dt_img) > 0:
        process_images = True
    else:
        process_images = False
        print('NB! No data-types specified. Not processing any images.')
        print('Check the csv-file if this is unexpected.')

    if len(run_ev) > 0:
        process_ev = True
    else:
        process_ev = False
        print('NB! No runs specified. Not processing eventlog files.'
              ' Images will still be processed.')
        print('Check the csv-file if this is unexpected.')

    if process_images:
        imgsource = Node(IdentityInterface(fields=[
            'subject_id',
            'session_id',
            'datatype',
        ]),
                         name="imgsource")
        imgsource.iterables = [('session_id', ses_img),
                               ('subject_id', sub_img), ('datatype', dt_img)]
        imgsource.synchronize = True

        # SelectFiles
        imgfiles = Node(nio.SelectFiles(
            {
                'images':
                'sourcedata/sub-{subject_id}/ses-{session_id}/{datatype}/'
                'sub-{subject_id}_ses-{session_id}_*.nii.gz'
            },
            base_directory=data_dir),
                        name="img_files")

    if not ignore_events and process_ev:  # only create an event node when handling events
        evsource = Node(IdentityInterface(fields=[
            'subject_id',
            'session_id',
            'run_id',
        ]),
                        name="evsource")
        evsource.iterables = [
            ('subject_id', sub_ev),
            ('session_id', ses_ev),
            ('run_id', run_ev),
        ]
        evsource.synchronize = True
        evfiles = Node(nio.SelectFiles(
            {
                'csv_eventlogs':
                'sourcedata/sub-{subject_id}/ses-{session_id}/func/'
                'sub-{subject_id}_ses-{session_id}_*_run-{run_id}_events/Log_*_eventlog.csv',
                'stim_dir':
                'sourcedata/sub-{subject_id}/ses-{session_id}/func/'
                'sub-{subject_id}_ses-{session_id}_*_run-{run_id}_events/',
            },
            base_directory=data_dir),
                       name="evfiles")

    # ------------------ Output Files
    # Datasink
    outputfiles = Node(nio.DataSink(base_directory=ds_root,
                                    container=output_dir,
                                    parameterization=True),
                       name="output_files")

    # Use the following DataSink output substitutions
    outputfiles.inputs.substitutions = [('subject_id_', 'sub-'),
                                        ('session_id_', 'ses-'),
                                        ('/minimal_processing/', '/'),
                                        ('_out_reoriented.nii.gz', '.nii.gz')]

    # Put result into a BIDS-like format
    outputfiles.inputs.regexp_substitutions = [
        (r'_run_id_([a-zA-Z0-9]*)_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)',
         r'/sub-\3/ses-\2/'),
        (r'_datatype_([a-z]*)_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)',
         r'sub-\3/ses-\2/\1'),
        (r'/_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)', r'/sub-\2/ses-\1/'),
        (r'/_ro[0-9]+/', r'/'),
        (r'/_csv2tsv[0-9]+/', r'/func/'),
    ]

    # -------------------------------------------- Create Pipeline
    workflow = Workflow(name='wrapper',
                        base_dir=os.path.join(ds_root, working_dir))

    if process_images:
        workflow.connect([(imgsource, imgfiles, [
            ('subject_id', 'subject_id'),
            ('session_id', 'session_id'),
            ('datatype', 'datatype'),
        ])])

    if not ignore_events and process_ev:
        workflow.connect([
            (evsource, evfiles, [
                ('subject_id', 'subject_id'),
                ('session_id', 'session_id'),
                ('run_id', 'run_id'),
            ]),
        ])

    if process_images:
        minproc = create_images_workflow()
        workflow.connect(imgfiles, 'images', minproc, 'in.images')
        workflow.connect(minproc, 'out.images', outputfiles,
                         'minimal_processing.@images')

    if not ignore_events and process_ev:
        csv2tsv = MapNode(ConvertCSVEventLog(),
                          iterfield=['in_file', 'stim_dir'],
                          name='csv2tsv')

        workflow.connect(evfiles, 'csv_eventlogs', csv2tsv, 'in_file')
        workflow.connect(evfiles, 'stim_dir', csv2tsv, 'stim_dir')
        workflow.connect(csv2tsv, 'out_file', outputfiles,
                         'minimal_processing.@eventlogs')

    workflow.stop_on_first_crash = stop_on_first_crash
    workflow.keep_inputs = True
    workflow.remove_unnecessary_outputs = True
    workflow.write_graph()
    workflow.run()
def run_workflow(session=None, csv_file=None, use_pbs=False):
    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'subject_id',
        'session_id',
    ]), name="input")
    import bids_templates as bt

    from nipype import config
    config.enable_debug_mode()

    method = 'fs'  # freesurfer's mri_convert is faster
    if method == 'fs':
        import nipype.interfaces.freesurfer as fs    # freesurfer
    else:
        assert method == 'fsl'
        import nipype.interfaces.fsl as fsl          # fsl

    # ------------------ Specify variables
    ds_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    data_dir = ds_root
    output_dir = 'derivatives/resampled-isotropic-06mm'
    working_dir = 'workingdirs'

    # ------------------ Input Files
    infosource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
    ]), name="infosource")

    if csv_file is not None:
        reader = niu.CSVReader()
        reader.inputs.header = True  
        reader.inputs.in_file = csv_file
        out = reader.run()

        infosource.iterables = [
            ('session_id', out.outputs.session),
            ('subject_id', out.outputs.subject),
        ]

        infosource.synchronize = True
    else:  # neglected code
        if session is not None:
            session_list = [session]  # ['20170511']
        else:
            session_list = bt.session_list  # ['20170511']

        infosource.iterables = [
            ('session_id', session_list),
            ('subject_id', bt.subject_list),
        ]

    # SelectFiles
    templates = {
        # 'image': 'sub-{subject_id}/ses-{session_id}/{datatype}/'
        'image': 'sub-{subject_id}/ses-{session_id}/anat/'
                'sub-{subject_id}_ses-{session_id}_*.nii.gz',
    }
    inputfiles = Node(
        nio.SelectFiles(templates,
                        base_directory=data_dir), name="input_files")

    # ------------------ Output Files
    # Datasink
    outputfiles = Node(nio.DataSink(
        base_directory=ds_root,
        container=output_dir,
        parameterization=True),
        name="output_files")

    # Use the following DataSink output substitutions
    outputfiles.inputs.substitutions = [
        ('subject_id_', 'sub-'),
        ('session_id_', 'ses-'),
        # BIDS Extension Proposal: BEP003
        ('_resample.nii.gz', '_res-06x06x06_preproc.nii.gz'),
        # remove subdirectories:
        ('resampled-isotropic-06mm/isoxfm-06mm', 'resampled-isotropic-06mm'),
        ('resampled-isotropic-06mm/mriconv-06mm', 'resampled-isotropic-06mm'),
    ]
    # Put result into a BIDS-like format
    outputfiles.inputs.regexp_substitutions = [
        # this works only if datatype is specified in input
        (r'_datatype_([a-z]*)_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)',
         r'sub-\3/ses-\2/\1'),
        (r'_fs_iso06mm[0-9]*/', r''),
        (r'/_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)',
         r'/sub-\2/ses-\1/'),
        # stupid hacks for when datatype is not specified
        (r'//(sub-[^/]*_bold_res-.*)', r'/func/\1'),
        (r'//(sub-[^/]*_phasediff_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_magnitude1_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_epi_res-.*.nii.gz)', r'/fmap/\1'),
        (r'//(sub-[^/]*_T1w_res-.*.nii.gz)', r'/anat/\1'),
        (r'//(sub-[^/]*_dwi_res-.*.nii.gz)', r'/dwi/\1'),
    ]

    # -------------------------------------------- Create Pipeline
    isotropic_flow = Workflow(
        name='resample_isotropic06mm',
        base_dir=os.path.join(ds_root, working_dir))

    isotropic_flow.connect([
        (infosource, inputfiles,
         [('subject_id', 'subject_id'),
          ('session_id', 'session_id'),
          ])])

    # --- Convert to 0.6 mm isotropic voxels

    if method == 'fs':
        fs_iso06mm = MapNode(
            fs.Resample(
                voxel_size=(0.6, 0.6, 0.6),
                # suffix is not accepted by fs.Resample
                # suffix='_res-1x1x1_preproc',  # BIDS Extension Proposal: BEP003
            ),
            name='fs_iso06mm',
            iterfield=['in_file'],
        )

        isotropic_flow.connect(inputfiles, 'image',
                               fs_iso06mm, 'in_file')
        isotropic_flow.connect(fs_iso06mm, 'resampled_file',
                               outputfiles, 'mriconv-06mm')
    elif method == 'fsl':
        # in_file --> out_file
        isoxfm = Node(fsl.FLIRT(
            apply_isoxfm=0.6,
        ),
            name='isoxfm')

        isotropic_flow.connect(inputfiles, 'image',
                               isoxfm, 'in_file')
        isotropic_flow.connect(inputfiles, 'image',
                               isoxfm, 'reference')
        isotropic_flow.connect(isoxfm, 'out_file',
                               outputfiles, 'isoxfm-06mm')

    isotropic_flow.stop_on_first_crash = False  # True
    isotropic_flow.keep_inputs = True
    isotropic_flow.remove_unnecessary_outputs = False
    isotropic_flow.write_graph()
    outgraph = isotropic_flow.run()
Example 10
def run_workflow(session, csv_file, use_pbs, stop_on_first_crash,
                 ignore_events):
    import bids_templates as bt

    from nipype import config
    config.enable_debug_mode()

    # ------------------ Specify variables
    ds_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    data_dir = ds_root
    output_dir = ''
    working_dir = 'workingdirs/minimal_processing'

    # ------------------ Input Files
    infosource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
    ]),
                      name="infosource")

    if csv_file is not None:
        reader = niu.CSVReader()
        reader.inputs.header = True
        reader.inputs.in_file = csv_file
        out = reader.run()
        subject_list = out.outputs.subject
        session_list = out.outputs.session
        infosource.iterables = [
            ('session_id', session_list),
            ('subject_id', subject_list),
        ]
        if 'run' in out.outputs.traits().keys():
            print('Ignoring the "run" field of %s.' % csv_file)

        infosource.synchronize = True
    else:  # neglected code
        if session is not None:
            session_list = [session]  # ['20170511']
        else:
            session_list = bt.session_list  # ['20170511']

        infosource.iterables = [
            ('session_id', session_list),
            ('subject_id', bt.subject_list),
        ]

    process_images = True

    if process_images:
        datatype_list = bt.datatype_list

        imgsource = Node(IdentityInterface(fields=[
            'subject_id',
            'session_id',
            'datatype',
        ]),
                         name="imgsource")
        imgsource.iterables = [
            ('session_id', session_list),
            ('subject_id', subject_list),
            ('datatype', datatype_list),
        ]

        # SelectFiles
        imgfiles = Node(nio.SelectFiles(
            {
                'images': 'sourcedata/%s' % bt.templates['images'],
            },
            base_directory=data_dir),
                        name="img_files")

    evsource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
    ]),
                    name="evsource")
    evsource.iterables = [
        ('session_id', session_list),
        ('subject_id', subject_list),
    ]
    evfiles = Node(nio.SelectFiles(
        {
            'csv_eventlogs':
            'sourcedata/sub-{subject_id}/ses-{session_id}/func/'
            'sub-{subject_id}_ses-{session_id}_*events/Log_*_eventlog.csv',
            'stim_dir':
            'sourcedata/sub-{subject_id}/ses-{session_id}/func/'
            'sub-{subject_id}_ses-{session_id}_*events/',
        },
        base_directory=data_dir),
                   name="evfiles")

    # ------------------ Output Files
    # Datasink
    outputfiles = Node(nio.DataSink(base_directory=ds_root,
                                    container=output_dir,
                                    parameterization=True),
                       name="output_files")

    # Use the following DataSink output substitutions
    outputfiles.inputs.substitutions = [('subject_id_', 'sub-'),
                                        ('session_id_', 'ses-'),
                                        ('/minimal_processing/', '/'),
                                        ('_out_reoriented.nii.gz', '.nii.gz')]
    # Put result into a BIDS-like format
    outputfiles.inputs.regexp_substitutions = [
        (r'_datatype_([a-z]*)_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)',
         r'sub-\3/ses-\2/\1'),
        (r'/_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)', r'/sub-\2/ses-\1/'),
        (r'/_ro[0-9]+/', r'/'),
        (r'/_csv2tsv[0-9]+/', r'/func/'),
    ]

    # -------------------------------------------- Create Pipeline
    workflow = Workflow(name='wrapper',
                        base_dir=os.path.join(ds_root, working_dir))

    if process_images:
        workflow.connect([(imgsource, imgfiles, [
            ('subject_id', 'subject_id'),
            ('session_id', 'session_id'),
            ('datatype', 'datatype'),
        ])])

    workflow.connect([
        (evsource, evfiles, [
            ('subject_id', 'subject_id'),
            ('session_id', 'session_id'),
        ]),
    ])

    if process_images:
        minproc = create_images_workflow()
        workflow.connect(imgfiles, 'images', minproc, 'in.images')
        workflow.connect(minproc, 'out.images', outputfiles,
                         'minimal_processing.@images')

    if not ignore_events:
        csv2tsv = MapNode(ConvertCSVEventLog(),
                          iterfield=['in_file', 'stim_dir'],
                          name='csv2tsv')
        workflow.connect(evfiles, 'csv_eventlogs', csv2tsv, 'in_file')
        workflow.connect(evfiles, 'stim_dir', csv2tsv, 'stim_dir')
        workflow.connect(csv2tsv, 'out_file', outputfiles,
                         'minimal_processing.@eventlogs')

    workflow.stop_on_first_crash = stop_on_first_crash
    workflow.keep_inputs = True
    workflow.remove_unnecessary_outputs = False
    workflow.write_graph()
    #workflow.run(plugin='MultiProc', plugin_args={'n_procs' : 10})
    workflow.run()
def run_workflows(session=None, csv_file=None):
    from nipype import config
    #config.enable_debug_mode()

    # ------------------ Specify variables
    ds_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    data_dir = ds_root
    output_dir = 'derivatives/undistort'
    working_dir = 'workingdirs'

    # ------------------ Input Files
    infosource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'run_id',
        'refsubject_id',
    ]), name="infosource")

    if csv_file is not None:
        print('=== reading csv ===')
        # Read csv and use pandas to set-up image and ev-processing
        df = pd.read_csv(csv_file)
        # init lists
        sub_img = []
        ses_img = []
        run_img = []
        ref_img = []

        # fill lists to iterate mapnodes
        for index, row in df.iterrows():
            for r in row.run.strip("[]").split(" "):
                sub_img.append(row.subject)
                ses_img.append(row.session)
                run_img.append(r)
                if 'refsubject' in df.columns:
                    if row.refsubject == 'nan':
                        # empty field
                        ref_img.append(row.subject)
                    else:
                        # non-empty field
                        ref_img.append(row.refsubject)
                else:
                    ref_img.append(row.subject)

        infosource.iterables = [
            ('subject_id', sub_img),
            ('session_id', ses_img),
            ('run_id', run_img),
            ('refsubject_id', ref_img),
        ]
        infosource.synchronize = True
    else:
        print("No csv-file specified. Don't know what data to process.")


    # SelectFiles
    templates = {
        'image': 
        'derivatives/resampled-isotropic-1mm/'
        'sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}_bold_res-1x1x1_preproc.nii.gz',
        'image_invPE': 
        'derivatives/resampled-isotropic-1mm/'
        'sub-{subject_id}/ses-{session_id}/fmap/'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}_epi_res-1x1x1_preproc.nii.gz',
    }
    
    inputfiles = Node(
        nio.SelectFiles(templates,
                        base_directory=data_dir), 
                        name="input_files")

    # Datasink
    outfiles = Node(nio.DataSink(
        base_directory=ds_root,
        container=output_dir,
        parameterization=True),
        name="outfiles")

    # Use the following DataSink output substitutions
    outfiles.inputs.substitutions = [
        ('refsubject_id_', 'ref-'),
        ('subject_id_', 'sub-'),
        ('session_id_', 'ses-'),
        ('resampled-isotropic-1mm','undistort'),
        ('undistort/ud_func', 'undistort'),
    ]  
       
    outfiles.inputs.regexp_substitutions = [
        (r'_ses-([a-zA-Z0-9]+)_sub-([a-zA-Z0-9]+)', r'sub-\2/ses-\1/func'),
        (r'_ref-([a-zA-Z0-9]+)_run_id_[0-9][0-9]', r''),
    ]
    
    templates_mv = {
        'ud_minus': 
        'derivatives/resampled-isotropic-1mm/'
        'sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}_bold_res-1x1x1_preproc_MINUS.nii.gz',
        'ud_minus_warp': 
        'derivatives/resampled-isotropic-1mm/'
        'sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}_bold_res-1x1x1_preproc_MINUS_WARP.nii.gz',
        'ud_plus': 
        'derivatives/resampled-isotropic-1mm/'
        'sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}_bold_res-1x1x1_preproc_PLUS.nii.gz',
        'ud_plus_warp': 
        'derivatives/resampled-isotropic-1mm/'
        'sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}_bold_res-1x1x1_preproc_PLUS_WARP.nii.gz',
    }
    
    mv_infiles = Node(
        nio.SelectFiles(templates_mv,
                        base_directory=data_dir), 
                        name="mv_infiles")

    # Datasink
    mv_outfiles = Node(nio.DataSink(
        base_directory=ds_root,
        container=output_dir,
        parameterization=True),
        name="mv_outfiles")

    # Use the following DataSink output substitutions
    mv_outfiles.inputs.substitutions = [
        ('refsubject_id_', 'ref-'),
        ('subject_id_', 'sub-'),
        ('session_id_', 'ses-'),
        ('resampled-isotropic-1mm','undistort'),
        ('undistort/ud_func', 'undistort'),
    ]  
       
    mv_outfiles.inputs.regexp_substitutions = [
        (r'sub-([a-zA-Z0-9]+)_ses-([a-zA-Z0-9]+)', r'sub-\1/ses-\2/func/qwarp_plusminus/sub-\1_ses-\2'),
    ]    
    
    # -------------------------------------------- Create Pipeline
    undistort = Workflow(
        name='undistort',
        base_dir=os.path.join(ds_root, working_dir))

    undistort.connect([
        (infosource, inputfiles,
         [('subject_id', 'subject_id'),
          ('session_id', 'session_id'),
          ('run_id', 'run_id'),
          ('refsubject_id', 'refsubject_id'),
          ])])
               
    qwarp = Node(afni.QwarpPlusMinus(
        nopadWARP=True,outputtype='NIFTI_GZ'),
                    iterfield=('in_file'),name='qwarp')       
        
    undistort.connect(inputfiles, 'image',
                        qwarp, 'in_file')
    undistort.connect(inputfiles, 'image_invPE',
                        qwarp, 'base_file') 
    undistort.connect(inputfiles, 'image',
                        qwarp, 'out_file')    
  
    nwarp = Node(afni.NwarpApply(out_file='%s_undistort.nii.gz'),name='nwarp')
    
    undistort.connect(inputfiles, 'image',
                     nwarp, 'in_file')
    undistort.connect(qwarp, 'source_warp',
                     nwarp, 'warp')
    undistort.connect(inputfiles, 'image',
                     nwarp, 'master')
    undistort.connect(nwarp, 'out_file',
                     outfiles, 'ud_func')

    undistort.stop_on_first_crash = False  # True
    undistort.keep_inputs = True
    undistort.remove_unnecessary_outputs = False
    undistort.write_graph()
    undistort.run()

    mv_ud = Workflow(
        name='mv_ud',
        base_dir=os.path.join(ds_root, working_dir))

    mv_ud.connect([
        (infosource, mv_infiles,
         [('subject_id', 'subject_id'),
          ('session_id', 'session_id'),
          ('run_id', 'run_id'),
          ('refsubject_id', 'refsubject_id'),
          ])])
    
    mv_ud.connect(mv_infiles, 'ud_minus',
                        mv_outfiles, 'ud_func.@ud_minus')
    mv_ud.connect(mv_infiles, 'ud_plus',
                        mv_outfiles, 'ud_func.@ud_plus')
    mv_ud.connect(mv_infiles, 'ud_minus_warp',
                        mv_outfiles, 'ud_func.@ud_minus_warp')
    mv_ud.connect(mv_infiles, 'ud_plus_warp',
                        mv_outfiles, 'ud_func.@ud_plus_warp')
    
    mv_ud.stop_on_first_crash = False  # True
    mv_ud.keep_inputs = True
    mv_ud.remove_unnecessary_outputs = False
    mv_ud.write_graph()
    mv_ud.run()

    # remove the undistorted files from the ...derivatives/resampled folder
    for index, row in df.iterrows():
        fpath = os.path.join(data_dir,'derivatives','resampled-isotropic-1mm',
                     'sub-' + row.subject,'ses-' + str(row.session),'func')
        for f in glob.glob(os.path.join(fpath,'*US*.nii.gz')):
            os.remove(f)
def run_workflow(session=None, csv_file=None, undist=True):
    from nipype import config
    #config.enable_debug_mode()

    # ------------------ Specify variables
    ds_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    data_dir = ds_root
    output_dir = 'derivatives/featpreproc/motion_outliers'
    working_dir = 'workingdirs'

    # ------------------ Input Files
    infosource = Node(IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'run_id',
    ]),
                      name="infosource")

    if csv_file is not None:
        print('=== reading csv ===')
        # Read csv and use pandas to set-up image and ev-processing
        df = pd.read_csv(csv_file)
        # init lists
        sub_img = []
        ses_img = []
        run_img = []

        # fill lists to iterate mapnodes
        for index, row in df.iterrows():
            for r in row.run.strip("[]").split(" "):
                sub_img.append(row.subject)
                ses_img.append(row.session)
                run_img.append(r)

        infosource.iterables = [
            ('subject_id', sub_img),
            ('session_id', ses_img),
            ('run_id', run_img),
        ]
        infosource.synchronize = True
    else:
        print("No csv-file specified. Don't know what data to process.")

    # use undistorted EPIs if requested (these need to be generated with the undistort workflow)
    if undist:
        func_flag = 'preproc_undistort'
    else:
        func_flag = 'preproc'

    # SelectFiles
    templates = {
        'motion_outlier_files':
        'derivatives/featpreproc/motion_outliers/sub-{subject_id}/'
        'ses-{session_id}/func/art.sub-{subject_id}_ses-{session_id}_*_'
        'run-{run_id}_bold_res-1x1x1_' + func_flag + '_mc_maths_outliers.txt',
        'masks':
        'derivatives/featpreproc/motion_outliers/sub-{subject_id}/'
        'ses-{session_id}/func/mask.sub-{subject_id}_ses-{session_id}_*_'
        'run-{run_id}_bold_res-1x1x1_' + func_flag + '_mc_maths.nii.gz',
        'motion_corrected':
        'derivatives/featpreproc/motion_corrected/sub-{subject_id}/'
        'ses-{session_id}/func/sub-{subject_id}_ses-{session_id}_*_'
        'run-{run_id}_bold_res-1x1x1_' + func_flag + '_mc.nii.gz',
    }

    inputfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),
                      name="input_files")

    # ------------------ Output Files
    # Datasink
    outputfiles = Node(nio.DataSink(base_directory=ds_root,
                                    container=output_dir,
                                    parameterization=True),
                       name="output_files")

    # Use the following DataSink output substitutions
    outputfiles.inputs.substitutions = [
        ('subject_id_', 'sub-'),
        ('session_id_', 'ses-'),
        ('run_id_', 'run-'),
        ('/merged_outliers/', '/'),
        ('/fslmotionoutlier_file/', '/'),
        ('bold_res-1x1x1_' + func_flag + '_mc_outliers',
         func_flag + '_fslmotionoutliers'),
    ]

    # Put result into a BIDS-like format
    outputfiles.inputs.regexp_substitutions = [
        (r'_run-([a-zA-Z0-9]*)_ses-([a-zA-Z0-9]*)_sub-([a-zA-Z0-9]*)',
         r'/sub-\3/ses-\2/func/'),
    ]

    # -------------------------------------------- Create Pipeline
    fslmotionoutliers = Workflow(name='fslmotionoutliers',
                                 base_dir=os.path.join(ds_root, working_dir))

    fslmotionoutliers.connect([(infosource, inputfiles, [
        ('subject_id', 'subject_id'),
        ('session_id', 'session_id'),
        ('run_id', 'run_id'),
    ])])

    GetOutliers = Node(fsl.MotionOutliers(), name='GetOutliers')

    GetOutliers.inputs.no_motion_correction = True

    fslmotionoutliers.connect(inputfiles, 'motion_corrected', GetOutliers,
                              'in_file')
    fslmotionoutliers.connect(inputfiles, 'masks', GetOutliers, 'mask')
    fslmotionoutliers.connect(GetOutliers, 'out_file', outputfiles,
                              'fslmotionoutlier_file')

    # convert the FSL-style design matrix to AFNI-style volume indices
    # (a hedged sketch of combine_outlier_files follows after this function)
    ConvToAFNI = Node(name='ConvtoAFNI',
                      interface=Function(
                          input_names=['fslmat', 'rafile', 'undist'],
                          output_names=['mergedoutliers_file'],
                          function=combine_outlier_files,
                      ))

    fslmotionoutliers.connect(GetOutliers, 'out_file', ConvToAFNI, 'fslmat')
    fslmotionoutliers.connect(inputfiles, 'motion_outlier_files', ConvToAFNI,
                              'rafile')
    fslmotionoutliers.connect(ConvToAFNI, 'mergedoutliers_file', outputfiles,
                              'merged_outliers')

    fslmotionoutliers.stop_on_first_crash = False  # True
    fslmotionoutliers.keep_inputs = True
    fslmotionoutliers.remove_unnecessary_outputs = False
    fslmotionoutliers.write_graph()
    fslmotionoutliers.run()
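The Function node above relies on a combine_outlier_files helper that is not included in this excerpt. Below is a hedged sketch of what such a helper might look like, purely to illustrate the FSL-to-AFNI index conversion described in the comment; every detail (assumed file formats, output name, the unused undist argument) is an assumption, not the original implementation.

def combine_outlier_files(fslmat, rafile, undist):
    # Hypothetical sketch (not the original implementation). Reads the
    # fsl_motion_outliers confound matrix (one column per outlier, a 1 in the
    # row of the flagged volume), converts it to 0-based volume indices,
    # merges them with the indices listed in the rapidart outlier file, and
    # writes one merged index per line. 'undist' is kept only to match the
    # Function node signature and is unused here.
    import os
    import numpy as np

    # volumes flagged by fsl_motion_outliers: rows containing a 1
    fsl_idx = set()
    if os.path.getsize(fslmat) > 0:
        mat = np.loadtxt(fslmat)
        if mat.ndim == 1:
            mat = mat[:, None]
        fsl_idx = set(np.where(mat.sum(axis=1) > 0)[0].tolist())

    # volumes already flagged by rapidart (one index per line)
    ra_idx = set()
    if os.path.getsize(rafile) > 0:
        ra_idx = set(np.atleast_1d(np.loadtxt(rafile)).astype(int).tolist())

    merged = sorted(fsl_idx | ra_idx)
    mergedoutliers_file = os.path.join(os.getcwd(), 'merged_outliers.txt')
    with open(mergedoutliers_file, 'w') as fp:
        fp.write('\n'.join(str(i) for i in merged))
    return mergedoutliers_file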