Example 1
def test_bids_naming():
    from samri.pipelines.extra_functions import get_data_selection
    from samri.pipelines.utils import bids_naming
    import pandas as pd
    from os import path

    bruker_data_dir = path.join(DATA_DIR, 'bruker')
    f_data_selection = get_data_selection(
        bruker_data_dir,
        match={
            'task': ['JogB', 'CogB', 'CogB2m'],
        },
    )
    s_data_selection = get_data_selection(
        bruker_data_dir,
        match={
            'acquisition': ['TurboRARE', 'TurboRARElowcov'],
        },
    )

    name = bids_naming(
        subject_session=('5706', 'ofMpF'),
        metadata=f_data_selection,
    )
    assert_and_feedback(name,
                        'sub-5706_ses-ofMpF_task-CogB_acq-EPI_cbv.nii.gz',
                        debug=f_data_selection)

    name = bids_naming(
        subject_session=('4011', 'ofMaF'),
        metadata=s_data_selection,
    )
    assert_and_feedback(name,
                        'sub-4011_ses-ofMaF_acq-TurboRARElowcov_T2w.nii.gz',
                        debug=s_data_selection)

    name = bids_naming(
        subject_session=('5704', 'ofMpF'),
        metadata=f_data_selection,
    )
    assert_and_feedback(name,
                        'sub-5704_ses-ofMpF_task-CogB_acq-EPI_cbv.nii.gz',
                        debug=f_data_selection)

    name = bids_naming(
        subject_session=('5704', 'ofMpF'),
        metadata=s_data_selection,
    )
    assert_and_feedback(name,
                        'sub-5704_ses-ofMpF_acq-TurboRARE_T2w.nii.gz',
                        debug=s_data_selection)
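
A minimal sketch of the `assert_and_feedback` helper assumed by the test above (it is not defined in this snippet; the assumed behaviour is to compare the generated name against the expected one and, on mismatch, print the data selection passed via `debug`):

def assert_and_feedback(generated, expected, debug=None):
    # Hypothetical helper, not shown in the snippet above: report the data
    # selection that produced a mismatching name, then fail the assertion.
    if generated != expected and debug is not None:
        print('Data selection used for naming:')
        print(debug)
    assert generated == expected, '{} != {}'.format(generated, expected)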
Example 2
def test_bids_naming():
	from samri.pipelines.extra_functions import get_data_selection
	from samri.pipelines.utils import bids_naming
	import pandas as pd
	from os import path

	bruker_data_dir = path.join(DATA_DIR,'bruker')
	f_data_selection = get_data_selection(bruker_data_dir,
		match={
			'task':['JogB','CogB','CogB2m'],
			},
		)
	s_data_selection = get_data_selection(bruker_data_dir,
		match={
			'acquisition':['TurboRARE', 'TurboRARElowcov'],
			},
		)

	name = bids_naming(
			subject_session=('5706','ofMpF'),
			metadata=f_data_selection,
			)
	assert_and_feedback(name,'sub-5706_ses-ofMpF_task-CogB_acq-EPI_cbv.nii.gz', debug=f_data_selection)

	name = bids_naming(
			subject_session=('4011','ofMaF'),
			metadata=s_data_selection,
			)
	assert_and_feedback(name,'sub-4011_ses-ofMaF_acq-TurboRARElowcov_T2w.nii.gz', debug=s_data_selection)

	name = bids_naming(
			subject_session=('5704','ofMpF'),
			metadata=f_data_selection,
			)
	assert_and_feedback(name,'sub-5704_ses-ofMpF_task-CogB_acq-EPI_cbv.nii.gz', debug=f_data_selection)

	name = bids_naming(
			subject_session=('5704','ofMpF'),
			metadata=s_data_selection,
			)
	assert_and_feedback(name,'sub-5704_ses-ofMpF_acq-TurboRARE_T2w.nii.gz', debug=s_data_selection)
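
Purely as an illustration of the naming scheme asserted above (this is not the SAMRI implementation), the expected file names decompose into BIDS entities in a fixed order:

def compose_bids_name(subject, session, acquisition, suffix, task=None, extension='.nii.gz'):
    # Illustrative only: assemble sub/ses/(task)/acq entities plus a suffix.
    entities = ['sub-' + subject, 'ses-' + session]
    if task:
        entities.append('task-' + task)
    entities.append('acq-' + acquisition)
    return '_'.join(entities + [suffix]) + extension

assert compose_bids_name('5706', 'ofMpF', 'EPI', 'cbv', task='CogB') == 'sub-5706_ses-ofMpF_task-CogB_acq-EPI_cbv.nii.gz'
assert compose_bids_name('4011', 'ofMaF', 'TurboRARElowcov', 'T2w') == 'sub-4011_ses-ofMaF_acq-TurboRARElowcov_T2w.nii.gz'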
Example 3
def bruker(
    measurements_base,
    template,
    debug=False,
    exclude={},
    functional_match={},
    structural_match={},
    sessions=[],
    subjects=[],
    actual_size=True,
    functional_blur_xy=False,
    functional_registration_method="structural",
    highpass_sigma=225,
    lowpass_sigma=None,
    negative_contrast_agent=False,
    n_procs=N_PROCS,
    realign="time",
    registration_mask=False,
    tr=1,
    very_nasty_bruker_delay_hack=False,
    workflow_name="generic",
    keep_work=False,
    autorotate=False,
    strict=False,
    verbose=False,
):
    '''
    realign : {"space", "time", "spacetime", ""}
        Parameter that dictates slice-timing correction and realignment of slices. "time" (FSL.SliceTimer) is the default, since it works safely. Use the others only with caution!
    '''
    if template:
        if template == "mouse":
            template = fetch_mouse_DSURQE()['template']
            registration_mask = fetch_mouse_DSURQE()['mask']
        elif template == "rat":
            template = fetch_rat_waxholm()['template']
            registration_mask = fetch_rat_waxholm()['mask']
        else:
            pass
    else:
        raise ValueError("No species or template specified")

    measurements_base = path.abspath(path.expanduser(measurements_base))

    # Add subject and session filters to the scan matching criteria, if present.
    if subjects:
        for match_criteria in (structural_match, functional_match):
            if match_criteria:
                match_criteria['subject'] = subjects
    if sessions:
        for match_criteria in (structural_match, functional_match):
            if match_criteria:
                match_criteria['session'] = sessions

    # define measurement directories to be processed, and populate the list either with the given include_measurements, or with an intelligent selection
    functional_scan_types = structural_scan_types = []
    data_selection = pd.DataFrame([])
    if structural_match:
        s_data_selection = get_data_selection(
            measurements_base,
            match=structural_match,
            exclude=exclude,
        )
        structural_scan_types = list(s_data_selection['scan_type'].unique())
        data_selection = pd.concat([data_selection, s_data_selection])
    if functional_match:
        f_data_selection = get_data_selection(
            measurements_base,
            match=functional_match,
            exclude=exclude,
        )
        functional_scan_types = list(f_data_selection['scan_type'].unique())
        data_selection = pd.concat([data_selection, f_data_selection])

    # we currently only support one structural scan type per session
    #if functional_registration_method in ("structural", "composite") and structural_scan_types:
    #	structural_scan_types = [structural_scan_types[0]]

    # we start to define nipype workflow elements (nodes, connections, meta)
    subjects_sessions = data_selection[["subject", "session"
                                        ]].drop_duplicates().values.tolist()
    if debug:
        print('Data selection:')
        print(data_selection)
        print('Iterating over:')
        print(subjects_sessions)
    infosource = pe.Node(interface=util.IdentityInterface(
        fields=['subject_session'], mandatory_inputs=False),
                         name="infosource")
    infosource.iterables = [('subject_session', subjects_sessions)]

    get_f_scan = pe.Node(name='get_f_scan',
                         interface=util.Function(
                             function=get_scan,
                             input_names=inspect.getargspec(get_scan)[0],
                             output_names=['scan_path', 'scan_type', 'trial']))
    if not strict:
        get_f_scan.inputs.ignore_exception = True
    get_f_scan.inputs.data_selection = data_selection
    get_f_scan.inputs.measurements_base = measurements_base
    get_f_scan.iterables = ("scan_type", functional_scan_types)

    f_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="f_bru2nii")
    f_bru2nii.inputs.actual_size = actual_size

    dummy_scans = pe.Node(
        name='dummy_scans',
        interface=util.Function(
            function=force_dummy_scans,
            input_names=inspect.getargspec(force_dummy_scans)[0],
            output_names=['out_file']))
    dummy_scans.inputs.desired_dummy_scans = DUMMY_SCANS

    bandpass = pe.Node(interface=fsl.maths.TemporalFilter(), name="bandpass")
    bandpass.inputs.highpass_sigma = highpass_sigma
    if lowpass_sigma:
        bandpass.inputs.lowpass_sigma = lowpass_sigma
    else:
        bandpass.inputs.lowpass_sigma = tr

    #bids_filename = pe.Node(name='bids_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
    bids_filename = pe.Node(name='bids_filename',
                            interface=util.Function(
                                function=bids_naming,
                                input_names=inspect.getargspec(bids_naming)[0],
                                output_names=['filename']))
    bids_filename.inputs.metadata = data_selection

    #bids_stim_filename = pe.Node(name='bids_stim_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
    bids_stim_filename = pe.Node(
        name='bids_stim_filename',
        interface=util.Function(function=bids_naming,
                                input_names=inspect.getargspec(bids_naming)[0],
                                output_names=['filename']))
    bids_stim_filename.inputs.suffix = "events"
    bids_stim_filename.inputs.extension = ".tsv"
    bids_stim_filename.inputs.metadata = data_selection

    events_file = pe.Node(
        name='events_file',
        interface=util.Function(
            function=write_events_file,
            input_names=inspect.getargspec(write_events_file)[0],
            output_names=['out_file']))
    events_file.inputs.dummy_scans_ms = DUMMY_SCANS * tr * 1000
    events_file.inputs.stim_protocol_dictionary = STIM_PROTOCOL_DICTIONARY
    events_file.inputs.very_nasty_bruker_delay_hack = very_nasty_bruker_delay_hack
    if not (strict or verbose):
        events_file.inputs.ignore_exception = True

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(measurements_base,
                                               "preprocessing", workflow_name)
    datasink.inputs.parameterization = False
    if not (strict or verbose):
        datasink.inputs.ignore_exception = True

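    # Base connections shared by every configuration; the conditional branches
    # below extend this list before the workflow is assembled.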
    workflow_connections = [
        (infosource, get_f_scan, [('subject_session', 'selector')]),
        (infosource, bids_stim_filename, [('subject_session',
                                           'subject_session')]),
        (get_f_scan, bids_stim_filename, [('scan_type', 'scan_type')]),
        (get_f_scan, f_bru2nii, [('scan_path', 'input_dir')]),
        (f_bru2nii, dummy_scans, [('nii_file', 'in_file')]),
        (get_f_scan, dummy_scans, [('scan_path', 'scan_dir')]),
        (get_f_scan, events_file, [('trial', 'trial'),
                                   ('scan_path', 'scan_dir')]),
        (events_file, datasink, [('out_file', 'func.@events')]),
        (bids_stim_filename, events_file, [('filename', 'out_file')]),
        (infosource, datasink, [(('subject_session', ss_to_path), 'container')
                                ]),
        (infosource, bids_filename, [('subject_session', 'subject_session')]),
        (get_f_scan, bids_filename, [('scan_type', 'scan_type')]),
        (bids_filename, bandpass, [('filename', 'out_file')]),
        (bandpass, datasink, [('out_file', 'func')]),
    ]

    if realign == "space":
        realigner = pe.Node(interface=spm.Realign(), name="realigner")
        realigner.inputs.register_to_mean = True
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "spacetime":
        realigner = pe.Node(interface=nipy.SpaceTimeRealigner(),
                            name="realigner")
        realigner.inputs.slice_times = "asc_alt_2"
        realigner.inputs.tr = tr
        realigner.inputs.slice_info = 3  #3 for coronal slices (2 for horizontal, 1 for sagittal)
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "time":
        realigner = pe.Node(interface=fsl.SliceTimer(), name="slicetimer")
        realigner.inputs.time_repetition = tr
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    #ADDING SELECTABLE NODES AND EXTENDING WORKFLOW AS APPROPRIATE:
    if actual_size:
        s_biascorrect, f_biascorrect = real_size_nodes()
    else:
        s_biascorrect, f_biascorrect = inflated_size_nodes()

    if structural_scan_types:
        get_s_scan = pe.Node(
            name='get_s_scan',
            interface=util.Function(
                function=get_scan,
                input_names=inspect.getargspec(get_scan)[0],
                output_names=['scan_path', 'scan_type', 'trial']))
        if not strict:
            get_s_scan.inputs.ignore_exception = True
        get_s_scan.inputs.data_selection = data_selection
        get_s_scan.inputs.measurements_base = measurements_base
        get_s_scan.iterables = ("scan_type", structural_scan_types)

        s_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="s_bru2nii")
        s_bru2nii.inputs.force_conversion = True
        s_bru2nii.inputs.actual_size = actual_size

        #s_bids_filename = pe.Node(name='s_bids_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
        s_bids_filename = pe.Node(
            name='s_bids_filename',
            interface=util.Function(
                function=bids_naming,
                input_names=inspect.getargspec(bids_naming)[0],
                output_names=['filename']))
        s_bids_filename.inputs.metadata = data_selection

        if actual_size:
            s_register, s_warp, _, _ = DSURQEc_structural_registration(
                template, registration_mask)
            #TODO: incl. in func registration
            if autorotate:
                workflow_connections.extend([
                    (s_biascorrect, s_rotated, [('output_image', 'out_file')]),
                    (s_rotated, s_register, [('out_file', 'moving_image')]),
                ])
            else:
                workflow_connections.extend([
                    (s_biascorrect, s_register, [('output_image',
                                                  'moving_image')]),
                    (s_register, s_warp, [('composite_transform', 'transforms')
                                          ]),
                    (s_bru2nii, s_warp, [('nii_file', 'input_image')]),
                    (s_warp, datasink, [('output_image', 'anat')]),
                ])
        else:
            s_reg_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(),
                                        name="s_reg_biascorrect")
            s_reg_biascorrect.inputs.dimension = 3
            s_reg_biascorrect.inputs.bspline_fitting_distance = 95
            s_reg_biascorrect.inputs.shrink_factor = 2
            s_reg_biascorrect.inputs.n_iterations = [500, 500, 500, 500]
            s_reg_biascorrect.inputs.convergence_threshold = 1e-14

            s_cutoff = pe.Node(interface=fsl.ImageMaths(), name="s_cutoff")
            s_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"

            s_BET = pe.Node(interface=fsl.BET(), name="s_BET")
            s_BET.inputs.mask = True
            s_BET.inputs.frac = 0.3
            s_BET.inputs.robust = True

            s_mask = pe.Node(interface=fsl.ApplyMask(), name="s_mask")
            s_register, s_warp, f_warp = structural_registration(template)

            workflow_connections.extend([
                (s_bru2nii, s_reg_biascorrect, [('nii_file', 'input_image')]),
                (s_reg_biascorrect, s_cutoff, [('output_image', 'in_file')]),
                (s_cutoff, s_BET, [('out_file', 'in_file')]),
                (s_biascorrect, s_mask, [('output_image', 'in_file')]),
                (s_BET, s_mask, [('mask_file', 'mask_file')]),
            ])

            #TODO: incl. in func registration
            if autorotate:
                workflow_connections.extend([
                    (s_mask, s_rotated, [('out_file', 'out_file')]),
                    (s_rotated, s_register, [('out_file', 'moving_image')]),
                ])
            else:
                workflow_connections.extend([
                    (s_mask, s_register, [('out_file', 'moving_image')]),
                    (s_register, s_warp, [('composite_transform', 'transforms')
                                          ]),
                    (s_bru2nii, s_warp, [('nii_file', 'input_image')]),
                    (s_warp, datasink, [('output_image', 'anat')]),
                ])

        if autorotate:
            s_rotated = autorotate(template)

        workflow_connections.extend([
            (infosource, get_s_scan, [('subject_session', 'selector')]),
            (infosource, s_bids_filename, [('subject_session',
                                            'subject_session')]),
            (get_s_scan, s_bru2nii, [('scan_path', 'input_dir')]),
            (get_s_scan, s_bids_filename, [('scan_type', 'scan_type')]),
            (s_bids_filename, s_warp, [('filename', 'output_image')]),
            (s_bru2nii, s_biascorrect, [('nii_file', 'input_image')]),
        ])

    if functional_registration_method == "structural":
        if not structural_scan_types:
            raise ValueError(
                'The option `registration="structural"` requires there to be a structural scan type.'
            )
        workflow_connections.extend([
            (s_register, f_warp, [('composite_transform', 'transforms')]),
        ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, f_warp, [('slice_time_corrected_file',
                                      'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])

    if functional_registration_method == "composite":
        if not structural_scan_types:
            raise ValueError(
                'The option `registration="composite"` requires there to be a structural scan type.'
            )
        _, _, f_register, f_warp = DSURQEc_structural_registration(
            template, registration_mask)

        temporal_mean = pe.Node(interface=fsl.MeanImage(),
                                name="temporal_mean")

        merge = pe.Node(util.Merge(2), name='merge')

        workflow_connections.extend([
            (temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
            (f_biascorrect, f_register, [('output_image', 'moving_image')]),
            (s_biascorrect, f_register, [('output_image', 'fixed_image')]),
            (f_register, merge, [('composite_transform', 'in1')]),
            (s_register, merge, [('composite_transform', 'in2')]),
            (merge, f_warp, [('out', 'transforms')]),
        ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, temporal_mean, [('realigned_files', 'in_file')]),
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, temporal_mean, [('out_file', 'in_file')]),
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, temporal_mean, [('slice_time_corrected_file',
                                             'in_file')]),
                (realigner, f_warp, [('slice_time_corrected_file',
                                      'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, temporal_mean, [('out_file', 'in_file')]),
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])

    elif functional_registration_method == "functional":
        f_register, f_warp = functional_registration(template)

        temporal_mean = pe.Node(interface=fsl.MeanImage(),
                                name="temporal_mean")

        #f_cutoff = pe.Node(interface=fsl.ImageMaths(), name="f_cutoff")
        #f_cutoff.inputs.op_string = "-thrP 30"

        #f_BET = pe.Node(interface=fsl.BET(), name="f_BET")
        #f_BET.inputs.mask = True
        #f_BET.inputs.frac = 0.5

        workflow_connections.extend([
            (temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
            #(f_biascorrect, f_cutoff, [('output_image', 'in_file')]),
            #(f_cutoff, f_BET, [('out_file', 'in_file')]),
            #(f_BET, f_register, [('out_file', 'moving_image')]),
            (f_biascorrect, f_register, [('output_image', 'moving_image')]),
            (f_register, f_warp, [('composite_transform', 'transforms')]),
        ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, temporal_mean, [('realigned_files', 'in_file')]),
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, temporal_mean, [('out_file', 'in_file')]),
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, temporal_mean, [('slice_time_corrected_file',
                                             'in_file')]),
                (realigner, f_warp, [('slice_time_corrected_file',
                                      'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, temporal_mean, [('out_file', 'in_file')]),
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])

    invert = pe.Node(interface=fsl.ImageMaths(), name="invert")
    if functional_blur_xy and negative_contrast_agent:
        blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
        blur.inputs.fwhmxy = functional_blur_xy
        workflow_connections.extend([
            (f_warp, blur, [('output_image', 'in_file')]),
            (blur, invert, [(('out_file', fslmaths_invert_values), 'op_string')
                            ]),
            (blur, invert, [('out_file', 'in_file')]),
            (invert, bandpass, [('out_file', 'in_file')]),
        ])
    elif functional_blur_xy:
        blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
        blur.inputs.fwhmxy = functional_blur_xy
        workflow_connections.extend([
            (f_warp, blur, [('output_image', 'in_file')]),
            (blur, bandpass, [('out_file', 'in_file')]),
        ])
    elif negative_contrast_agent:
        workflow_connections.extend([
            (f_warp, invert, [(('output_image', fslmaths_invert_values),
                               'op_string')]),
            (f_warp, invert, [('output_image', 'in_file')]),
            (invert, bandpass, [('out_file', 'in_file')]),
        ])
    else:
        workflow_connections.extend([
            (f_warp, bandpass, [('output_image', 'in_file')]),
        ])

    workflow_config = {
        'execution': {
            'crashdump_dir':
            path.join(measurements_base, 'preprocessing/crashdump'),
        }
    }
    if debug:
        workflow_config['logging'] = {
            'workflow_level': 'DEBUG',
            'utils_level': 'DEBUG',
            'interface_level': 'DEBUG',
            'filemanip_level': 'DEBUG',
            'log_to_file': 'true',
        }

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = path.join(measurements_base, "preprocessing")
    workflow.config = workflow_config
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_procs})
    if not keep_work:
        shutil.rmtree(path.join(workflow.base_dir, workdir_name))
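
A hypothetical invocation of this preprocessing workflow; the measurements path and the match criteria below are placeholders, not values taken from the code above:

# Hypothetical call: '~/data/measurements' and the match criteria are placeholders.
bruker(
    '~/data/measurements',
    'mouse',
    functional_match={'task': ['CogB']},
    structural_match={'acquisition': ['TurboRARE']},
    realign='time',
    workflow_name='generic',
)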
Example 4
def bru2bids(
    measurements_base,
    bids_extra=['acq', 'run'],
    dataset_authors=[],
    dataset_license='',
    dataset_name=False,
    debug=False,
    diffusion_match={},
    exclude={},
    functional_match={},
    inflated_size=False,
    keep_crashdump=False,
    keep_work=False,
    measurements=[],
    n_procs=N_PROCS,
    out_base=None,
    structural_match={},
    workflow_name='bids',
):
    """
	Convert and reorganize Bruker "raw" directories (2dseq and ParaVision-formatted metadata files) into a BIDS-organized file hierarchy containing NIfTI files and associated metadata.
	If any exist, this workflow also reposits COSplay event files (already written according to BIDS) in the correct place in the output hierarchy.

	Parameters
	----------

	measurements_base : str
		Path of the top level directory containing all the Bruker ParaVision scan directories to be converted and reformatted.
	bids_extra : list, optional
		List of strings denoting optional BIDS fields to include in the resulting file names.
		Accepted items are 'acq' and 'run'.
	dataset_authors : list of string, optional
		A list of dataset author names, which will be written into the BIDS metadata file.
		Generally not needed, unless this is important for you.
	dataset_license : string, optional
		A dataset license name that will be written into the BIDS metadata file.
		Generally not needed, unless this is important for you.
	dataset_name : string, optional
		A dataset name that will be written into the BIDS metadata file.
		Generally not needed, as by default we use the dataset path to satisfy this BIDS requirement.
	debug : bool, optional
		Whether to enable debug support.
		This prints the data selection before passing it to the nipype workflow management system, and turns on debug support in nipype (leading to more verbose logging).
	diffusion_match : dict, optional
		A dictionary with any combination of "session", "subject", "task", and "acquisition" as keys and corresponding lists of identifiers as values.
		Only diffusion scans matching all identifiers will be included - i.e. this is a whitelist.
	exclude : dict, optional
		A dictionary with any combination of "session", "subject", "task" , and "acquisition" as keys and corresponding identifiers as values.
		Only scans not matching any of the listed criteria will be included in the workflow - i.e. this is a blacklist (for functional and structural scans).
	functional_match : dict, optional
		A dictionary with any combination of "session", "subject", "task", and "acquisition" as keys and corresponding lists of identifiers as values.
		Only functional scans matching all identifiers will be included - i.e. this is a whitelist.
	inflated_size : bool, optional
		Whether to inflate the voxel size reported by the scanner when converting the data to NIfTI.
		Setting this to `True` multiplies the voxel edge lengths by 10 (i.e. the volume by 1000); this is occasionally done in some small animal pipelines, which use routines designed exclusively for human data.
		Unless you are looking to reproduce such a workflow, this should be set to `False`.
	keep_crashdump : bool, optional
		Whether to keep the crashdump directory (containing all the crash reports for intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	keep_work : bool, optional
		Whether to keep the work directory (containing all the intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	measurements : list, optional
		Whitelist of Bruker ParaVision scan directories to consider.
	n_procs : int, optional
		Maximum number of processes to spawn simultaneously for the workflow.
		If not explicitly defined, this is automatically calculated from the number of available cores, under the assumption that the workflow will be the main process running for the duration of its execution.
	out_base : str, optional
		Base directory in which to place the BIDS reposited data.
		If not specified, the BIDS records will be created in the `measurements_base` directory.
	structural_match : dict, optional
		A dictionary with any combination of "session", "subject", "task", and "acquisition" as keys and corresponding lists of identifiers as values.
		Only structural scans matching all identifiers will be included - i.e. this is a whitelist.
	workflow_name : str, optional
		Top level name for the output directory.
	"""

    measurements_base = path.abspath(path.expanduser(measurements_base))
    if out_base:
        out_base = path.abspath(path.expanduser(out_base))
    else:
        out_base = measurements_base
    out_dir = path.join(out_base, workflow_name)
    workdir_name = workflow_name + '_work'
    workdir = path.join(out_base, workdir_name)

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # This is needed because BIDS does not yet support CBV
    with open(path.join(out_dir, ".bidsignore"), "w+") as f:
        f.write('*_cbv.*')

    # BIDS needs a descriptor file
    if not dataset_name:
        dataset_name = measurements_base
    description = {
        'Name': dataset_name,
        'BIDSVersion': '1.2.0',
    }
    if dataset_authors:
        description['Authors'] = dataset_authors
    if dataset_license:
        description['License'] = dataset_license
    with open(path.join(out_dir, 'dataset_description.json'), 'w') as f:
        json.dump(description, f, indent=1)
        f.write("\n")

    # define measurement directories to be processed, and populate the list either with the given include_measurements, or with an intelligent selection
    functional_scan_types = diffusion_scan_types = structural_scan_types = []
    data_selection = pd.DataFrame([])
    if structural_match:
        s_data_selection = get_data_selection(
            measurements_base,
            match=structural_match,
            exclude=exclude,
            measurements=measurements,
        )
        if debug:
            print(s_data_selection.columns)
        structural_scan_types = list(s_data_selection['scan_type'].unique())
        struct_ind = s_data_selection.index.tolist()
        data_selection = pd.concat([data_selection, s_data_selection],
                                   sort=True)
    if functional_match:
        f_data_selection = get_data_selection(
            measurements_base,
            match=functional_match,
            exclude=exclude,
            measurements=measurements,
        )
        if debug:
            print(f_data_selection)
        functional_scan_types = list(f_data_selection['scan_type'].unique())
        func_ind = f_data_selection.index.tolist()
        data_selection = pd.concat([data_selection, f_data_selection],
                                   sort=True)
    if diffusion_match:
        d_data_selection = get_data_selection(
            measurements_base,
            match=diffusion_match,
            exclude=exclude,
            measurements=measurements,
        )
        diffusion_scan_types = list(d_data_selection['scan_type'].unique())
        dwi_ind = d_data_selection.index.tolist()
        data_selection = pd.concat([data_selection, d_data_selection],
                                   sort=True)

    # we start to define nipype workflow elements (nodes, connections, meta)
    subjects_sessions = data_selection[["subject", "session"
                                        ]].drop_duplicates().values.tolist()
    if debug:
        print('Data selection:')
        print(data_selection)
        print('Iterating over:')
        print(subjects_sessions)
    infosource = pe.Node(interface=util.IdentityInterface(
        fields=['subject_session'], mandatory_inputs=False),
                         name="infosource")
    infosource.iterables = [('subject_session', subjects_sessions)]

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = out_dir
    datasink.inputs.parameterization = False

    workflow_connections = [
        (infosource, datasink, [(('subject_session', ss_to_path), 'container')
                                ]),
    ]

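    # Each modality below (functional, diffusion, structural) gets its own set of
    # connections and its own workflow, which is assembled, run, and cleaned up independently.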
    if functional_scan_types:
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        f_data_selection.to_csv(path.join(workdir, 'f_data_selection.csv'))
        get_f_scan = pe.Node(
            name='get_f_scan',
            interface=util.Function(
                function=get_bids_scan,
                input_names=inspect.getargspec(get_bids_scan)[0],
                output_names=[
                    'scan_path',
                    'typ',
                    'task',
                    'nii_path',
                    'nii_name',
                    'eventfile_name',
                    'subject_session',
                    'metadata_filename',
                    'dict_slice',
                    'ind_type',
                ]))
        get_f_scan.inputs.ignore_exception = True
        get_f_scan.inputs.data_selection = f_data_selection
        get_f_scan.inputs.bids_base = measurements_base
        get_f_scan.iterables = ("ind_type", func_ind)

        f_bru2nii = pe.Node(interface=Bru2(), name="f_bru2nii")
        f_bru2nii.inputs.actual_size = not inflated_size
        f_bru2nii.inputs.compress = True

        f_metadata_file = pe.Node(
            name='metadata_file',
            interface=util.Function(
                function=write_bids_metadata_file,
                input_names=inspect.getargspec(write_bids_metadata_file)[0],
                output_names=['out_file']))
        f_metadata_file.inputs.extraction_dicts = BIDS_METADATA_EXTRACTION_DICTS

        f_flip = pe.Node(name='f_flip',
                         interface=util.Function(
                             function=flip_if_needed,
                             input_names=inspect.getargspec(flip_if_needed)[0],
                             output_names=['out_file']))
        f_flip.inputs.data_selection = f_data_selection

        events_file = pe.Node(
            name='events_file',
            interface=util.Function(
                function=write_bids_events_file,
                input_names=inspect.getargspec(write_bids_events_file)[0],
                output_names=['out_file']))
        events_file.ignore_exception = True

        physio_file = pe.Node(
            name='physio_file',
            interface=util.Function(
                function=write_bids_physio_file,
                input_names=inspect.getargspec(write_bids_physio_file)[0],
                output_names=['out_file', 'out_metadata_file']))

        workflow_connections = [
            (get_f_scan, datasink, [(('subject_session', ss_to_path),
                                     'container')]),
            (get_f_scan, f_bru2nii, [('scan_path', 'input_dir')]),
            (get_f_scan, f_bru2nii, [('nii_name', 'output_filename')]),
            (get_f_scan, f_flip, [('ind_type', 'ind')]),
            (get_f_scan, f_flip, [('nii_name', 'output_filename')]),
            (f_bru2nii, f_flip, [('nii_file', 'nii_path')]),
            (f_flip, datasink, [('out_file', 'func')]),
            (f_metadata_file, events_file, [('out_file', 'metadata_file')]),
            (f_bru2nii, events_file, [('nii_file', 'timecourse_file')]),
            (get_f_scan, f_metadata_file, [('metadata_filename', 'out_file'),
                                           ('task', 'task'),
                                           ('scan_path', 'scan_dir')]),
            (get_f_scan, events_file, [('eventfile_name', 'out_file'),
                                       ('task', 'task'),
                                       ('scan_path', 'scan_dir')]),
            (get_f_scan, physio_file, [('nii_name', 'nii_name'),
                                       ('scan_path', 'scan_dir')]),
            (events_file, datasink, [('out_file', 'func.@events')]),
            (physio_file, datasink, [('out_file', 'func.@physio')]),
            (physio_file, datasink, [('out_metadata_file', 'func.@meta_physio')
                                     ]),
            (f_metadata_file, datasink, [('out_file', 'func.@metadata')]),
        ]
        crashdump_dir = path.join(out_base, workflow_name + '_crashdump')
        workflow_config = {'execution': {'crashdump_dir': crashdump_dir}}
        if debug:
            workflow_config['logging'] = {
                'workflow_level': 'DEBUG',
                'utils_level': 'DEBUG',
                'interface_level': 'DEBUG',
                'filemanip_level': 'DEBUG',
                'log_to_file': 'true',
            }

        workflow = pe.Workflow(name=workdir_name)
        workflow.connect(workflow_connections)
        workflow.base_dir = path.join(out_base)
        workflow.config = workflow_config
        try:
            workflow.write_graph(dotfilename=path.join(workflow.base_dir,
                                                       workdir_name,
                                                       "graph_functional.dot"),
                                 graph2use="hierarchical",
                                 format="png")
        except OSError:
            print(
                'We could not write the DOT file for visualization (`dot` function from the graphviz package). This is non-critical to the processing, but you should get this fixed.'
            )

        #Execute the workflow
        if not keep_work or not keep_crashdump:
            try:
                workflow.run(plugin="MultiProc",
                             plugin_args={'n_procs': n_procs})
            except RuntimeError:
                pass
        else:
            workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_procs})
        if not keep_work:
            shutil.rmtree(path.join(workflow.base_dir, workdir_name))
        if not keep_crashdump:
            try:
                shutil.rmtree(crashdump_dir)
            except (FileNotFoundError, OSError):
                pass

    if diffusion_scan_types:
        # We check for the directory, since it gets deleted after a successful execution.
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        d_data_selection.to_csv(path.join(workdir, 'd_data_selection.csv'))
        get_d_scan = pe.Node(
            name='get_d_scan',
            interface=util.Function(
                function=get_bids_scan,
                input_names=inspect.getargspec(get_bids_scan)[0],
                output_names=[
                    'scan_path',
                    'typ',
                    'task',
                    'nii_path',
                    'nii_name',
                    'eventfile_name',
                    'subject_session',
                    'metadata_filename',
                    'dict_slice',
                    'ind_type',
                ]))
        get_d_scan.inputs.ignore_exception = True
        get_d_scan.inputs.data_selection = d_data_selection
        get_d_scan.inputs.extra = ['acq']
        get_d_scan.inputs.bids_base = measurements_base
        get_d_scan.iterables = ("ind_type", dwi_ind)

        d_bru2nii = pe.Node(interface=Bru2(), name="d_bru2nii")
        d_bru2nii.inputs.force_conversion = True
        d_bru2nii.inputs.actual_size = not inflated_size
        d_bru2nii.inputs.compress = True

        d_metadata_file = pe.Node(
            name='metadata_file',
            interface=util.Function(
                function=write_bids_metadata_file,
                input_names=inspect.getargspec(write_bids_metadata_file)[0],
                output_names=['out_file']))
        d_metadata_file.inputs.extraction_dicts = BIDS_METADATA_EXTRACTION_DICTS

        workflow_connections = [
            (get_d_scan, datasink, [(('subject_session', ss_to_path),
                                     'container')]),
            (get_d_scan, d_bru2nii, [('scan_path', 'input_dir')]),
            (get_d_scan, d_bru2nii, [('nii_name', 'output_filename')]),
            (d_bru2nii, datasink, [('nii_file', 'dwi')]),
            (get_d_scan, d_metadata_file, [('metadata_filename', 'out_file'),
                                           ('task', 'task'),
                                           ('scan_path', 'scan_dir')]),
            (d_metadata_file, datasink, [('out_file', 'dwi.@metadata')]),
        ]

        crashdump_dir = path.join(out_base, workflow_name + '_crashdump')
        workflow_config = {'execution': {'crashdump_dir': crashdump_dir}}
        if debug:
            workflow_config['logging'] = {
                'workflow_level': 'DEBUG',
                'utils_level': 'DEBUG',
                'interface_level': 'DEBUG',
                'filemanip_level': 'DEBUG',
                'log_to_file': 'true',
            }

        workflow = pe.Workflow(name=workdir_name)
        workflow.connect(workflow_connections)
        workflow.base_dir = path.join(out_base)
        workflow.config = workflow_config
        try:
            workflow.write_graph(dotfilename=path.join(workflow.base_dir,
                                                       workdir_name,
                                                       "graph_diffusion.dot"),
                                 graph2use="hierarchical",
                                 format="png")
        except OSError:
            print(
                'We could not write the DOT file for visualization (`dot` function from the graphviz package). This is non-critical to the processing, but you should get this fixed.'
            )

        #Execute the workflow
        if not keep_work or not keep_crashdump:
            try:
                workflow.run(plugin="MultiProc",
                             plugin_args={'n_procs': n_procs})
            except RuntimeError:
                pass
        else:
            workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_procs})
        if not keep_work:
            shutil.rmtree(path.join(workflow.base_dir, workdir_name))
        if not keep_crashdump:
            try:
                shutil.rmtree(crashdump_dir)
            except (FileNotFoundError, OSError):
                pass

    if structural_scan_types:
        # We check for the directory, since it gets deleted after a successful execution.
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        s_data_selection.to_csv(path.join(workdir, 's_data_selection.csv'))
        get_s_scan = pe.Node(
            name='get_s_scan',
            interface=util.Function(
                function=get_bids_scan,
                input_names=inspect.getargspec(get_bids_scan)[0],
                output_names=[
                    'scan_path',
                    'typ',
                    'task',
                    'nii_path',
                    'nii_name',
                    'eventfile_name',
                    'subject_session',
                    'metadata_filename',
                    'dict_slice',
                    'ind_type',
                ]))
        get_s_scan.inputs.ignore_exception = True
        get_s_scan.inputs.data_selection = s_data_selection
        get_s_scan.inputs.extra = ['acq']
        get_s_scan.inputs.bids_base = measurements_base
        get_s_scan.iterables = ("ind_type", struct_ind)

        s_bru2nii = pe.Node(interface=Bru2(), name="s_bru2nii")
        s_bru2nii.inputs.force_conversion = True
        s_bru2nii.inputs.actual_size = not inflated_size
        s_bru2nii.inputs.compress = True

        s_metadata_file = pe.Node(
            name='metadata_file',
            interface=util.Function(
                function=write_bids_metadata_file,
                input_names=inspect.getargspec(write_bids_metadata_file)[0],
                output_names=['out_file']))
        s_metadata_file.inputs.extraction_dicts = BIDS_METADATA_EXTRACTION_DICTS

        s_flip = pe.Node(name='s_flip',
                         interface=util.Function(
                             function=flip_if_needed,
                             input_names=inspect.getargspec(flip_if_needed)[0],
                             output_names=['out_file']))
        s_flip.inputs.data_selection = s_data_selection

        workflow_connections = [
            (get_s_scan, datasink, [(('subject_session', ss_to_path),
                                     'container')]),
            (get_s_scan, s_bru2nii, [('scan_path', 'input_dir')]),
            (get_s_scan, s_bru2nii, [('nii_name', 'output_filename')]),
            (get_s_scan, s_flip, [('ind_type', 'ind')]),
            (get_s_scan, s_flip, [('nii_name', 'output_filename')]),
            (s_bru2nii, s_flip, [('nii_file', 'nii_path')]),
            (s_flip, datasink, [('out_file', 'anat')]),
            (get_s_scan, s_metadata_file, [('metadata_filename', 'out_file'),
                                           ('task', 'task'),
                                           ('scan_path', 'scan_dir')]),
            (s_metadata_file, datasink, [('out_file', 'anat.@metadata')]),
        ]
        crashdump_dir = path.join(out_base, workflow_name + '_crashdump')
        workflow_config = {'execution': {'crashdump_dir': crashdump_dir}}
        if debug:
            workflow_config['logging'] = {
                'workflow_level': 'DEBUG',
                'utils_level': 'DEBUG',
                'interface_level': 'DEBUG',
                'filemanip_level': 'DEBUG',
                'log_to_file': 'true',
            }

        workflow = pe.Workflow(name=workdir_name)
        workflow.connect(workflow_connections)
        workflow.base_dir = path.join(out_base)
        workflow.config = workflow_config
        try:
            workflow.write_graph(dotfilename=path.join(workflow.base_dir,
                                                       workdir_name,
                                                       "graph_structural.dot"),
                                 graph2use="hierarchical",
                                 format="png")
        except OSError:
            print(
                'We could not write the DOT file for visualization (`dot` function from the graphviz package). This is non-critical to the processing, but you should get this fixed.'
            )

        #Execute the workflow
        if not keep_work or not keep_crashdump:
            try:
                workflow.run(plugin="MultiProc",
                             plugin_args={'n_procs': n_procs})
            except RuntimeError:
                pass
        else:
            workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_procs})
        if not keep_work:
            shutil.rmtree(path.join(workflow.base_dir, workdir_name))
        if not keep_crashdump:
            try:
                shutil.rmtree(crashdump_dir)
            except (FileNotFoundError, OSError):
                pass

    # Create sessions files
    sessions_file(out_dir, data_selection)

    # Introduce the notion of validation:
    print(
        '\n'
        'USER NOTICE:\n'
        'To ensure conformity with the most recent release of the BIDS standard, you may want to submit the dataset to the online\n'
        'validator (this will *not* require you to actually upload any of the data):\n'
        'https://bids-standard.github.io/bids-validator/')
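
A hypothetical invocation of the BIDS conversion workflow; the paths and match criteria below are placeholders:

# Hypothetical call: the paths and match criteria are placeholders.
bru2bids(
    '~/data/measurements',
    out_base='~/data/bids_out',
    functional_match={'task': ['CogB']},
    structural_match={'acquisition': ['TurboRARE']},
    keep_crashdump=True,
)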
Example 5
def bru2bids(
    measurements_base,
    measurements=[],
    actual_size=True,
    debug=False,
    exclude={},
    functional_match={},
    keep_crashdump=False,
    keep_work=False,
    n_procs=N_PROCS,
    structural_match={},
):
    """
	Convert and reorganize Bruker "raw" directories (2dseq and ParaVision-formatted metadata files) into a BIDS-organized file hierarchy containing NIfTI files and associated metadata.
	If any exist, this workflow also reposits COSplay event files (already written according to BIDS) in the correct place in the output hierarchy.

	Parameters
	----------

	measurements_base : str
		Path of the top level directory containing all the Bruker scan directories to be converted and reformatted.
	actual_size : bool, optional
		Whether to conserve the voxel size reported by the scanner when converting the data to NIfTI.
		Setting this to `False` multiplies the voxel edge lengths by 10 (i.e. the volume by 1000); this is occasionally done in hackish small animal pipelines, which use routines designed exclusively for human data.
		Unless you are looking to reproduce such a workflow, this should be set to `True`.
	debug : bool, optional
		Whether to enable debug support.
		This prints the data selection before passing it to the nipype workflow management system, and turns on debug support in nipype (leading to more verbose logging).
	exclude : dict, optional
		A dictionary with any combination of "session", "subject", "trial" , and "acquisition" as keys and corresponding identifiers as values.
		Only scans not matching any of the listed criteria will be included in the workflow - i.e. this is a blacklist (for functional and structural scans).
	functional_match : dict, optional
		A dictionary with any combination of "session", "subject", "trial", and "acquisition" as keys and corresponding lists of identifiers as values.
		Only functional scans matching all identifiers will be included - i.e. this is a whitelist.
	keep_work : bool, optional
		Whether to keep the work directory (containing all the intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	keep_crashdump : bool, optional
		Whether to keep the crashdump directory (containing all the crash reports for intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	n_procs : int, optional
		Maximum number of processes to spawn simultaneously for the workflow.
		If not explicitly defined, this is automatically calculated from the number of available cores, under the assumption that the workflow will be the main process running for the duration of its execution.
	structural_match : dict, optional
		A dictionary with any combination of "session", "subject", "trial", and "acquisition" as keys and corresponding lists of identifiers as values.
		Only structural scans matching all identifiers will be included - i.e. this is a whitelist.
	"""

    measurements_base = path.abspath(path.expanduser(measurements_base))

    # define measurement directories to be processed, and populate the list either with the given include_measurements, or with an intelligent selection
    data_selection = pd.DataFrame([])
    if structural_match:
        s_data_selection = get_data_selection(
            measurements_base,
            match=structural_match,
            exclude=exclude,
            measurements=measurements,
        )
        structural_scan_types = s_data_selection['scan_type'].unique()
        data_selection = pd.concat([data_selection, s_data_selection])
    if functional_match:
        f_data_selection = get_data_selection(
            measurements_base,
            match=functional_match,
            exclude=exclude,
            measurements=measurements,
        )
        functional_scan_types = f_data_selection['scan_type'].unique()
        data_selection = pd.concat([data_selection, f_data_selection])

    # we start to define nipype workflow elements (nodes, connections, meta)
    subjects_sessions = data_selection[["subject", "session"
                                        ]].drop_duplicates().values.tolist()
    if debug:
        print('Data selection:')
        print(data_selection)
        print('Iterating over:')
        print(subjects_sessions)
    infosource = pe.Node(interface=util.IdentityInterface(
        fields=['subject_session'], mandatory_inputs=False),
                         name="infosource")
    infosource.iterables = [('subject_session', subjects_sessions)]

    get_f_scan = pe.Node(name='get_f_scan',
                         interface=util.Function(
                             function=get_scan,
                             input_names=inspect.getargspec(get_scan)[0],
                             output_names=['scan_path', 'scan_type', 'trial']))
    get_f_scan.inputs.ignore_exception = True
    get_f_scan.inputs.data_selection = data_selection
    get_f_scan.inputs.measurements_base = measurements_base
    get_f_scan.iterables = ("scan_type", functional_scan_types)

    f_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="f_bru2nii")
    f_bru2nii.inputs.actual_size = actual_size

    f_filename = pe.Node(name='f_filename',
                         interface=util.Function(
                             function=bids_naming,
                             input_names=inspect.getargspec(bids_naming)[0],
                             output_names=['filename']))
    f_filename.inputs.metadata = data_selection
    f_filename.inputs.extension = ''

    f_metadata_filename = pe.Node(
        name='f_metadata_filename',
        interface=util.Function(function=bids_naming,
                                input_names=inspect.getargspec(bids_naming)[0],
                                output_names=['filename']))
    f_metadata_filename.inputs.extension = ".json"
    f_metadata_filename.inputs.metadata = data_selection

    events_filename = pe.Node(
        name='bids_stim_filename',
        interface=util.Function(function=bids_naming,
                                input_names=inspect.getargspec(bids_naming)[0],
                                output_names=['filename']))
    events_filename.inputs.suffix = "events"
    events_filename.inputs.extension = ".tsv"
    events_filename.inputs.metadata = data_selection
    events_filename.ignore_exception = True

    f_metadata_file = pe.Node(
        name='metadata_file',
        interface=util.Function(
            function=write_bids_metadata_file,
            input_names=inspect.getargspec(write_bids_metadata_file)[0],
            output_names=['out_file']))
    f_metadata_file.inputs.extraction_dicts = BIDS_METADATA_EXTRACTION_DICTS

    events_file = pe.Node(
        name='events_file',
        interface=util.Function(
            function=write_events_file,
            input_names=inspect.getargspec(write_events_file)[0],
            output_names=['out_file']))
    events_file.ignore_exception = True

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(measurements_base, "bids")
    datasink.inputs.parameterization = False

    workflow_connections = [
        (infosource, datasink, [(('subject_session', ss_to_path), 'container')
                                ]),
        (infosource, get_f_scan, [('subject_session', 'selector')]),
        (infosource, f_metadata_filename, [('subject_session',
                                            'subject_session')]),
        (infosource, f_filename, [('subject_session', 'subject_session')]),
        (infosource, events_filename, [('subject_session', 'subject_session')
                                       ]),
        (get_f_scan, f_metadata_filename, [('scan_type', 'scan_type')]),
        (get_f_scan, f_filename, [('scan_type', 'scan_type')]),
        (get_f_scan, f_bru2nii, [('scan_path', 'input_dir')]),
        (get_f_scan, f_metadata_file, [('scan_path', 'scan_dir')]),
        (f_metadata_filename, f_metadata_file, [('filename', 'out_file')]),
        (f_filename, f_bru2nii, [('filename', 'output_filename')]),
        (events_filename, events_file, [('filename', 'out_file')]),
        (f_metadata_file, events_file, [('out_file', 'metadata_file')]),
        (f_bru2nii, events_file, [('nii_file', 'timecourse_file')]),
        (get_f_scan, events_filename, [('scan_type', 'scan_type')]),
        (f_bru2nii, datasink, [('nii_file', 'func')]),
        (get_f_scan, events_file, [('trial', 'trial'),
                                   ('scan_path', 'scan_dir')]),
        (events_file, datasink, [('out_file', 'func.@events')]),
        (f_metadata_file, datasink, [('out_file', 'func.@metadata')]),
    ]

    crashdump_dir = path.join(measurements_base, 'bids_crashdump')
    workflow_config = {'execution': {'crashdump_dir': crashdump_dir}}
    if debug:
        workflow_config['logging'] = {
            'workflow_level': 'DEBUG',
            'utils_level': 'DEBUG',
            'interface_level': 'DEBUG',
            'filemanip_level': 'DEBUG',
            'log_to_file': 'true',
        }

    workdir_name = 'bids_work'
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = path.join(measurements_base)
    workflow.config = workflow_config
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    if not keep_work or not keep_crashdump:
        try:
            workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_procs})
        except RuntimeError:
            pass
    else:
        workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_procs})
    if not keep_work:
        shutil.rmtree(path.join(workflow.base_dir, workdir_name))
    if not keep_crashdump:
        try:
            shutil.rmtree(crashdump_dir)
        except FileNotFoundError:
            pass

    try:
        if structural_scan_types.any():
            get_s_scan = pe.Node(
                name='get_s_scan',
                interface=util.Function(
                    function=get_scan,
                    input_names=inspect.getargspec(get_scan)[0],
                    output_names=['scan_path', 'scan_type', 'trial']))
            get_s_scan.inputs.ignore_exception = True
            get_s_scan.inputs.data_selection = data_selection
            get_s_scan.inputs.measurements_base = measurements_base
            get_s_scan.iterables = ("scan_type", structural_scan_types)

            s_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="s_bru2nii")
            s_bru2nii.inputs.force_conversion = True
            s_bru2nii.inputs.actual_size = actual_size

            s_filename = pe.Node(
                name='s_filename',
                interface=util.Function(
                    function=bids_naming,
                    input_names=inspect.getargspec(bids_naming)[0],
                    output_names=['filename']))
            s_filename.inputs.metadata = data_selection
            s_filename.inputs.extension = ''

            s_metadata_filename = pe.Node(
                name='s_metadata_filename',
                interface=util.Function(
                    function=bids_naming,
                    input_names=inspect.getargspec(bids_naming)[0],
                    output_names=['filename']))
            s_metadata_filename.inputs.extension = ".json"
            s_metadata_filename.inputs.metadata = data_selection

            s_metadata_file = pe.Node(name='s_metadata_file',
                                      interface=util.Function(
                                          function=write_bids_metadata_file,
                                          input_names=inspect.getargspec(
                                              write_bids_metadata_file)[0],
                                          output_names=['out_file']))
            s_metadata_file.inputs.extraction_dicts = BIDS_METADATA_EXTRACTION_DICTS

            workflow_connections = [
                (infosource, datasink, [(('subject_session', ss_to_path),
                                         'container')]),
                (infosource, get_s_scan, [('subject_session', 'selector')]),
                (infosource, s_filename, [('subject_session',
                                           'subject_session')]),
                (infosource, s_metadata_filename, [('subject_session',
                                                    'subject_session')]),
                (get_s_scan, s_bru2nii, [('scan_path', 'input_dir')]),
                (get_s_scan, s_filename, [('scan_type', 'scan_type')]),
                (get_s_scan, s_metadata_filename, [('scan_type', 'scan_type')
                                                   ]),
                (get_s_scan, s_metadata_file, [('scan_path', 'scan_dir')]),
                (s_filename, s_bru2nii, [('filename', 'output_filename')]),
                (s_metadata_filename, s_metadata_file, [('filename',
                                                         'out_file')]),
                (s_bru2nii, datasink, [('nii_file', 'anat')]),
                (s_metadata_file, datasink, [('out_file', 'anat.@metadata')]),
            ]
            crashdump_dir = path.join(measurements_base, 'bids_crashdump')
            workflow_config = {'execution': {'crashdump_dir': crashdump_dir}}
            if debug:
                workflow_config['logging'] = {
                    'workflow_level': 'DEBUG',
                    'utils_level': 'DEBUG',
                    'interface_level': 'DEBUG',
                    'filemanip_level': 'DEBUG',
                    'log_to_file': 'true',
                }

            workdir_name = 'bids_work'
            workflow = pe.Workflow(name=workdir_name)
            workflow.connect(workflow_connections)
            workflow.base_dir = path.join(measurements_base)
            workflow.config = workflow_config
            workflow.write_graph(dotfilename=path.join(workflow.base_dir,
                                                       workdir_name,
                                                       "graph.dot"),
                                 graph2use="hierarchical",
                                 format="png")

            if not keep_work or not keep_crashdump:
                try:
                    workflow.run(plugin="MultiProc",
                                 plugin_args={'n_procs': n_procs})
                except RuntimeError:
                    pass
            else:
                workflow.run(plugin="MultiProc",
                             plugin_args={'n_procs': n_procs})
            if not keep_work:
                shutil.rmtree(path.join(workflow.base_dir, workdir_name))
            if not keep_crashdump:
                try:
                    shutil.rmtree(crashdump_dir)
                except FileNotFoundError:
                    pass
    except UnboundLocalError:
        pass
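The listings in this collection repeatedly wrap plain Python helpers (bids_naming, write_bids_metadata_file, write_events_file, get_scan) into nipype nodes via util.Function, deriving the node's input names from the helper's signature. The following is a minimal, hedged sketch of that pattern; add_suffix is a hypothetical stand-in for the real SAMRI helpers, and inspect.getfullargspec is used because getargspec has been removed from recent Python versions.

import inspect
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util

def add_suffix(filename, suffix='_bids'):
	# Hypothetical helper standing in for e.g. bids_naming; it only builds a string.
	return filename + suffix

# Wrap the helper as a workflow node, pulling its input names from the signature.
add_suffix_node = pe.Node(
	name='add_suffix',
	interface=util.Function(
		function=add_suffix,
		input_names=inspect.getfullargspec(add_suffix)[0],
		output_names=['out_name'],
		))
add_suffix_node.inputs.filename = 'sub-1_ses-1'

Nodes defined this way are then linked through (source, target, [(output, input)]) tuples, exactly like the connection lists above, before being passed to workflow.connect().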
Esempio n. 6
0
def bruker(measurements_base,
	functional_scan_types=[],
	structural_scan_types=[],
	sessions=[],
	subjects=[],
	measurements=[],
	exclude_subjects=[],
	exclude_measurements=[],
	actual_size=False,
	functional_blur_xy=False,
	functional_registration_method="structural",
	highpass_sigma=225,
	lowpass_sigma=None,
	negative_contrast_agent=False,
	n_procs=N_PROCS,
	realign=True,
	registration_mask=False,
	template="/home/chymera/ni_data/templates/ds_QBI_chr.nii.gz",
	tr=1,
	very_nasty_bruker_delay_hack=False,
	workflow_name="generic",
	keep_work=False,
	autorotate=False,
	strict=False,
	):

	measurements_base = os.path.abspath(os.path.expanduser(measurements_base))

	#select all functional/structural scan types unless specified
	if not functional_scan_types or not structural_scan_types:
		scan_classification = pd.read_csv(scan_classification_file_path)
		if not functional_scan_types:
			functional_scan_types = list(scan_classification[(scan_classification["categories"] == "functional")]["scan_type"])
		if not structural_scan_types:
			structural_scan_types = list(scan_classification[(scan_classification["categories"] == "structural")]["scan_type"])

	#hack to allow structural scan type disabling:
	if structural_scan_types == -1:
		structural_scan_types = []

	# define measurement directories to be processed, and populate the list either with the given `measurements`, or with an intelligent selection
	scan_types = deepcopy(functional_scan_types)
	scan_types.extend(structural_scan_types)
	data_selection=get_data_selection(measurements_base, sessions, scan_types=scan_types, subjects=subjects, exclude_subjects=exclude_subjects, measurements=measurements, exclude_measurements=exclude_measurements)
	if not subjects:
		subjects = set(list(data_selection["subject"]))
	if not sessions:
		sessions = set(list(data_selection["session"]))

	# presumably only the first structural scan type is needed when it serves as the registration target
	if structural_scan_types and functional_registration_method == "structural":
		structural_scan_types = [structural_scan_types[0]]

	# here we start to define the nipype workflow elements (nodes, connections, meta)
	subjects_sessions = data_selection[["subject","session"]].drop_duplicates().values.tolist()
	infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_session']), name="infosource")
	infosource.iterables = [('subject_session', subjects_sessions)]

	get_f_scan = pe.Node(name='get_f_scan', interface=util.Function(function=get_scan,input_names=inspect.getargspec(get_scan)[0], output_names=['scan_path','scan_type']))
	if not strict:
		get_f_scan.inputs.ignore_exception = True
	get_f_scan.inputs.data_selection = data_selection
	get_f_scan.inputs.measurements_base = measurements_base
	get_f_scan.iterables = ("scan_type", functional_scan_types)

	f_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="f_bru2nii")
	f_bru2nii.inputs.actual_size=actual_size

	dummy_scans = pe.Node(name='dummy_scans', interface=util.Function(function=force_dummy_scans,input_names=inspect.getargspec(force_dummy_scans)[0], output_names=['out_file']))
	dummy_scans.inputs.desired_dummy_scans = DUMMY_SCANS

	events_file = pe.Node(name='events_file', interface=util.Function(function=write_events_file,input_names=inspect.getargspec(write_events_file)[0], output_names=['out_file']))
	events_file.inputs.dummy_scans_ms = DUMMY_SCANS * tr * 1000
	events_file.inputs.stim_protocol_dictionary = STIM_PROTOCOL_DICTIONARY
	events_file.inputs.very_nasty_bruker_delay_hack = very_nasty_bruker_delay_hack

	if realign:
		realigner = pe.Node(interface=nipy.SpaceTimeRealigner(), name="realigner")
		realigner.inputs.slice_times = "asc_alt_2"
		realigner.inputs.tr = tr
		realigner.inputs.slice_info = 3 #3 for coronal slices (2 for horizontal, 1 for sagittal)

	bandpass = pe.Node(interface=fsl.maths.TemporalFilter(), name="bandpass")
	bandpass.inputs.highpass_sigma = highpass_sigma
	if lowpass_sigma:
		bandpass.inputs.lowpass_sigma = lowpass_sigma
	else:
		bandpass.inputs.lowpass_sigma = tr

	bids_filename = pe.Node(name='bids_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))

	bids_stim_filename = pe.Node(name='bids_stim_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
	bids_stim_filename.inputs.suffix = "events"
	bids_stim_filename.inputs.extension = ".tsv"

	datasink = pe.Node(nio.DataSink(), name='datasink')
	datasink.inputs.base_directory = path.join(measurements_base,"preprocessing",workflow_name)
	datasink.inputs.parameterization = False

	workflow_connections = [
		(infosource, get_f_scan, [('subject_session', 'selector')]),
		(infosource, bids_stim_filename, [('subject_session', 'subject_session')]),
		(get_f_scan, bids_stim_filename, [('scan_type', 'scan')]),
		(get_f_scan, f_bru2nii, [('scan_path', 'input_dir')]),
		(f_bru2nii, dummy_scans, [('nii_file', 'in_file')]),
		(get_f_scan, dummy_scans, [('scan_path', 'scan_dir')]),
		(get_f_scan, events_file, [
			('scan_type', 'scan_type'),
			('scan_path', 'scan_dir')
			]),
		(events_file, datasink, [('out_file', 'func.@events')]),
		(bids_stim_filename, events_file, [('filename', 'out_file')]),
		(infosource, datasink, [(('subject_session',ss_to_path), 'container')]),
		(infosource, bids_filename, [('subject_session', 'subject_session')]),
		(get_f_scan, bids_filename, [('scan_type', 'scan')]),
		(bids_filename, bandpass, [('filename', 'out_file')]),
		(bandpass, datasink, [('out_file', 'func')]),
		]

	if realign:
		workflow_connections.extend([
			(dummy_scans, realigner, [('out_file', 'in_file')]),
			])

	#ADDING SELECTABLE NODES AND EXTENDING WORKFLOW AS APPROPRIATE:
	if structural_scan_types:
		get_s_scan = pe.Node(name='get_s_scan', interface=util.Function(function=get_scan,input_names=inspect.getargspec(get_scan)[0], output_names=['scan_path','scan_type']))
		if not strict:
			get_s_scan.inputs.ignore_exception = True
		get_s_scan.inputs.data_selection = data_selection
		get_s_scan.inputs.measurements_base = measurements_base
		get_s_scan.iterables = ("scan_type", structural_scan_types)

		s_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="s_bru2nii")
		s_bru2nii.inputs.force_conversion=True
		s_bru2nii.inputs.actual_size=actual_size

		if "DSURQEc" in template:
			s_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(), name="s_biascorrect")
			s_biascorrect.inputs.dimension = 3
			s_biascorrect.inputs.bspline_fitting_distance = 10
			s_biascorrect.inputs.bspline_order = 4
			s_biascorrect.inputs.shrink_factor = 2
			s_biascorrect.inputs.n_iterations = [150,100,50,30]
			s_biascorrect.inputs.convergence_threshold = 1e-16
			# also capture f_warp, which is referenced below when functional_registration_method == "structural"
			s_register, s_warp, _, f_warp = DSURQEc_structural_registration(template, registration_mask)
			#TODO: incl. in func registration
			if autorotate:
				workflow_connections.extend([
					(s_biascorrect, s_rotated, [('output_image', 'out_file')]),
					(s_rotated, s_register, [('out_file', 'moving_image')]),
					])
			else:
				workflow_connections.extend([
					(s_biascorrect, s_register, [('output_image', 'moving_image')]),
					(s_register, s_warp, [('composite_transform', 'transforms')]),
					(s_bru2nii, s_warp, [('nii_file', 'input_image')]),
					(s_warp, datasink, [('output_image', 'anat')]),
					])
		else:
			s_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(), name="s_biascorrect")
			s_biascorrect.inputs.dimension = 3
			s_biascorrect.inputs.bspline_fitting_distance = 100
			s_biascorrect.inputs.shrink_factor = 2
			s_biascorrect.inputs.n_iterations = [200,200,200,200]
			s_biascorrect.inputs.convergence_threshold = 1e-11

			s_reg_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(), name="s_reg_biascorrect")
			s_reg_biascorrect.inputs.dimension = 3
			s_reg_biascorrect.inputs.bspline_fitting_distance = 95
			s_reg_biascorrect.inputs.shrink_factor = 2
			s_reg_biascorrect.inputs.n_iterations = [500,500,500,500]
			s_reg_biascorrect.inputs.convergence_threshold = 1e-14

			s_cutoff = pe.Node(interface=fsl.ImageMaths(), name="s_cutoff")
			s_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"

			s_BET = pe.Node(interface=fsl.BET(), name="s_BET")
			s_BET.inputs.mask = True
			s_BET.inputs.frac = 0.3
			s_BET.inputs.robust = True

			s_mask = pe.Node(interface=fsl.ApplyMask(), name="s_mask")
			# also capture f_warp (assuming the same return order as DSURQEc_structural_registration),
			# as it is referenced below when functional_registration_method == "structural"
			s_register, s_warp, _, f_warp = structural_registration(template)

			workflow_connections.extend([
				(s_bru2nii, s_reg_biascorrect, [('nii_file', 'input_image')]),
				(s_reg_biascorrect, s_cutoff, [('output_image', 'in_file')]),
				(s_cutoff, s_BET, [('out_file', 'in_file')]),
				(s_biascorrect, s_mask, [('output_image', 'in_file')]),
				(s_BET, s_mask, [('mask_file', 'mask_file')]),
				])

			#TODO: incl. in func registration
			if autorotate:
				workflow_connections.extend([
					(s_mask, s_rotated, [('out_file', 'out_file')]),
					(s_rotated, s_register, [('out_file', 'moving_image')]),
					])
			else:
				workflow_connections.extend([
					(s_mask, s_register, [('out_file', 'moving_image')]),
					(s_register, s_warp, [('composite_transform', 'transforms')]),
					(s_bru2nii, s_warp, [('nii_file', 'input_image')]),
					(s_warp, datasink, [('output_image', 'anat')]),
					])


		# NOTE: `autorotate` here is the boolean parameter of this function, so calling it below
		# will fail if autorotation is enabled; `s_rotated` is also referenced in the connections
		# above before it is defined here.
		if autorotate:
			s_rotated = autorotate(template)

		s_bids_filename = pe.Node(name='s_bids_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
		s_bids_filename.inputs.scan_prefix = False

		workflow_connections.extend([
			(infosource, get_s_scan, [('subject_session', 'selector')]),
			(infosource, s_bids_filename, [('subject_session', 'subject_session')]),
			(get_s_scan, s_bru2nii, [('scan_path','input_dir')]),
			(get_s_scan, s_bids_filename, [('scan_type', 'scan')]),
			(s_bids_filename, s_warp, [('filename','output_image')]),
			(s_bru2nii, s_biascorrect, [('nii_file', 'input_image')]),
			])



	if functional_registration_method == "structural":
		if not structural_scan_types:
			raise ValueError('The option `registration="structural"` requires there to be a structural scan type.')
		workflow_connections.extend([
			(s_register, f_warp, [('composite_transform', 'transforms')]),
			])
		if realign:
			workflow_connections.extend([
				(realigner, f_warp, [('out_file', 'input_image')]),
				])
		else:
			workflow_connections.extend([
				(dummy_scans, f_warp, [('out_file', 'input_image')]),
				])


	if functional_registration_method == "composite":
		if not structural_scan_types:
			raise ValueError('The option `registration="composite"` requires there to be a structural scan type.')
		_, _, f_register, f_warp = DSURQEc_structural_registration(template, registration_mask)

		temporal_mean = pe.Node(interface=fsl.MeanImage(), name="temporal_mean")

		f_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(), name="f_biascorrect")
		f_biascorrect.inputs.dimension = 3
		f_biascorrect.inputs.bspline_fitting_distance = 100
		f_biascorrect.inputs.shrink_factor = 2
		f_biascorrect.inputs.n_iterations = [200,200,200,200]
		f_biascorrect.inputs.convergence_threshold = 1e-11

		merge = pe.Node(util.Merge(2), name='merge')

		workflow_connections.extend([
			(temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
			(f_biascorrect, f_register, [('output_image', 'moving_image')]),
			(s_biascorrect, f_register, [('output_image', 'fixed_image')]),
			(f_register, merge, [('composite_transform', 'in1')]),
			(s_register, merge, [('composite_transform', 'in2')]),
			(merge, f_warp, [('out', 'transforms')]),
			])
		if realign:
			workflow_connections.extend([
				(realigner, temporal_mean, [('out_file', 'in_file')]),
				(realigner, f_warp, [('out_file', 'input_image')]),
				])
		else:
			workflow_connections.extend([
				(dummy_scans, temporal_mean, [('out_file', 'in_file')]),
				(dummy_scans, f_warp, [('out_file', 'input_image')]),
				])

	elif functional_registration_method == "functional":
		f_register, f_warp = functional_registration(template)

		temporal_mean = pe.Node(interface=fsl.MeanImage(), name="temporal_mean")

		f_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(), name="f_biascorrect")
		f_biascorrect.inputs.dimension = 3
		f_biascorrect.inputs.bspline_fitting_distance = 100
		f_biascorrect.inputs.shrink_factor = 2
		f_biascorrect.inputs.n_iterations = [200,200,200,200]
		f_biascorrect.inputs.convergence_threshold = 1e-11

		f_cutoff = pe.Node(interface=fsl.ImageMaths(), name="f_cutoff")
		f_cutoff.inputs.op_string = "-thrP 30"

		f_BET = pe.Node(interface=fsl.BET(), name="f_BET")
		f_BET.inputs.mask = True
		f_BET.inputs.frac = 0.5

		workflow_connections.extend([
			(temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
			(f_biascorrect, f_cutoff, [('output_image', 'in_file')]),
			(f_cutoff, f_BET, [('out_file', 'in_file')]),
			(f_BET, f_register, [('out_file', 'moving_image')]),
			(f_register, f_warp, [('composite_transform', 'transforms')]),
			])
		if realign:
			workflow_connections.extend([
				(realigner, temporal_mean, [('out_file', 'in_file')]),
				(realigner, f_warp, [('out_file', 'input_image')]),
				])
		else:
			workflow_connections.extend([
				(dummy_scans, temporal_mean, [('out_file', 'in_file')]),
				(dummy_scans, f_warp, [('out_file', 'input_image')]),
				])


	invert = pe.Node(interface=fsl.ImageMaths(), name="invert")
	if functional_blur_xy and negative_contrast_agent:
		blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
		blur.inputs.fwhmxy = functional_blur_xy
		workflow_connections.extend([
			(f_warp, blur, [('output_image', 'in_file')]),
			(blur, invert, [(('out_file', fslmaths_invert_values), 'op_string')]),
			(blur, invert, [('out_file', 'in_file')]),
			(invert, bandpass, [('out_file', 'in_file')]),
			])
	elif functional_blur_xy:
		blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
		blur.inputs.fwhmxy = functional_blur_xy
		workflow_connections.extend([
			(f_warp, blur, [('output_image', 'in_file')]),
			(blur, bandpass, [('out_file', 'in_file')]),
			])
	elif negative_contrast_agent:
		blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
		blur.inputs.fwhmxy = functional_blur_xy
		workflow_connections.extend([
			(f_warp, invert, [(('output_image', fslmaths_invert_values), 'op_string')]),
			(f_warp, invert, [('output_image', 'in_file')]),
			(invert, bandpass, [('out_file', 'in_file')]),
			])
	else:
		workflow_connections.extend([
			(f_warp, bandpass, [('output_image', 'in_file')]),
			])

	workdir_name = workflow_name+"_work"
	workflow = pe.Workflow(name=workdir_name)
	workflow.connect(workflow_connections)
	workflow.base_dir = path.join(measurements_base,"preprocessing")
	workflow.config = {"execution": {"crashdump_dir": path.join(measurements_base,"preprocessing/crashdump")}}
	workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph.dot"), graph2use="hierarchical", format="png")

	workflow.run(plugin="MultiProc",  plugin_args={'n_procs' : n_procs})
	if not keep_work:
		shutil.rmtree(path.join(workflow.base_dir,workdir_name))
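A hedged invocation sketch for the bruker() preprocessing listing above; the dataset path is a placeholder, and only keyword arguments present in the signature are used, with values chosen for illustration rather than taken from any actual analysis.

# Hypothetical call into the bruker() listing above; the path is a placeholder.
bruker('~/ni_data/my_dataset',
	workflow_name='generic',
	negative_contrast_agent=True,
	realign=True,
	keep_work=False,
	)

With no scan types passed explicitly, the first lines of the listing fall back to the scan classification table to select functional and structural scan types automatically; the results are written under <measurements_base>/preprocessing/<workflow_name> by the datasink node.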
Esempio n. 7
0
def metadata():
	from samri.pipelines.extra_functions import get_data_selection
	info = get_data_selection('~/ni_data/test')
	print(info)
Esempio n. 8
0
def bru2bids(measurements_base,
	measurements=[],
	inflated_size=False,
	bids_extra=['acq','run'],
	dataset_name=False,
	debug=False,
	diffusion_match={},
	exclude={},
	functional_match={},
	keep_crashdump=False,
	keep_work=False,
	n_procs=N_PROCS,
	out_base=None,
	structural_match={},
	workflow_name='bids',
	):
	"""
	Convert and reorganize Bruker "raw" directories (2dseq and ParaVision-formatted metadata files) into a BIDS-organized file hierarchy containing NIfTI files and associated metadata.
	If any exist, this workflow also reposits COSplay event files (already written according to BIDS) in the correct place in the output hierarchy.

	Parameters
	----------

	measurements_base : str
		Path of the top level directory containing all the Bruker scan directories to be converted and reformatted.
	bids_extra : list, optional
		List of strings denoting optional BIDS fields to include in the resulting file names.
		Accepted items are 'acq' and 'run'.
	inflated_size : bool, optional
		Whether to inflate the voxel size reported by the scanner when converting the data to NIfTI.
		Setting this to `True` multiplies the voxel edge lengths by 10 (i.e. the volume by 1000); this is occasionally done in some small animal pipelines, which use routines designed exclusively for human data.
		Unless you are looking to reproduce such a workflow, this should be set to `False`.
	dataset_name : string, optional
		A dataset name that will be written into the BIDS metadata file.
		Generally not needed, as by default we use the dataset path to satisfy this BIDS requirement.
	debug : bool, optional
		Whether to enable debug support.
		This prints the data selection before passing it to the nipype workflow management system, and turns on debug support in nipype (leading to more verbose logging).
	diffusion_match : dict, optional
		A dictionary with any combination of "session", "subject", "task", and "acquisition" as keys and corresponding lists of identifiers as values.
		Only diffusion scans matching all identifiers will be included - i.e. this is a whitelist.
	exclude : dict, optional
		A dictionary with any combination of "session", "subject", "task" , and "acquisition" as keys and corresponding identifiers as values.
		Only scans not matching any of the listed criteria will be included in the workflow - i.e. this is a blacklist (for functional and structural scans).
	functional_match : dict, optional
		A dictionary with any combination of "session", "subject", "task", and "acquisition" as keys and corresponding lists of identifiers as values.
		Only functional scans matching all identifiers will be included - i.e. this is a whitelist.
	keep_work : bool, optional
		Whether to keep the work directory (containing all the intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	keep_crashdump : bool, optional
		Whether to keep the crashdump directory (containing all the crash reports for intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	n_procs : int, optional
		Maximum number of processes to spawn simultaneously for the workflow.
		If not explicitly defined, this is automatically calculated from the number of available cores, under the assumption that the workflow will be the main process running for the duration of its execution.
	out_base : str, optional
		Base directory in which to place the BIDS reposited data.
		If not specified, the BIDS records will be created in the `measurements_base` directory.
	structural_match : dict, optional
		A dictionary with any combination of "session", "subject", "task", and "acquisition" as keys and corresponding lists of identifiers as values.
		Only structural scans matching all identifiers will be included - i.e. this is a whitelist.
	workflow_name : str, optional
		Top level name for the output directory.
	"""

	measurements_base = path.abspath(path.expanduser(measurements_base))
	if out_base:
		out_base = path.abspath(path.expanduser(out_base))
	else:
		out_base = measurements_base
	out_dir = path.join(out_base,workflow_name)
	workdir_name = workflow_name+'_work'
	workdir = path.join(out_base,workdir_name)

	# define measurement directories to be processed, and populate the list either with the given `measurements`, or with an intelligent selection
	functional_scan_types = diffusion_scan_types = structural_scan_types = []
	data_selection = pd.DataFrame([])
	if structural_match:
		s_data_selection = get_data_selection(measurements_base,
			match=structural_match,
			exclude=exclude,
			measurements=measurements,
			)
		structural_scan_types = list(s_data_selection['scan_type'].unique())
		struct_ind = s_data_selection.index.tolist()
		data_selection = pd.concat([data_selection,s_data_selection], sort=True)
	if functional_match:
		f_data_selection = get_data_selection(measurements_base,
			match=functional_match,
			exclude=exclude,
			measurements=measurements,
			)
		functional_scan_types = list(f_data_selection['scan_type'].unique())
		func_ind = f_data_selection.index.tolist()
		data_selection = pd.concat([data_selection,f_data_selection], sort=True)
	if diffusion_match:
		d_data_selection = get_data_selection(measurements_base,
			match=diffusion_match,
			exclude=exclude,
			measurements=measurements,
			)
		diffusion_scan_types = list(d_data_selection['scan_type'].unique())
		dwi_ind = d_data_selection.index.tolist()
		data_selection = pd.concat([data_selection,d_data_selection], sort=True)

	# we start to define nipype workflow elements (nodes, connections, meta)
	subjects_sessions = data_selection[["subject","session"]].drop_duplicates().values.tolist()
	if debug:
		print('Data selection:')
		print(data_selection)
		print('Iterating over:')
		print(subjects_sessions)
	infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_session'], mandatory_inputs=False), name="infosource")
	infosource.iterables = [('subject_session', subjects_sessions)]

	datasink = pe.Node(nio.DataSink(), name='datasink')
	datasink.inputs.base_directory = out_dir
	datasink.inputs.parameterization = False

	workflow_connections = [
		(infosource, datasink, [(('subject_session',ss_to_path), 'container')]),
		]

	if functional_scan_types:
		if not os.path.exists(workdir):
			os.makedirs(workdir)
		f_data_selection.to_csv(path.join(workdir,'f_data_selection.csv'))
		get_f_scan = pe.Node(name='get_f_scan', interface=util.Function(function=get_bids_scan,input_names=inspect.getargspec(get_bids_scan)[0], output_names=[
			'scan_path', 'typ', 'task', 'nii_path', 'nii_name', 'eventfile_name', 'subject_session', 'metadata_filename', 'dict_slice',
			]))
		get_f_scan.inputs.ignore_exception = True
		get_f_scan.inputs.data_selection = f_data_selection
		get_f_scan.inputs.bids_base = measurements_base
		get_f_scan.iterables = ("ind_type", func_ind)

		f_bru2nii = pe.Node(interface=Bru2(), name="f_bru2nii")
		f_bru2nii.inputs.actual_size = not inflated_size
		f_bru2nii.inputs.compress = True

		f_metadata_file = pe.Node(name='metadata_file', interface=util.Function(function=write_bids_metadata_file,input_names=inspect.getargspec(write_bids_metadata_file)[0], output_names=['out_file']))
		f_metadata_file.inputs.extraction_dicts = BIDS_METADATA_EXTRACTION_DICTS

		events_file = pe.Node(name='events_file', interface=util.Function(function=write_bids_events_file,input_names=inspect.getargspec(write_bids_events_file)[0], output_names=['out_file']))
		events_file.ignore_exception = True

		workflow_connections = [
			(get_f_scan, datasink, [(('subject_session',ss_to_path), 'container')]),
			(get_f_scan, f_bru2nii, [('scan_path', 'input_dir')]),
			(get_f_scan, f_bru2nii, [('nii_name', 'output_filename')]),
			(f_metadata_file, events_file, [('out_file', 'metadata_file')]),
			(f_bru2nii, events_file, [('nii_file', 'timecourse_file')]),
			(f_bru2nii, datasink, [('nii_file', 'func')]),
			(get_f_scan, f_metadata_file, [
				('metadata_filename', 'out_file'),
				('task', 'task'),
				('scan_path', 'scan_dir')
				]),
			(get_f_scan, events_file, [
				('eventfile_name', 'out_file'),
				('task', 'task'),
				('scan_path', 'scan_dir')
				]),
			(events_file, datasink, [('out_file', 'func.@events')]),
			(f_metadata_file, datasink, [('out_file', 'func.@metadata')]),
			]
		crashdump_dir = path.join(out_base,workflow_name+'_crashdump')
		workflow_config = {'execution': {'crashdump_dir': crashdump_dir}}
		if debug:
			workflow_config['logging'] = {
				'workflow_level':'DEBUG',
				'utils_level':'DEBUG',
				'interface_level':'DEBUG',
				'filemanip_level':'DEBUG',
				'log_to_file':'true',
				}

		workflow = pe.Workflow(name=workdir_name)
		workflow.connect(workflow_connections)
		workflow.base_dir = path.join(out_base)
		workflow.config = workflow_config
		try:
			workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph_structural.dot"), graph2use="hierarchical", format="png")
		except OSError:
			print('We could not write the DOT file for visualization (`dot` function from the graphviz package). This is non-critical to the processing, but you should get this fixed.')

		#Execute the workflow
		if not keep_work or not keep_crashdump:
			try:
				workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_procs})
			except RuntimeError:
				pass
		else:
			workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_procs})
		if not keep_work:
			shutil.rmtree(path.join(workflow.base_dir,workdir_name))
		if not keep_crashdump:
			try:
				shutil.rmtree(crashdump_dir)
			except (FileNotFoundError, OSError):
				pass

	if diffusion_scan_types:
		# We check for the directory, since it gets deleted after a successful execution.
		if not os.path.exists(workdir):
			os.makedirs(workdir)
		d_data_selection.to_csv(path.join(workdir,'d_data_selection.csv'))
		get_d_scan = pe.Node(name='get_d_scan', interface=util.Function(function=get_bids_scan,input_names=inspect.getargspec(get_bids_scan)[0], output_names=[
			'scan_path', 'typ', 'task', 'nii_path', 'nii_name', 'eventfile_name', 'subject_session', 'metadata_filename', 'dict_slice',
			]))
		get_d_scan.inputs.ignore_exception = True
		get_d_scan.inputs.data_selection = d_data_selection
		get_d_scan.inputs.extra = ['acq']
		get_d_scan.inputs.bids_base = measurements_base
		get_d_scan.iterables = ("ind_type", dwi_ind)

		d_bru2nii = pe.Node(interface=Bru2(), name="d_bru2nii")
		d_bru2nii.inputs.force_conversion=True
		d_bru2nii.inputs.actual_size = not inflated_size
		d_bru2nii.inputs.compress = True

		d_metadata_file = pe.Node(name='metadata_file', interface=util.Function(function=write_bids_metadata_file,input_names=inspect.getargspec(write_bids_metadata_file)[0], output_names=['out_file']))
		d_metadata_file.inputs.extraction_dicts = BIDS_METADATA_EXTRACTION_DICTS

		workflow_connections = [
			(get_d_scan, datasink, [(('subject_session',ss_to_path), 'container')]),
			(get_d_scan, d_bru2nii, [('scan_path', 'input_dir')]),
			(get_d_scan, d_bru2nii, [('nii_name', 'output_filename')]),
			(d_bru2nii, datasink, [('nii_file', 'dwi')]),
			(get_d_scan, d_metadata_file, [
				('metadata_filename', 'out_file'),
				('task', 'task'),
				('scan_path', 'scan_dir')
				]),
			(d_metadata_file, datasink, [('out_file', 'dwi.@metadata')]),
			]

		crashdump_dir = path.join(out_base,workflow_name+'_crashdump')
		workflow_config = {'execution': {'crashdump_dir': crashdump_dir}}
		if debug:
			workflow_config['logging'] = {
				'workflow_level':'DEBUG',
				'utils_level':'DEBUG',
				'interface_level':'DEBUG',
				'filemanip_level':'DEBUG',
				'log_to_file':'true',
				}

		workflow = pe.Workflow(name=workdir_name)
		workflow.connect(workflow_connections)
		workflow.base_dir = path.join(out_base)
		workflow.config = workflow_config
		try:
			workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph_diffusion.dot"), graph2use="hierarchical", format="png")
		except OSError:
			print('We could not write the DOT file for visualization (`dot` function from the graphviz package). This is non-critical to the processing, but you should get this fixed.')

		#Execute the workflow
		if not keep_work or not keep_crashdump:
			try:
				workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_procs})
			except RuntimeError:
				pass
		else:
			workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_procs})
		if not keep_work:
			shutil.rmtree(path.join(workflow.base_dir,workdir_name))
		if not keep_crashdump:
			try:
				shutil.rmtree(crashdump_dir)
			except (FileNotFoundError, OSError):
				pass

	if structural_scan_types:
		# We check for the directory, since it gets deleted after a successful execution.
		if not os.path.exists(workdir):
			os.makedirs(workdir)
		s_data_selection.to_csv(path.join(workdir,'s_data_selection.csv'))
		get_s_scan = pe.Node(name='get_s_scan', interface=util.Function(function=get_bids_scan,input_names=inspect.getargspec(get_bids_scan)[0], output_names=[
			'scan_path', 'typ', 'task', 'nii_path', 'nii_name', 'eventfile_name', 'subject_session', 'metadata_filename', 'dict_slice',
			]))
		get_s_scan.inputs.ignore_exception = True
		get_s_scan.inputs.data_selection = s_data_selection
		get_s_scan.inputs.extra = ['acq']
		get_s_scan.inputs.bids_base = measurements_base
		get_s_scan.iterables = ("ind_type", struct_ind)

		s_bru2nii = pe.Node(interface=Bru2(), name="s_bru2nii")
		s_bru2nii.inputs.force_conversion=True
		s_bru2nii.inputs.actual_size = not inflated_size
		s_bru2nii.inputs.compress = True

		s_metadata_file = pe.Node(name='metadata_file', interface=util.Function(function=write_bids_metadata_file,input_names=inspect.getargspec(write_bids_metadata_file)[0], output_names=['out_file']))
		s_metadata_file.inputs.extraction_dicts = BIDS_METADATA_EXTRACTION_DICTS

		workflow_connections = [
			(get_s_scan, datasink, [(('subject_session',ss_to_path), 'container')]),
			(get_s_scan, s_bru2nii, [('scan_path', 'input_dir')]),
			(get_s_scan, s_bru2nii, [('nii_name', 'output_filename')]),
			(s_bru2nii, datasink, [('nii_file', 'anat')]),
			(get_s_scan, s_metadata_file, [
				('metadata_filename', 'out_file'),
				('task', 'task'),
				('scan_path', 'scan_dir')
				]),
			(s_metadata_file, datasink, [('out_file', 'anat.@metadata')]),
			]
		crashdump_dir = path.join(out_base,workflow_name+'_crashdump')
		workflow_config = {'execution': {'crashdump_dir': crashdump_dir}}
		if debug:
			workflow_config['logging'] = {
				'workflow_level':'DEBUG',
				'utils_level':'DEBUG',
				'interface_level':'DEBUG',
				'filemanip_level':'DEBUG',
				'log_to_file':'true',
				}

		workflow = pe.Workflow(name=workdir_name)
		workflow.connect(workflow_connections)
		workflow.base_dir = path.join(out_base)
		workflow.config = workflow_config
		try:
			workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph_structural.dot"), graph2use="hierarchical", format="png")
		except OSError:
			print('We could not write the DOT file for visualization (`dot` function from the graphviz package). This is non-critical to the processing, but you should get this fixed.')

		#Execute the workflow
		if not keep_work or not keep_crashdump:
			try:
				workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_procs})
			except RuntimeError:
				pass
		else:
			workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_procs})
		if not keep_work:
			shutil.rmtree(path.join(workflow.base_dir,workdir_name))
		if not keep_crashdump:
			try:
				shutil.rmtree(crashdump_dir)
			except (FileNotFoundError, OSError):
				pass

	# This is needed because BIDS does not yet support CBV
	with open(path.join(out_dir,".bidsignore"), "w+") as f:
		f.write('*_cbv.*')

	# Create sessions files
	sessions_file(out_dir, data_selection)

	# BIDS needs a descriptor file
	if not dataset_name:
		dataset_name = measurements_base
	description = {
		'Name':dataset_name,
		'BIDSVersion':'1.0.2',
		}
	with open(path.join(out_dir,'dataset_description.json'), 'w') as f:
		json.dump(description, f)
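To close, a hedged usage sketch for bru2bids(); the dataset path and the match-dictionary values ('rest', 'RARE') are illustrative placeholders, while the keyword names and dictionary keys follow the docstring above.

# Hypothetical bru2bids() call; 'rest' and 'RARE' are placeholder identifiers.
bru2bids('~/ni_data/my_dataset',
	functional_match={'task': ['rest']},
	structural_match={'acquisition': ['RARE']},
	inflated_size=False,
	keep_work=False,
	keep_crashdump=False,
	)

On success this creates the BIDS hierarchy under <out_base>/<workflow_name> (by default <measurements_base>/bids), including a .bidsignore entry for the *_cbv.* files, the sessions files produced by sessions_file, and a dataset_description.json recording the dataset name and BIDS version 1.0.2.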