def _select_stage_range(order, stages):
    """Slice a stage list to the user-requested "Start:End" range.

    :param order: ordered list of pipeline stage objects.
    :param stages: "Start", "Start:", ":End", or "Start:End" where each
    part is a stage class name (case-sensitive).
    :return: the sub-list of ``order`` from the start stage through the
    end stage, inclusive.
    """
    start_idx = 0
    end_idx = len(order)
    # partition() yields an empty end part when there is no colon, which
    # is treated the same as an omitted end stage.
    start_stage, _, end_stage = stages.partition(":")
    names = [x.__class__.__name__ for x in order]
    if start_stage:
        assert start_stage in names, \
            '"%s" is unknown, check class name and case for given stage' \
            % start_stage
        start_idx = names.index(start_stage)
    if end_stage:
        assert end_stage in names, \
            '"%s" is unknown, check class name and case for given stage' \
            % end_stage
        end_idx = names.index(end_stage) + 1  # Include end stage.
    return order[start_idx:end_idx]


def interface(bids_dir, output_dir, subject_list=None, collect=False, ncpus=1,
              stages=None, bandstop_params=None, check_only=False,
              run_abcd_task=False, study_template=None, cleaning_json=None,
              print_commands=False, ignore_expected_outputs=False,
              ignore_modalities=None, freesurfer_license=None):
    """
    main application interface
    :param bids_dir: input bids dataset see "helpers.read_bids_dataset" for
    more information.
    :param output_dir: output folder
    :param subject_list: subject and session list filtering. See
    "helpers.read_bids_dataset" for more information.
    :param collect: treats each subject as having only one session.
    :param ncpus: number of cores for parallelized processing.
    :param stages: only run a subset of stages: "Start", "Start:", ":End"
    or "Start:End" using pipeline class names.
    :param bandstop_params: tuple of lower and upper bound for stop-band
    filter
    :param check_only: check expected outputs for each stage then terminate
    :param run_abcd_task: append the ABCDTask pipeline stage.
    :param study_template: study-specific template arguments forwarded to
    ParameterSettings.set_study_template.
    :param cleaning_json: spec file for the CustomClean stage.
    :param print_commands: print stage commands instead of executing them.
    :param ignore_expected_outputs: do not check stages' expected outputs.
    :param ignore_modalities: modalities (e.g. 'func', 'dwi') to skip.
    :param freesurfer_license: path to a FreeSurfer license file.
    :return: None
    """
    # None sentinel: a mutable default list would be shared across calls.
    if ignore_modalities is None:
        ignore_modalities = []

    # A license is only needed when stages will actually execute; skip
    # validation for dry runs.  (The previous "not check_only or not
    # print_commands" was true unless BOTH flags were set.)
    if not (check_only or print_commands):
        validate_license(freesurfer_license)

    # read from bids dataset
    assert os.path.isdir(bids_dir), bids_dir + ' is not a directory!'
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    session_generator = read_bids_dataset(
        bids_dir, subject_list=subject_list, collect_on_subject=collect
    )

    # run each session in serial
    for session in session_generator:
        # setup session configuration
        out_dir = os.path.join(
            output_dir,
            'sub-%s' % session['subject'],
            'ses-%s' % session['session']
        )
        # detect available data for pipeline stages
        validate_config(session, ignore_modalities)
        modes = session['types']
        run_anat = 'T1w' in modes
        run_func = 'bold' in modes and 'func' not in ignore_modalities
        run_dwi = 'dwi' in modes and 'dwi' not in ignore_modalities
        summary = True
        session_spec = ParameterSettings(session, out_dir)

        # set session parameters
        if study_template is not None:
            session_spec.set_study_template(*study_template)

        # create pipelines
        order = []
        boldproc = None
        if run_anat:
            pre = PreFreeSurfer(session_spec)
            free = FreeSurfer(session_spec)
            post = PostFreeSurfer(session_spec)
            order += [pre, free, post]
        if run_func:
            vol = FMRIVolume(session_spec)
            surf = FMRISurface(session_spec)
            boldproc = DCANBOLDProcessing(session_spec)
            order += [vol, surf, boldproc]
        if run_dwi:
            # TODO: DiffusionPreprocessing stage not yet wired in.
            print('dwi preprocessing is still a work in progress. Skipping.')
        if summary:
            execsum = ExecutiveSummary(session_spec)
            order += [execsum]

        # set user parameters.  Guard on boldproc: previously this
        # referenced boldproc unconditionally, raising NameError when no
        # functional data was present.
        if bandstop_params is not None and boldproc is not None:
            boldproc.set_bandstop_filter(*bandstop_params)

        # add optional pipelines
        if run_abcd_task:
            abcdtask = ABCDTask(session_spec)
            order.append(abcdtask)
        if cleaning_json:
            cclean = CustomClean(session_spec, cleaning_json)
            order.append(cclean)

        if stages:
            # User can indicate start or end or both; default to the
            # entire list built above.
            order = _select_stage_range(order, stages)

        # special runtime options
        if check_only:
            for stage in order:
                print('checking outputs for %s' % stage.__class__.__name__)
                try:
                    stage.check_expected_outputs()
                except AssertionError:
                    # report-only mode: a missing output is not fatal here
                    pass
            return
        if print_commands:
            for stage in order:
                stage.deactivate_runtime_calls()
                stage.deactivate_check_expected_outputs()
                stage.deactivate_remove_expected_outputs()
        if ignore_expected_outputs:
            print('ignoring checks for expected outputs.')
            for stage in order:
                stage.activate_ignore_expected_outputs()

        # run pipelines
        for stage in order:
            print('running %s' % stage.__class__.__name__)
            print(stage)
            stage.run(ncpus)
def interface(bids_dir, output_dir, subject_list=None, session_list=None,
              collect=False, ncpus=1, start_stage=None, bandstop_params=None,
              max_cortical_thickness=5, check_only=False, study_template=None,
              cleaning_json=None, print_commands=False,
              ignore_expected_outputs=False, multi_template_dir=None,
              norm_method=None, registration_assist=None,
              freesurfer_license=None):
    """
    main application interface
    :param bids_dir: input bids dataset see "helpers.read_bids_dataset" for
    more information.
    :param output_dir: output folder
    :param subject_list: subject and session list filtering. See
    "helpers.read_bids_dataset" for more information.
    :param session_list: subject and session list filtering.
    :param collect: treats each subject as having only one session.
    :param ncpus: number of cores for parallelized processing.
    :param start_stage: start from a given stage.
    :param bandstop_params: tuple of lower and upper bound for stop-band
    filter
    :param max_cortical_thickness: maximum cortical thickness allowed in
    FreeSurfer.
    :param check_only: check expected outputs for each stage then terminate
    :param study_template: study specific template (head and brain) for
    brain masking
    :param cleaning_json: spec file for the CustomClean stage.
    :param print_commands: print stage commands instead of executing them.
    :param ignore_expected_outputs: do not check stages' expected outputs.
    :param multi_template_dir: directory of joint label fusion atlases
    :param norm_method: which method will be used for hyper-normalization
    step.
    :param registration_assist: arguments forwarded to
    FMRIVolume.set_registration_assist.
    :param freesurfer_license: path to a FreeSurfer license file.
    :return: None
    """
    # A license is only needed when stages will actually execute; skip
    # validation for dry runs.  (The previous "not check_only or not
    # print_commands" was true unless BOTH flags were set.)
    if not (check_only or print_commands):
        validate_license(freesurfer_license)

    # read from bids dataset
    assert os.path.isdir(bids_dir), bids_dir + ' is not a directory!'
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    session_generator = read_bids_dataset(bids_dir, subject_list=subject_list,
                                          session_list=session_list)

    # run each session in serial
    for session in session_generator:
        # setup session configuration
        out_dir = os.path.join(output_dir,
                               'sub-%s' % session['subject'],
                               'ses-%s' % session['session'])
        session_spec = ParameterSettings(session, out_dir)

        # hyper-normalization method; default is ADULT_GM_IP.
        if norm_method is None:
            session_spec.set_hypernormalization_method("ADULT_GM_IP")
        else:
            session_spec.set_hypernormalization_method(norm_method)

        if study_template is not None:
            session_spec.set_study_templates(*study_template)
        if multi_template_dir is not None:
            session_spec.set_templates_dir(multi_template_dir)
        # "is not 5" tested object identity against an int literal, which
        # is implementation-defined; compare by value instead.
        if max_cortical_thickness != 5:
            session_spec.set_max_cortical_thickness(max_cortical_thickness)

        # create pipelines
        mask = PreliminaryMasking(session_spec)
        pre = PreFreeSurfer(session_spec)
        free = FreeSurfer(session_spec)
        post = PostFreeSurfer(session_spec)
        vol = FMRIVolume(session_spec)
        surf = FMRISurface(session_spec)
        boldproc = DCANBOLDProcessing(session_spec)
        execsum = ExecutiveSummary(session_spec)

        # set user parameters
        if registration_assist:
            vol.set_registration_assist(*registration_assist)
        if bandstop_params is not None:
            boldproc.set_bandstop_filter(*bandstop_params)

        # determine pipeline order
        order = [mask, pre, free, post, vol, surf, boldproc, execsum]
        if cleaning_json:
            cclean = CustomClean(session_spec, cleaning_json)
            order.append(cclean)
        if start_stage:
            names = [x.__class__.__name__ for x in order]
            assert start_stage in names, \
                '"%s" is unknown, check class name and case for given stage' \
                % start_stage
            order = order[names.index(start_stage):]

        # special runtime options
        if check_only:
            for stage in order:
                print('checking outputs for %s' % stage.__class__.__name__)
                try:
                    stage.check_expected_outputs()
                except AssertionError:
                    # report-only mode: a missing output is not fatal here
                    pass
            return
        if print_commands:
            for stage in order:
                stage.deactivate_runtime_calls()
                stage.deactivate_check_expected_outputs()
                stage.deactivate_remove_expected_outputs()
        if ignore_expected_outputs:
            print('ignoring checks for expected outputs.')
            for stage in order:
                stage.activate_ignore_expected_outputs()

        # run pipelines
        for stage in order:
            print('running %s' % stage.__class__.__name__)
            print(stage)
            stage.run(ncpus)