Example 1
def write_command(anat_input, prefix):
    """Create a command script."""
    with flywheel.GearContext() as context:
        cmd = [
            '/opt/scripts/runAntsCT_nonBIDS.pl',
            '--anatomical-image {}'.format(anat_input),
            '--output-dir {}'.format(gear_output_dir),
            '--output-file-root {}'.format(prefix),
            '--denoise {}'.format(denoise),
            '--num-threads {}'.format(num_threads),
            '--run-quick {}'.format(run_quick),
            '--trim-neck-mode {}'.format(trim_neck_mode)
        ]
        if mni_cort_labels_paths_str:
            cmd.append(
                '--mni-cortical-labels {}'.format(mni_cort_labels_paths_str))

        if mni_labels_paths_str:
            cmd.append('--mni-labels {}'.format(mni_labels_paths_str))

    logger.info(' '.join(cmd))
    with antsct_script.open('w') as f:
        f.write(' '.join(cmd))

    return antsct_script.exists()
Example 2
def main():
    with flywheel.GearContext() as gear_context:
        gear_context.init_logging()
        log.info(gear_context.config)
        log.info(gear_context.destination)
        analysis_id = gear_context.destination.get('id')
        analysis = gear_context.client.get_analysis(analysis_id)
        parent = gear_context.client.get_container(analysis.parent['id'])
        parent_path = utils.get_resolver_path(gear_context.client, parent)
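        # get_resolver_path() presumably returns the Flywheel resolver path of
        # the parent container (e.g. group/project/subject/session).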

        # Run the metadata script
        try:
            error_df, error_count = transfer_log.main(gear_context, 'INFO',
                                                      parent_path)
        except transfer_log.TransferLogException as e:
            create_output_file(e.errors, 'csv', gear_context,
                               'error-transfer-log.csv', True)
            raise e

        log.info('Writing error report')
        fname = gear_context.config.get('filename')
        error_report_path = os.path.join(gear_context.output_dir, fname)
        error_df.to_csv(error_report_path, index=False)
        log.info('Wrote error report with filename %s', error_report_path)

        # Update analysis label
        timestamp = datetime.datetime.utcnow()
        analysis_label = 'TRANSFER_ERROR_COUNT_{}_AT_{}'.format(
            error_count, timestamp)
        log.info('Updating label of analysis=%s to %s', analysis.id,
                 analysis_label)

        analysis.update({'label': analysis_label})
Example 3
    def setUp(self):
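        # Write the module-level INVOCATION fixture out as config.json in a
        # temporary gear directory, then build a GearContext against it.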
        self._path = tempfile.mkdtemp()
        cfg_path = os.path.join(self._path, 'config.json')
        with open(cfg_path, 'w') as f:
            json.dump(INVOCATION, f)

        self.context = flywheel.GearContext(gear_path=self._path)
        self.group_id = None
        self.project_id = None
Example 4
def main():
    with flywheel.GearContext() as gear_context:
        gear_context.init_logging()
        log.info(gear_context.config)
        log.info(gear_context.destination)
        container_type = gear_context.config.get('container_type')
        analysis = gear_context.client.get_analysis(
            gear_context.destination['id'])
        parent = gear_context.client.get_container(analysis.parent['id'])

        # Get all containers
        # TODO: Should it be based on whether the error.log file exists?
        log.info('Finding containers with errors...')
        error_containers = find_error_containers(container_type, parent)
        log.debug('Found %d containers', len(error_containers))

        # Set the resolve paths
        add_additional_info(error_containers, gear_context.client)

        # Set the status for the containers
        log.info('Resolving status for invalid containers...')
        # TODO: Figure out the validator stuff, maybe have our validation be a
        # pip module?
        errors = get_errors(error_containers, gear_context.client)
        error_count = len(errors)

        log.info('Writing error report')
        timestamp = datetime.datetime.utcnow()
        filename = create_output_file(parent.label, errors,
                                      gear_context.config.get('file_type'),
                                      gear_context, timestamp,
                                      gear_context.config.get('filename'))
        log.info('Wrote error report with filename {}'.format(filename))

        # Update analysis label
        analysis_label = 'Metadata Error Report: COUNT={} [{}]'.format(
            error_count, timestamp)
        log.info('Updating label of analysis={} to {}'.format(
            analysis.id, analysis_label))

        # TODO: Remove this when the sdk lets me do this
        update_analysis_label(
            parent.container_type, parent.id, analysis.id, analysis_label,
            gear_context.client._fw.api_client.configuration.
            api_key['Authorization'],
            gear_context.client._fw.api_client.configuration.host)
Example 5
def main():
    with flywheel.GearContext() as gear_context:
        # Add manifest.json as the manifest_json attribute
        setattr(gear_context, 'manifest_json', load_manifest_json())
        # Initialize logging
        gear_context.log = log_initializer(gear_context)
        # Log the gear configuration
        gear_context.log.critical('Starting OpenSlide to PNG Converter...')
        gear_context.log.info('Gear configuration:')
        gear_context.log_config()

        input_filepath = gear_context.get_input_path('image')
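        # input_filepath is the local path of the gear's staged 'image' input.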

        output_filepath = get_output_path(input_filepath,
                                          gear_context.output_dir)

        convert_to_png(input_filepath, output_filepath, gear_context)
        if os.path.isfile(output_filepath):
            gear_context.log.info('Job completed successfully!!')
            os.sys.exit(0)
Example 6
def write_command():
    """Write out command script."""
    with flywheel.GearContext() as context:
        cmd = [
            "/usr/local/bin/python", "/opt/scripts/run.py",
            "--label_index_file {}".format(
                "/opt/labelset/Schaefer2018_200Parcels_17Networks_order.csv"),
            "--label_image_file {}".format(label_image_path),
            "--ct_image_file {}".format(ct_image_path),
            "--t1_image_file {}".format(t1_image_path),
            "--patient_age {}".format(str(age)),
            "--patient_sex {}".format(sex),
            "--thresholds '{}'".format(wthresholds),
            "--prefix {}".format(prefix),
            "--output_dir {}".format(gear_output_dir)
        ]
    logger.info(' '.join(cmd))
    # write command joined by spaces
    with run_script.open('w') as f:
        f.write(' '.join(cmd))

    return run_script.exists()
Example 7
def get_dest_cont_file_dict(input_key):
    """
    Gets the current file info for the file, if the input parent
    :param input_key: the key for the input in the manifest
    :type input_key: str
    :return: a dictionary representing the file object
    """
    with flywheel.GearContext() as gear_context:
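        # Each gear input in config.json carries a 'location' block (file name
        # and path) and a 'hierarchy' block (parent container type and id).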
        file_name = gear_context.get_input(input_key).get('location',
                                                          {}).get('name')
        parent_id = gear_context.get_input(input_key).get('hierarchy',
                                                          {}).get('id')
        parent_type = gear_context.get_input(input_key).get('hierarchy',
                                                            {}).get('type')
        if parent_id == 'aex':
            parent_id = '5e6937e3529e160bd3812da1'
        if not file_name:
            file_dict = dict()
        else:
            fw_client = gear_context.client
            file_dict = dest_file_dict_request(fw_client, parent_id, file_name)
        return file_dict, parent_type
Example 8
def main():
    context = flywheel.GearContext()
    config = context.config
    
    set_environment()

    # Load in paths to input files for the gear
    input_volume = context.get_input_path('t1w_anatomy')  # A zip file with NIfTI

    # Load in values from the gear configuration
    subject_id = config['subject_id']  # Name of folder containing subject i.e., 10000
    directive = config['directive']  # Flag indicating subset of processing steps i.e., autorecon1

    # input_volume = '/Users/pereanez/Documents/flywheel/gears/reconAll/data/10000/Sag_T1.nii.gz'
    # subject_id = '10000'
    # directive = 'autorecon1'
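    # create_command() is not shown in this example; a minimal sketch of what
    # it might return for these arguments (an assumption, not the gear's
    # actual implementation):
    #   ['recon-all', '-subjid', subject_id, '-i', input_volume, '-' + directive]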

    command = create_command(subject_id, input_volume, directive)

    call_command(command)

    cleanup(subject_id, directive)
Example 9
#!/usr/bin/env python
import logging
import os

import flywheel

from container_export import ContainerExporter

log = logging.getLogger("[GRP 9]:")
log.setLevel(logging.INFO)


def main(gear_context):
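    # Build a ContainerExporter from the gear context and run the export; the
    # value returned by export() is used below as the process exit code.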
    exporter = ContainerExporter.from_gear_context(gear_context)
    return exporter.export()


if __name__ == '__main__':
    with flywheel.GearContext() as context:
        if context.config.get("log_debug"):
            level = logging.DEBUG
        else:
            level = logging.INFO
        logging.basicConfig(level=level)
        log.info("{}".format(context.config))
        return_code = main(context)
        log.info("DONE!")
        os._exit(return_code)
Example 10
    #         make_file_name_safe(subject.code, '_')
    # else:
    #     context.gear_dict['subject_code'] = 'unknown_subject'
    #     context.gear_dict['subject_code_safe'] = 'unknown_subject'
    #     log.warning('Subject code is ' + context.gear_dict['subject_code'])

    # session_id = dest_container.parents.session
    # context.gear_dict['session_id'] = session_id
    # if session_id:
    #     session = fw.get(session_id)
    #     context.gear_dict['session_label'] = session.label
    #     context.gear_dict['session_label_safe'] = \
    #         make_file_name_safe(session.label, '_')
    # else:
    #     context.gear_dict['session_label'] = 'unknown_session'
    #     context.gear_dict['session_label_safe'] = 'unknown_session'
    #     log.warning('Session label is ' + context.gear_dict['session_label'])
    
    session_id = dest_container.parents.session

    with open(qa_result_file) as qa_result_data:
        qa_result_json = json.load(qa_result_data)
    session = fw.get(session_id)
    session.update(info={"qa":qa_result_json})


if __name__ == "__main__":

    context = flywheel.GearContext()  # Get the gear context.
    log = write_to_session(context)
Example 11
    output_folder = '/flywheel/v0/output/'
    config_file_path = '/flywheel/v0/config.json'
    metadata_output_filepath = os.path.join(output_folder, '.metadata.json')
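    # The .metadata.json file written to the output folder is picked up by the
    # Flywheel engine after the gear finishes and applied as container metadata.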

    # Load config file
    with open(config_file_path) as config_data:
        config = json.load(config_data)
    # Set dicom path and name from config file
    dicom_filepath = config['inputs']['dicom']['location']['path']
    dicom_name = config['inputs']['dicom']['location']['name']
    # Get the current dicom metadata
    dicom_metadata = config['inputs']['dicom']['object']
    # Get the modality
    modality = config['inputs']['dicom']['object']['modality']
    # Get Acquisition
    with flywheel.GearContext() as gear_context:
        acquisition = gear_context.client.get(gear_context.destination['id'])
    df, dcm = dicom_processor.process_dicom(dicom_filepath)

    # Check that metadata import ran
    try:
        dicom_header = dicom_metadata['info']['header']['dicom']
    except KeyError:
        print(
            'ERROR: No dicom header information found! Please run metadata import and validation.'
        )
        sys.exit(1)

    if modality == "MR":
        dicom_metadata = MR_classifier.classify_MR(df, dcm, dicom_metadata)
    elif modality == 'CT':
Example 12
        if len(context.gear_dict['errors']) > 0:
            msg = 'Previous errors:\n'
            for err in context.gear_dict['errors']:
                if str(type(err)).split("'")[1] == 'str':
                    # show string
                    msg += '  Error msg: ' + str(err) + '\n'
                else:  # show type (of error) and error message
                    msg += '  ' + str(
                        type(err)).split("'")[1] + ': ' + str(err) + '\n'
            log.info(msg)
            ret = 1

        log.info('BIDS App Gear is done.  Returning ' + str(ret))
        os.sys.exit(ret)


if __name__ == '__main__':
    GEAR_COMMAND = '/code/extract_FA.py'
    DOWNLOAD_MODALITIES = ["anat", "dwi"]  # [] for all

    context = flywheel.GearContext()

    log = initialize(context)

    create_command(context, log)

    if len(context.gear_dict['errors']) == 0:
        set_up_data(context, log)

    execute(context, log)
Example 13
def write_qsiprep_command():
    """Create a command script."""
    with flywheel.GearContext() as context:
        cmd = [
            '/usr/local/miniconda/bin/qsiprep',
            str(bids_root),
            str(output_root),
            'participant',
            '--stop_on_first_crash', '-v', '-v',
            # anat_only=False,
            '--b0-motion-corr-to', config.get('b0_motion_corr_to', 'iterative'),
            '--b0_threshold', str(int(config.get('b0_threshold', 100))),
            '--b0_to_t1w_transform', 'Rigid',
            '--dwi-denoise-window', str(config.get('dwi_denoise_window', 5)),
            '--fs-license-file', context.get_input_path('freesurfer_license'),
            '--hmc-model', config.get('hmc_model', 'eddy'),
            '--hmc-transform', config.get('hmc_transform', 'Affine'),
            '-w', str(working_dir),
            '--output-resolution', str(config.get('output_resolution')),
            '--output-space', config.get('output_space'),
            '--run-uuid', analysis_id,
            '--template', config.get('template', 'MNI152NLin2009cAsym')]
        # if acquisition_type is not None:
        #     cmd += ['--acquisition_type', acquisition_type]
        # If on HPC, get the cores/memory limits
        if config.get('sge-cpu'):
            # Parse SGE cpu syntax, such as "4-8" or just "4"
            cpuMin = int(config.get('sge-cpu').split('-')[0])
            cmd += ['--n_cpus', str(max(1, cpuMin - 1))]
        if config.get('combine_all_dwis', False):
            cmd.append('--combine_all_dwis')
        if config.get('denoise_before_combining', False):
            cmd.append('--denoise-before-combining')
        if config.get('do_reconall', False):
            cmd.append('--do-reconall')
        if context.get_input_path('eddy_config'):
            cmd += ['--eddy-config', context.get_input_path('eddy_config')]
        if config.get('fmap_bspline', False):
            cmd.append('--fmap-bspline')
        if config.get('fmap_no_demean', False):
            cmd.append('--fmap-no-demean')
        if config.get('force_spatial_normalization', False):
            cmd.append('--force-spatial-normalization')
        if config.get('force_syn', False):
            cmd.append('--force-syn')
        if config.get("ignore"):
            cmd += ['--ignore', config.get("ignore")]
        if config.get('impute_slice_threshold'):
            cmd += ['--impute-slice-threshold', str(config.get('impute_slice_threshold'))]
        if config.get('intramodal_template_iters'):
            cmd += ['--intramodal-template-iters',
                    str(config.get('intramodal_template_iters'))]
        if config.get('intramodal_template_transform'):
            cmd += ['--intramodal-template-transform',
                    config.get('intramodal_template_transform')]
        if config.get('longitudinal', False):
            cmd.append('--longitudinal')
        if config.get('notrack'):
            cmd.append('--notrack')
        if config.get('prefer_dedicated_fmaps'):
            cmd.append('--prefer-dedicated-fmaps')
        if recon_spec:
            cmd += ['--recon-spec', recon_spec]
        if config.get('shoreline_iters'):
            cmd += ['--shoreline-iters',
                    str(config.get('shoreline_iters'))]
        if config.get('skip_bids_validation'):
            cmd.append('--skip-bids-validation')
        if config.get('skull_strip_fixed_seed'):
            cmd.append('--skull-strip-fixed-seed')
        if config.get('skull_strip_template'):
            cmd += ['--skull_strip_template', config.get('skull_strip_template')]
        if config.get('sloppy', False):
            cmd.append('--sloppy')
        if config.get('use_syn_sdc', False):
            cmd.append('--use-syn-sdc')
        if config.get('write_local_bvecs'):
            cmd.append('--write-local-bvecs')
        if config.get('unringing_method'):
            cmd += ['--unringing-method', config.get('unringing_method')]
        if config.get('no_b0_harmonization'):
            cmd += ['--no-b0-harmonization']
        if config.get('dwi_no_biascorr'):
            cmd += ['--dwi-no-biascorr']
    logger.info(' '.join(cmd))
    with qsiprep_script.open('w') as f:
        f.write(' '.join(cmd))

    return qsiprep_script.exists()
Example 14
def write_fmriprep_command():
    """Create a command script."""
    with flywheel.GearContext() as context:

        # Mandatory arguments
        cmd = [
            '/usr/local/miniconda/bin/fmriprep', '--stop-on-first-crash', '-v',
            '-v',
            str(bids_root),
            str(output_root), 'participant', '--fs-license-file',
            context.get_input_path('freesurfer_license'), '-w',
            str(working_dir), '--output-spaces',
            config.get('output_spaces'), '--run-uuid', analysis_id
        ]

        # External FreeSurfer Input
        if context.get_input_path("freesurfer_input"):
            cmd += [
                '--fs-subjects-dir',
                context.get_input_path("freesurfer_input")
            ]

        # JSON file that contains a file filter
        if context.get_input_path("bids_filter_file"):
            cmd += [
                '--bids-filter-file',
                context.get_input_path("bids_filter_file")
            ]

        if config.get('skip_bids_validation'):
            cmd.append('--skip-bids-validation')
        if config.get('task_id'):
            cmd += ['--task-id', config.get('task_id')]
        if config.get('anat_only', False):
            cmd.append('--anat-only')
        if config.get("ignore"):
            cmd += ['--ignore', config.get("ignore")]
        if config.get('longitudinal', False):
            cmd.append('--longitudinal')
        if config.get('t2s_coreg'):
            cmd.append('--t2s-coreg')
        if config.get('bold2t1w_dof'):
            cmd += ['--bold2t1w-dof', str(config.get('bold2t1w_dof'))]
        if config.get('force_bbr'):
            cmd.append('--force-bbr')
        if config.get('force_no_bbr'):
            cmd.append('--force-no-bbr')
        if config.get('dummy_scans'):
            cmd += ['--dummy-scans', str(config.get('dummy_scans'))]

        # Aroma options
        if config.get('use_aroma'):
            cmd.append('--use-aroma')
            if config.get('aroma_melodic_dimensionality'):
                cmd += [
                    '--aroma-melodic-dimensionality',
                    '%d' % config.get('aroma_melodic_dimensionality')
                ]

        # Confounds options
        if config.get('return_all_components'):
            cmd.append('--return-all-components')
        if config.get('fd_spike_threshold'):
            cmd += [
                '--fd-spike-threshold',
                str(config.get('fd_spike_threshold'))
            ]
        if config.get('dvars_spike_threshold'):
            cmd += [
                '--dvars-spike-threshold',
                str(config.get('dvars_spike_threshold'))
            ]

        # Specific options for ANTs registrations
        if config.get('skull_strip_fixed_seed'):
            cmd.append('--skull-strip-fixed-seed')
        if config.get('skull_strip_template'):
            cmd += [
                '--skull-strip-template',
                config.get('skull_strip_template')
            ]

        # Fieldmap options
        if config.get('fmap_bspline', False):
            cmd.append('--fmap-bspline')
        if config.get('fmap_no_demean', False):
            cmd.append('--fmap-no-demean')

        # Specific options for SyN distortion correction
        if config.get('force_syn', False):
            cmd.append('--force-syn')
        if config.get('use_syn_sdc', False):
            cmd.append('--use-syn-sdc')

        # Surface preprocessing options
        if config.get('fs_no_reconall', False):
            cmd.append('--fs-no-reconall')
        if not config.get('cifti_output') == 'None':
            cmd += ['--cifti-output', config.get('cifti_output')]
        if config.get('no_submm_recon'):
            cmd.append('--no-submm-recon')
        if config.get('medial_surface_nan'):
            cmd.append('--medial-surface-nan')

        # If on HPC, get the cores/memory limits
        if config.get('sge-cpu'):
            # Parse SGE cpu syntax, such as "4-8" or just "4"
            cpuMin = int(config.get('sge-cpu').split('-')[0])
            cmd += ['--nthreads', str(max(1, cpuMin - 1))]

        if config.get('notrack'):
            cmd.append('--notrack')
        if config.get('sloppy', False):
            cmd.append('--sloppy')

    logger.info(' '.join(cmd))
    with fmriprep_script.open('w') as f:
        f.write(' '.join(cmd))

    return fmriprep_script.exists()
Example 15
def main():
    context = flywheel.GearContext()
    context.custom_dict={}
    # Create a 'dry run' flag for debugging
    context.custom_dict['dry-run'] = False

    context.log = get_custom_logger('[flywheel:taigw/brats17]')

    # grab environment for gear
    with open('/tmp/gear_environ.json', 'r') as f:
        environ = json.load(f)
    context.custom_dict['environ'] = environ

    # Report inputs and configuration
    context.log_config()

    try:
        # verify required inputs are present 
        inputs = context._invocation['inputs']
        required_files = ['t1','t1ce','t2','flair']
        missing = []
        for fl in required_files:
            if fl not in inputs:
                missing.append(fl)

        if missing:
            raise Exception('The following file(s) are required: {}'.format(missing))

        # execute algorithm on parameters
        # Setup the directory structure expected by brats17/test.py for a 
        # single subject (e.g. /flywheel/v0/work/{subject}/{fl})
        subject = context.config["Subject"]
        os.makedirs(op.join(context.work_dir,subject))

        inputs = context._invocation['inputs']
        # Make symbolic links between the flywheel file structure and the file 
        # structure expected by brats/test.py
        for fl in inputs:
            if "location" in inputs[fl]:
                src = inputs[fl]["location"]["path"]
                dest = op.join(context.work_dir,subject,inputs[fl]["location"]["name"])
                os.symlink(src, dest) 

        # Create the test_names.txt file needed by taigw/brats2017 code,
        # with the subject that is downloaded.
        with open('test_names.txt', 'w') as f:
            f.write(subject)

        # change to the model execution directory
        os.chdir("/usr/src/app/")
        command = ["python","test.py","/flywheel/v0/test_all_class.txt"]
        exec_command(context,command)

    except Exception as e:
        context.log.exception(e)
        context.log.error(
            'Cannot execute https://github.com/taigw/brats17 commands.'
        )
        os.sys.exit(1)

    
    # On successful completion, notify and exit gracefully
    context.log.info("Commands successfully executed!")
    os.sys.exit(0)
Example 16
def main():
    # Preamble: take care of all gear-typical activities.
    context = flywheel.GearContext()
    context.gear_dict = {}
    # Initialize all hcp-gear variables.
    gear_preliminaries.initialize_gear(context)

    context.log_config()

    # Utilize FreeSurfer license from config or project metadata.
    try:
        gear_preliminaries.set_freesurfer_license(context)
    except Exception as e:
        context.log.exception(e)
        context.log.fatal(
            "A valid FreeSurfer license must be present to run. "
            "Please check your configuration and try again."
        )
        os.sys.exit(1)

    # Before continuing from here, we need to validate the config.json
    # Validate gear configuration against gear manifest
    try:
        gear_preliminaries.validate_config_against_manifest(context)
    except Exception as e:
        context.log.error("Invalid Configuration:")
        context.log.exception(e)
        context.log.fatal("Please make the prescribed corrections and try again.")
        os.sys.exit(1)

    # Get file list and configuration from hcp-struct zipfile
    try:
        hcp_struct_zip_filename = context.get_input_path("StructZip")
        hcp_struct_list, hcp_struct_config = gear_preliminaries.preprocess_hcp_zip(
            hcp_struct_zip_filename
        )
        context.gear_dict["exclude_from_output"] = hcp_struct_list
        context.gear_dict["hcp_struct_config"] = hcp_struct_config
    except Exception as e:
        context.log.exception(e)
        context.log.error("Invalid hcp-struct zip file.")
        os.sys.exit(1)

    # Ensure the subject_id is set in a valid manner
    # (api, config, or hcp-struct config)
    try:
        gear_preliminaries.set_subject(context)
    except Exception as e:
        context.log.exception(e)
        context.log.fatal("The Subject ID is not valid. Examine and try again.",)
        os.sys.exit(1)

    ############################################################################
    # Build and Validate Parameters
    # Doing as much parameter checking before ANY computation.
    # Fail as fast as possible.

    try:
        # Build and validate from Volume Processing Pipeline
        DiffPreprocPipeline.build(context)
        DiffPreprocPipeline.validate(context)
    except Exception as e:
        context.log.exception(e)
        context.log.fatal(
            "Validating Parameters for the Diffusion Preprocessing Pipeline Failed!"
        )
        os.sys.exit(1)

    ###########################################################################
    # Unzip hcp-struct results
    try:
        gear_preliminaries.unzip_hcp(context, hcp_struct_zip_filename)
    except Exception as e:
        context.log.exception(e)
        context.log.fatal("Unzipping hcp-struct zipfile failed!")
        os.sys.exit(1)

    ############################################################################
    # Execute HCP Pipelines
    # Some hcp-func specific output parameters:
    (
        context.gear_dict["output_config"],
        context.gear_dict["output_config_filename"],
    ) = diff_utils.configs_to_export(context)

    context.gear_dict["output_zip_name"] = op.join(
        context.output_dir,
        "{}_{}_hcpdiff.zip".format(
            context.config["Subject"], context.config["DWIName"]
        ),
    )

    # context.gear_dict['remove_files'] = diff_utils.remove_intermediate_files
    ###########################################################################
    # Pipelines common commands
    # "QUEUE" is used differently in FSL 6.0... We don't use it here.
    # QUEUE = "-q"
    LogFileDirFull = op.join(context.work_dir, "logs")
    os.makedirs(LogFileDirFull, exist_ok=True)
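    # fsl_sub's -l option directs its job log files into the work-dir logs folder.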
    FSLSUBOPTIONS = "-l " + LogFileDirFull

    command_common = [
        op.join(context.gear_dict["environ"]["FSLDIR"], "bin", "fsl_sub"),
        FSLSUBOPTIONS,
    ]

    context.gear_dict["command_common"] = command_common

    # Execute Diffusion Processing Pipeline
    try:
        DiffPreprocPipeline.execute(context)
    except Exception as e:
        context.log.exception(e)
        context.log.fatal("The Diffusion Preprocessing Pipeline Failed!")
        if context.config["save-on-error"]:
            results.cleanup(context)
        os.sys.exit(1)

    # Generate Diffusion QC Images
    try:
        hcpdiff_qc_mosaic.build(context)
        hcpdiff_qc_mosaic.execute(context)
    except Exception as e:
        context.log.exception(e)
        context.log.fatal("HCP Diffusion QC Images has failed!")
        if context.config["save-on-error"]:
            results.cleanup(context)
        os.sys.exit(1)

    ###########################################################################
    # Clean-up and output prep
    results.cleanup(context)

    os.sys.exit(0)