Example 1
def run_examples(example, pipelines, data_path, plugin=None):
    '''
    Run example workflows
    '''

    # Import packages
    import os
    import sys
    from multiprocessing import cpu_count
    from shutil import copyfile, rmtree

    from nipype import config
    from nipype.interfaces.base import CommandLine
    from nipype.utils import draw_gantt_chart
    from nipype.pipeline.plugins import log_nodes_cb

    if plugin is None:
        plugin = 'MultiProc'

    print('running example: %s with plugin: %s' % (example, plugin))
    config.enable_debug_mode()
    config.enable_provenance()
    CommandLine.set_default_terminal_output("stream")

    plugin_args = {}
    if plugin == 'MultiProc':
        plugin_args['n_procs'] = cpu_count()

    __import__(example)

    for pipeline in pipelines:
        # Init and run workflow
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
        if os.path.exists(wf.base_dir):
            rmtree(wf.base_dir)

        # Handle a logging directory
        log_dir = os.path.join(os.getcwd(), 'logs', example)
        if os.path.exists(log_dir):
            rmtree(log_dir)
        os.makedirs(log_dir)
        wf.config = {
            'execution': {
                'hash_method': 'timestamp',
                'stop_on_first_rerun': 'true',
                'write_provenance': 'true'
            }
        }

        # Callback log setup
        if example == 'fmri_spm_nested' and plugin == 'MultiProc' and \
           pipeline == 'l2pipeline':
            # Init callback log
            import logging
            cb_log_path = os.path.join(os.path.expanduser('~'), 'callback.log')
            cb_logger = logging.getLogger('callback')
            cb_logger.setLevel(logging.DEBUG)
            handler = logging.FileHandler(cb_log_path)
            cb_logger.addHandler(handler)
            plugin_args = {'n_procs': 4, 'status_callback': log_nodes_cb}
        else:
            plugin_args = {'n_procs': 4}
        try:
            wf.inputs.inputnode.in_data = os.path.abspath(data_path)
        except AttributeError:
            pass  # the workflow does not have inputnode.in_data

        wf.run(plugin=plugin, plugin_args=plugin_args)

        # Draw gantt chart only if pandas is installed
        try:
            import pandas
            pandas_flg = True
        except ImportError:
            pandas_flg = False

        if 'status_callback' in plugin_args and pandas_flg:
            draw_gantt_chart.generate_gantt_chart(cb_log_path, 4)
            dst_log_html = os.path.join(os.path.expanduser('~'),
                                        'callback.log.html')
            copyfile(cb_log_path + '.html', dst_log_html)
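The callback-log and Gantt-chart machinery used above can also be exercised on its own. The sketch below wires the same log_nodes_cb status callback and generate_gantt_chart call to a trivial one-node workflow; the toy node, paths, and process count are illustrative assumptions, not values from the example.

# Minimal, self-contained sketch of the nipype profiling pattern used above;
# the toy node and output paths are illustrative.
import logging
import os

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function
from nipype.pipeline.plugins import log_nodes_cb
from nipype.utils import draw_gantt_chart


def _add_one(x):
    return x + 1


node = pe.Node(Function(input_names=['x'], output_names=['out'],
                        function=_add_one), name='add_one')
node.inputs.x = 1
wf = pe.Workflow(name='profile_demo',
                 base_dir=os.path.abspath('profile_demo_work'))
wf.add_nodes([node])

# Route the status callback's records to a plain-text log file
cb_log_path = os.path.abspath('callback.log')
cb_logger = logging.getLogger('callback')
cb_logger.setLevel(logging.DEBUG)
cb_logger.addHandler(logging.FileHandler(cb_log_path))

wf.run(plugin='MultiProc',
       plugin_args={'n_procs': 2, 'status_callback': log_nodes_cb})

# Render callback.log.html with per-node start/finish times
draw_gantt_chart.generate_gantt_chart(cb_log_path, 2)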
Example 2
def build_collect_workflow(args, retval):
    import os
    import glob
    import warnings
    warnings.filterwarnings("ignore")
    import ast
    import pkg_resources
    from pathlib import Path
    import yaml
    import uuid
    from time import strftime
    import shutil

    try:
        import pynets

        print(f"\n\nPyNets Version:\n{pynets.__version__}\n\n")
    except ImportError:
        print("PyNets not installed! Ensure that you are using the correct"
              " python version.")

    # Unpack arguments into local variables
    resources = args.pm
    if resources == "auto":
        from multiprocessing import cpu_count
        import psutil
        # Reserve one core; size memory from the free RAM reported by psutil
        nthreads = cpu_count() - 1
        procmem = [
            int(nthreads),
            int(psutil.virtual_memory().free / 1000000000)  # bytes -> GB
        ]
    else:
        # e.g. "8, 16" -> (8, 16); literal_eval avoids executing arbitrary code
        procmem = list(ast.literal_eval(str(resources)))
    plugin_type = args.plug
    if isinstance(plugin_type, list):
        plugin_type = plugin_type[0]
    verbose = args.v
    working_path = args.basedir
    work_dir = args.work
    modality = args.modality
    drop_cols = args.dc
    if isinstance(modality, list):
        modality = modality[0]

    if os.path.isdir(work_dir):
        shutil.rmtree(work_dir)

    os.makedirs(f"{str(Path(working_path))}/{modality}_group_topology_auc",
                exist_ok=True)

    wf = collect_all(working_path, modality, drop_cols)

    with open(pkg_resources.resource_filename("pynets", "runconfig.yaml"),
              "r") as stream:
        try:
            runtime_dict = {}
            execution_dict = {}
            hardcoded_params = yaml.safe_load(stream)
            # Each entry is a single-key mapping whose value is a one-element list
            for entry in hardcoded_params["resource_dict"]:
                key, val = next(iter(entry.items()))
                runtime_dict[key] = ast.literal_eval(val[0])
            for entry in hardcoded_params["execution_dict"]:
                key, val = next(iter(entry.items()))
                execution_dict[key] = val[0]
        except yaml.YAMLError:
            print("Failed to parse runconfig.yaml")

    run_uuid = f"{strftime('%Y%m%d_%H%M%S')}_{uuid.uuid4()}"
    os.makedirs(f"{work_dir}/pynets_out_collection{run_uuid}", exist_ok=True)
    wf.base_dir = f"{work_dir}/pynets_out_collection{run_uuid}"

    if verbose is True:
        from nipype import config, logging

        cfg_v = dict(
            logging={
                "workflow_level": "DEBUG",
                "utils_level": "DEBUG",
                "interface_level": "DEBUG",
                "filemanip_level": "DEBUG",
                "log_directory": str(wf.base_dir),
                "log_to_file": True,
            },
            monitoring={
                "enabled": True,
                "sample_frequency": "0.1",
                "summary_append": True,
                "summary_file": str(wf.base_dir),
            },
        )
        # Apply the config first so the loggers pick up the new settings
        config.update_config(cfg_v)
        logging.update_logging(config)
        config.enable_debug_mode()
        config.enable_resource_monitor()

        import logging

        callback_log_path = f"{wf.base_dir}/run_stats.log"
        logger = logging.getLogger("callback")
        logger.setLevel(logging.DEBUG)
        handler = logging.FileHandler(callback_log_path)
        logger.addHandler(handler)

    execution_dict["crashdump_dir"] = str(wf.base_dir)
    execution_dict["plugin"] = str(plugin_type)
    cfg = dict(execution=execution_dict)
    for key in cfg.keys():
        for setting, value in cfg[key].items():
            wf.config[key][setting] = value
    try:
        wf.write_graph(graph2use="colored", format="png")
    except BaseException:
        pass
    if verbose is True:
        from nipype.utils.profiler import log_nodes_cb

        plugin_args = {
            "n_procs": int(procmem[0]),
            "memory_gb": int(procmem[1]),
            "status_callback": log_nodes_cb,
            "scheduler": "mem_thread",
        }
    else:
        plugin_args = {
            "n_procs": int(procmem[0]),
            "memory_gb": int(procmem[1]),
            "scheduler": "mem_thread",
        }
    print("%s%s%s" % ("\nRunning with ", str(plugin_args), "\n"))
    wf.run(plugin=plugin_type, plugin_args=plugin_args)
    if verbose is True:
        from nipype.utils.draw_gantt_chart import generate_gantt_chart

        print("Plotting resource profile from run...")
        generate_gantt_chart(callback_log_path, cores=int(procmem[0]))
        handler.close()
        logger.removeHandler(handler)
    return
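The parsing loop above assumes each entry in resource_dict and execution_dict is a single-key mapping whose value is a one-element list. A minimal, self-contained illustration of that shape follows; the key names are made up for this sketch, not taken from PyNets' actual runconfig.yaml.

# Hypothetical runconfig-like fragment, parsed with the same pattern as above
import ast
import yaml

sample = """
resource_dict:
  - fetch_nodes_and_labels_node: ["(1, 2)"]
execution_dict:
  - keep_inputs: ["True"]
"""

params = yaml.safe_load(sample)
runtime_dict = {}
execution_dict = {}
for entry in params["resource_dict"]:
    key, val = next(iter(entry.items()))
    runtime_dict[key] = ast.literal_eval(val[0])   # -> (1, 2)
for entry in params["execution_dict"]:
    key, val = next(iter(entry.items()))
    execution_dict[key] = val[0]                   # -> "True"
print(runtime_dict, execution_dict)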
Example 3
def run_examples(example, pipelines, data_path, plugin=None):
    '''
    Run example workflows
    '''

    # Import packages
    import os
    import sys
    from multiprocessing import cpu_count
    from shutil import copyfile, rmtree

    from nipype import config
    from nipype.interfaces.base import CommandLine
    from nipype.utils import draw_gantt_chart
    from nipype.pipeline.plugins import log_nodes_cb

    if plugin is None:
        plugin = 'MultiProc'

    print('running example: %s with plugin: %s' % (example, plugin))
    config.enable_debug_mode()
    config.enable_provenance()
    CommandLine.set_default_terminal_output("stream")

    plugin_args = {}
    if plugin == 'MultiProc':
        plugin_args['n_procs'] = cpu_count()

    __import__(example)

    for pipeline in pipelines:
        # Init and run workflow
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
        if os.path.exists(wf.base_dir):
            rmtree(wf.base_dir)

        # Handle a logging directory
        log_dir = os.path.join(os.getcwd(), 'logs', example)
        if os.path.exists(log_dir):
            rmtree(log_dir)
        os.makedirs(log_dir)
        wf.config = {'execution': {'hash_method': 'timestamp',
                                   'stop_on_first_rerun': 'true',
                                   'write_provenance': 'true'}}

        # Callback log setup
        if example == 'fmri_spm_nested' and plugin == 'MultiProc' and \
           pipeline == 'l2pipeline':
            # Init callback log
            import logging
            cb_log_path = os.path.join(os.path.expanduser('~'), 'callback.log')
            cb_logger = logging.getLogger('callback')
            cb_logger.setLevel(logging.DEBUG)
            handler = logging.FileHandler(cb_log_path)
            cb_logger.addHandler(handler)
            plugin_args = {'n_procs': 4, 'status_callback': log_nodes_cb}
        else:
            plugin_args = {'n_procs': 4}
        try:
            wf.inputs.inputnode.in_data = os.path.abspath(data_path)
        except AttributeError:
            pass # the workflow does not have inputnode.in_data

        wf.run(plugin=plugin, plugin_args=plugin_args)

        # Draw gantt chart only if pandas is installed
        try:
            import pandas
            pandas_flg = True
        except ImportError:
            pandas_flg = False

        if 'status_callback' in plugin_args and pandas_flg:
            draw_gantt_chart.generate_gantt_chart(cb_log_path, 4)
            dst_log_html = os.path.join(os.path.expanduser('~'), 'callback.log.html')
            copyfile(cb_log_path+'.html', dst_log_html)
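run_examples() is typically driven from a thin command-line wrapper. A minimal sketch of such a driver follows; the argument order and script name are illustrative assumptions, not part of the examples above.

# Hypothetical driver: python run_demo.py <example> <plugin> <data_path> <pipeline> [...]
import os
import sys

if __name__ == '__main__':
    example, plugin, data_path = sys.argv[1:4]
    pipelines = sys.argv[4:]
    run_examples(example, pipelines, os.path.abspath(data_path), plugin=plugin)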
Example 4
def build_collect_workflow(args, retval):
    import os
    import glob
    import warnings

    warnings.filterwarnings("ignore")
    import ast
    import pkg_resources
    from pathlib import Path
    import yaml
    import pandas as pd

    try:
        import pynets

        print(f"\n\nPyNets Version:\n{pynets.__version__}\n\n")
    except ImportError:
        print(
            "PyNets not installed! Ensure that you are using the correct python version."
        )

    # Unpack arguments into local variables
    resources = args.pm
    if resources:
        # e.g. "8, 16" -> (8, 16); literal_eval avoids executing arbitrary code
        procmem = list(ast.literal_eval(str(resources)))
    else:
        from multiprocessing import cpu_count

        nthreads = cpu_count()
        procmem = [int(nthreads), int(float(nthreads) * 2)]
    plugin_type = args.plug
    if isinstance(plugin_type, list):
        plugin_type = plugin_type[0]
    verbose = args.v
    working_path = args.basedir
    work_dir = args.work
    modality = args.modality

    os.makedirs(f"{str(Path(working_path).parent)}/all_visits_netmets_auc",
                exist_ok=True)

    wf = collect_all(working_path, modality)

    with open(pkg_resources.resource_filename("pynets", "runconfig.yaml"),
              "r") as stream:
        try:
            runtime_dict = {}
            execution_dict = {}
            hardcoded_params = yaml.safe_load(stream)
            # Each entry is a single-key mapping whose value is a one-element list
            for entry in hardcoded_params["resource_dict"]:
                key, val = next(iter(entry.items()))
                runtime_dict[key] = ast.literal_eval(val[0])
            for entry in hardcoded_params["execution_dict"]:
                key, val = next(iter(entry.items()))
                execution_dict[key] = val[0]
        except yaml.YAMLError:
            print("Failed to parse runconfig.yaml")

    os.makedirs(f"{work_dir}{'/pynets_out_collection'}", exist_ok=True)
    wf.base_dir = f"{work_dir}{'/pynets_out_collection'}"

    if verbose is True:
        from nipype import config, logging

        cfg_v = dict(
            logging={
                "workflow_level": "DEBUG",
                "utils_level": "DEBUG",
                "interface_level": "DEBUG",
                "filemanip_level": "DEBUG",
                "log_directory": str(wf.base_dir),
                "log_to_file": True,
            },
            monitoring={
                "enabled": True,
                "sample_frequency": "0.1",
                "summary_append": True,
                "summary_file": str(wf.base_dir),
            },
        )
        # Apply the config first so the loggers pick up the new settings
        config.update_config(cfg_v)
        logging.update_logging(config)
        config.enable_debug_mode()
        config.enable_resource_monitor()

        import logging

        callback_log_path = f"{wf.base_dir}/run_stats.log"
        logger = logging.getLogger("callback")
        logger.setLevel(logging.DEBUG)
        handler = logging.FileHandler(callback_log_path)
        logger.addHandler(handler)

    execution_dict["crashdump_dir"] = str(wf.base_dir)
    execution_dict["plugin"] = str(plugin_type)
    cfg = dict(execution=execution_dict)
    for key in cfg.keys():
        for setting, value in cfg[key].items():
            wf.config[key][setting] = value
    try:
        wf.write_graph(graph2use="colored", format="png")
    except BaseException:
        pass
    if verbose is True:
        from nipype.utils.profiler import log_nodes_cb

        plugin_args = {
            "n_procs": int(procmem[0]),
            "memory_gb": int(procmem[1]),
            "status_callback": log_nodes_cb,
            "scheduler": "mem_thread",
        }
    else:
        plugin_args = {
            "n_procs": int(procmem[0]),
            "memory_gb": int(procmem[1]),
            "scheduler": "mem_thread",
        }
    print("%s%s%s" % ("\nRunning with ", str(plugin_args), "\n"))
    wf.run(plugin=plugin_type, plugin_args=plugin_args)
    if verbose is True:
        from nipype.utils.draw_gantt_chart import generate_gantt_chart

        print("Plotting resource profile from run...")
        generate_gantt_chart(callback_log_path, cores=int(procmem[0]))
        handler.close()
        logger.removeHandler(handler)

    files_ = glob.glob(
        f"{str(Path(working_path).parent)}/all_visits_netmets_auc/*clean.csv"
    )

    print("Aggregating dataframes...")
    dfs = []
    for file_ in files_:
        df = pd.read_csv(file_, chunksize=100000).read()
        try:
            df.drop(df.filter(regex="Unname"), axis=1, inplace=True)
        except BaseException:
            pass
        dfs.append(df)
        del df
    df_concat(dfs, working_path)

    return
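df_concat is not shown in the example; a hypothetical stand-in that matches how it is called above (a list of cleaned DataFrames plus the working path) could simply concatenate them and write one summary CSV. The output filename is illustrative.

# Hypothetical stand-in for df_concat(); not PyNets' actual implementation
import os
import pandas as pd


def df_concat(dfs, working_path):
    frame = pd.concat(dfs, axis=0, ignore_index=True, sort=False)
    out_path = os.path.join(working_path, "all_visits_netmets_auc.csv")
    frame.to_csv(out_path, index=False)
    return out_path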
Example 5
def main():
    """Entry point"""
    import os.path as op
    from multiprocessing import cpu_count

    from nipype import config as ncfg
    from nipype.pipeline.engine import Workflow
    from mriqc.utils.bids import collect_bids_data
    from mriqc.workflows.core import build_workflow

    # Run parser
    opts = get_parser().parse_args()

    # Build settings dict
    bids_dir = op.abspath(opts.bids_dir)

    # Number of processes
    n_procs = opts.n_procs

    settings = {
        'bids_dir': bids_dir,
        'write_graph': opts.write_graph,
        'testing': opts.testing,
        'hmc_afni': opts.hmc_afni,
        'hmc_fsl': opts.hmc_fsl,
        'fft_spikes_detector': opts.fft_spikes_detector,
        'n_procs': n_procs,
        'ants_nthreads': opts.ants_nthreads,
        'output_dir': op.abspath(opts.output_dir),
        'work_dir': op.abspath(opts.work_dir),
        'verbose_reports': opts.verbose_reports or opts.testing,
        'float32': opts.float32,
        'ica': opts.ica,
        'no_sub': opts.no_sub or opts.testing,
        'email': opts.email,
        'fd_thres': opts.fd_thres,
    }

    if not settings['no_sub']:
        MRIQC_LOG.warn('Anonymized quality metrics will be submitted'
                       ' to MRIQC\'s metrics repository.'
                       ' Use --no-sub to disable submission.')

    if opts.hmc_afni:
        settings['deoblique'] = opts.deoblique
        settings['despike'] = opts.despike
        settings['correct_slice_timing'] = opts.correct_slice_timing
        if opts.start_idx:
            settings['start_idx'] = opts.start_idx
        if opts.stop_idx:
            settings['stop_idx'] = opts.stop_idx

    if opts.ants_settings:
        settings['ants_settings'] = opts.ants_settings

    log_dir = op.join(settings['output_dir'], 'logs')

    analysis_levels = opts.analysis_level
    if opts.participant_label is None:
        analysis_levels.append('group')
    analysis_levels = list(set(analysis_levels))
    if len(analysis_levels) > 2:
        raise RuntimeError('Error parsing analysis levels, got "%s"' % ', '.join(analysis_levels))

    settings['report_dir'] = opts.report_dir
    if not settings['report_dir']:
        settings['report_dir'] = op.join(settings['output_dir'], 'reports')

    check_folder(settings['output_dir'])
    if 'participant' in analysis_levels:
        check_folder(settings['work_dir'])

    check_folder(log_dir)
    check_folder(settings['report_dir'])

    # Set nipype config
    ncfg.update_config({
        'logging': {'log_directory': log_dir, 'log_to_file': True},
        'execution': {'crashdump_dir': log_dir, 'crashfile_format': 'txt'},
    })

    callback_log_path = None
    plugin_settings = {'plugin': 'Linear'}
    if opts.use_plugin is not None:
        from yaml import safe_load as loadyml
        with open(opts.use_plugin) as pfile:
            plugin_settings = loadyml(pfile)
    else:
        # Setup multiprocessing
        if settings['n_procs'] == 0:
            settings['n_procs'] = cpu_count()

        if settings['ants_nthreads'] == 0:
            if settings['n_procs'] > 1:
                # always leave one extra thread for non ANTs work,
                # don't use more than 8 threads - the speed up is minimal
                settings['ants_nthreads'] = min(settings['n_procs'] - 1, 8)
            else:
                settings['ants_nthreads'] = 1

        if settings['n_procs'] > 1:
            plugin_settings['plugin'] = 'MultiProc'
            plugin_settings['plugin_args'] = {'n_procs': settings['n_procs']}
            if opts.mem_gb:
                plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

    MRIQC_LOG.info(
        'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
        __version__, ', '.join(analysis_levels), opts.participant_label, settings)

    # Process data types
    modalities = opts.modalities

    dataset = collect_bids_data(
        settings['bids_dir'],
        modalities=modalities,
        participant_label=opts.participant_label,
        session=opts.session_id,
        run=opts.run_id,
        task=opts.task_id,
    )

    # Set up participant level
    if 'participant' in analysis_levels:
        workflow = Workflow(name='workflow_enumerator')
        workflow.base_dir = settings['work_dir']

        wf_list = []
        for mod in modalities:
            if not dataset[mod]:
                MRIQC_LOG.warn('No %s scans were found in %s', mod, settings['bids_dir'])
                continue

            wf_list.append(build_workflow(dataset[mod], mod, settings=settings))

        if wf_list:
            workflow.add_nodes(wf_list)

            if not opts.dry_run:
                if plugin_settings['plugin'] == 'MultiProc' and opts.profile:
                    import logging
                    from nipype.pipeline.plugins.callback_log import log_nodes_cb
                    plugin_settings['plugin_args']['status_callback'] = log_nodes_cb
                    callback_log_path = op.join(log_dir, 'run_stats.log')
                    logger = logging.getLogger('callback')
                    logger.setLevel(logging.DEBUG)
                    handler = logging.FileHandler(callback_log_path)
                    logger.addHandler(handler)

                workflow.run(**plugin_settings)
                if not settings['no_sub']:
                    MRIQC_LOG.warn(
                        'Anonymized quality metrics have been submitted'
                        ' to MRIQC\'s metrics repository.'
                        ' Use --no-sub to disable submission.')
                if callback_log_path is not None:
                    from nipype.utils.draw_gantt_chart import generate_gantt_chart
                    generate_gantt_chart(callback_log_path, cores=settings['n_procs'])
        else:
            raise RuntimeError('Error reading BIDS directory (%s), or the dataset is not '
                               'BIDS-compliant.' % settings['bids_dir'])

    # Set up group level
    if 'group' in analysis_levels:
        from mriqc.reports import group_html
        from mriqc.utils.misc import generate_csv, generate_pred

        reports_dir = check_folder(op.join(settings['output_dir'], 'reports'))
        derivatives_dir = op.join(settings['output_dir'], 'derivatives')

        n_group_reports = 0
        for mod in modalities:
            dataframe, out_csv = generate_csv(derivatives_dir,
                                              settings['output_dir'], mod)

            # If there are no iqm.json files, nothing to do.
            if dataframe is None:
                MRIQC_LOG.warn(
                    'No IQM-JSON files were found for the %s data type in %s. The group-level '
                    'report was not generated.', mod, derivatives_dir)
                continue

            MRIQC_LOG.info('Summary CSV table for the %s data generated (%s)', mod, out_csv)

            # out_pred = generate_pred(derivatives_dir, settings['output_dir'], mod)
            # if out_pred is not None:
            #     MRIQC_LOG.info('Predicted QA CSV table for the %s data generated (%s)',
            #                    mod, out_pred)

            out_html = op.join(reports_dir, mod + '_group.html')
            group_html(out_csv, mod,
                       csv_failed=op.join(settings['output_dir'], 'failed_' + mod + '.csv'),
                       out_file=out_html)
            MRIQC_LOG.info('Group-%s report generated (%s)', mod, out_html)
            n_group_reports += 1

        if n_group_reports == 0:
            raise Exception("No data found. No group level reports were generated.")
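Whether it is read from the --use-plugin YAML file or assembled in the multiprocessing branch, plugin_settings must end up with the shape consumed by workflow.run(**plugin_settings). A minimal sketch of that structure with illustrative values:

# Shape expected by workflow.run(**plugin_settings); the numbers are illustrative
plugin_settings = {
    'plugin': 'MultiProc',
    'plugin_args': {'n_procs': 8, 'memory_gb': 16},
}
# ...which is equivalent to:
# workflow.run(plugin='MultiProc', plugin_args={'n_procs': 8, 'memory_gb': 16})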
Example 6
def main():
    """Entry point"""
    import os
    import os.path as op
    from argparse import ArgumentParser, RawTextHelpFormatter
    from multiprocessing import cpu_count

    from nipype import config as ncfg
    from nipype.pipeline.engine import Workflow
    from mriqc import DEFAULTS
    from mriqc.utils.bids import collect_bids_data
    from mriqc.workflows.core import build_workflow
    # from mriqc.reports.utils import check_reports

    parser = ArgumentParser(description='MRI Quality Control',
                            formatter_class=RawTextHelpFormatter)

    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='mriqc v{}'.format(__version__))

    parser.add_argument('bids_dir',
                        action='store',
                        help='The directory with the input dataset '
                        'formatted according to the BIDS standard.')
    parser.add_argument(
        'output_dir',
        action='store',
        help='The directory where the output files '
        'should be stored. If you are running group level analysis '
        'this folder should be prepopulated with the results of the '
        'participant level analysis.')
    parser.add_argument(
        'analysis_level',
        action='store',
        nargs='+',
        help='Level of the analysis that will be performed. '
        'Multiple participant level analyses can be run independently '
        '(in parallel) using the same output_dir.',
        choices=['participant', 'group'])
    parser.add_argument(
        '--participant_label',
        '--subject_list',
        '-S',
        action='store',
        help='The label(s) of the participant(s) that should be analyzed. '
        'The label corresponds to sub-<participant_label> from the '
        'BIDS spec (so it does not include "sub-"). If this parameter '
        'is not provided all subjects should be analyzed. Multiple '
        'participants can be specified with a space separated list.',
        nargs="*")

    g_input = parser.add_argument_group('mriqc specific inputs')
    g_input.add_argument('-m',
                         '--modalities',
                         action='store',
                         nargs='*',
                         choices=['T1w', 'bold', 'T2w'],
                         default=['T1w', 'bold', 'T2w'])
    g_input.add_argument('-s', '--session-id', action='store')
    g_input.add_argument('-r', '--run-id', action='store')
    g_input.add_argument('--nthreads',
                         action='store',
                         type=int,
                         help='number of threads (deprecated, use --n_procs)')
    g_input.add_argument('--n_procs',
                         action='store',
                         default=0,
                         type=int,
                         help='number of threads')
    g_input.add_argument('--mem_gb',
                         action='store',
                         default=0,
                         type=int,
                         help='available total memory (GB)')
    g_input.add_argument('--write-graph',
                         action='store_true',
                         default=False,
                         help='Write workflow graph.')
    g_input.add_argument('--dry-run',
                         action='store_true',
                         default=False,
                         help='Do not run the workflow.')
    g_input.add_argument('--use-plugin',
                         action='store',
                         default=None,
                         help='nipype plugin configuration file')
    g_input.add_argument(
        '--ica',
        action='store_true',
        default=False,
        help='Run ICA on the raw data and include the components '
        'in the individual reports (slow but potentially very insightful)')

    g_input.add_argument('--testing',
                         action='store_true',
                         default=False,
                         help='use testing settings for a minimal footprint')
    g_input.add_argument(
        '--profile',
        action='store_true',
        default=False,
        help='hook up the resource profiler callback to nipype')
    g_input.add_argument(
        '--hmc-afni',
        action='store_true',
        default=True,
        help='Use AFNI 3dvolreg for head motion correction (HMC)')
    g_input.add_argument(
        '--hmc-fsl',
        action='store_true',
        default=False,
        help='Use FSL MCFLIRT for head motion correction (HMC)')
    g_input.add_argument(
        '-f',
        '--float32',
        action='store_true',
        default=DEFAULTS['float32'],
        help="Cast the input data to float32 if it's represented in higher "
             "precision (saves space and improves performance)")
    g_input.add_argument('--fft-spikes-detector',
                         action='store_true',
                         default=False,
                         help='Turn on FFT based spike detector (slow).')

    g_outputs = parser.add_argument_group('mriqc specific outputs')
    g_outputs.add_argument('-w',
                           '--work-dir',
                           action='store',
                           default=op.join(os.getcwd(), 'work'))
    g_outputs.add_argument('--report-dir', action='store')
    g_outputs.add_argument('--verbose-reports',
                           default=False,
                           action='store_true')

    # ANTs options
    g_ants = parser.add_argument_group(
        'specific settings for ANTs registrations')
    g_ants.add_argument(
        '--ants-nthreads',
        action='store',
        type=int,
        default=DEFAULTS['ants_nthreads'],
        help='number of threads that will be set in ANTs processes')
    g_ants.add_argument('--ants-settings',
                        action='store',
                        help='path to JSON file with settings for ANTS')

    # AFNI head motion correction settings
    g_afni = parser.add_argument_group(
        'specific settings for AFNI head motion correction')
    g_afni.add_argument(
        '--deoblique',
        action='store_true',
        default=False,
        help='Deoblique the functional scans during head motion '
        'correction preprocessing')
    g_afni.add_argument(
        '--despike',
        action='store_true',
        default=False,
        help='Despike the functional scans during head motion correction '
        'preprocessing')
    g_afni.add_argument(
        '--start-idx',
        action='store',
        type=int,
        help='Initial volume in functional timeseries that should be '
        'considered for preprocessing')
    g_afni.add_argument(
        '--stop-idx',
        action='store',
        type=int,
        help='Final volume in functional timeseries that should be '
        'considered for preprocessing')
    g_afni.add_argument('--correct-slice-timing',
                        action='store_true',
                        default=False,
                        help='Perform slice timing correction')

    opts = parser.parse_args()

    # Build settings dict
    bids_dir = op.abspath(opts.bids_dir)

    # Number of processes
    n_procs = 0
    if opts.nthreads is not None:
        MRIQC_LOG.warn('Option --nthreads has been deprecated in mriqc 0.8.8. '
                       'Please use --n_procs instead.')
        n_procs = opts.nthreads
    if opts.n_procs:
        # only override the deprecated --nthreads value when --n_procs is set
        n_procs = opts.n_procs

    settings = {
        'bids_dir': bids_dir,
        'write_graph': opts.write_graph,
        'testing': opts.testing,
        'hmc_afni': opts.hmc_afni,
        'hmc_fsl': opts.hmc_fsl,
        'fft_spikes_detector': opts.fft_spikes_detector,
        'n_procs': n_procs,
        'ants_nthreads': opts.ants_nthreads,
        'output_dir': op.abspath(opts.output_dir),
        'work_dir': op.abspath(opts.work_dir),
        'verbose_reports': opts.verbose_reports or opts.testing,
        'float32': opts.float32,
        'ica': opts.ica
    }

    if opts.hmc_afni:
        settings['deoblique'] = opts.deoblique
        settings['despike'] = opts.despike
        settings['correct_slice_timing'] = opts.correct_slice_timing
        if opts.start_idx:
            settings['start_idx'] = opts.start_idx
        if opts.stop_idx:
            settings['stop_idx'] = opts.stop_idx

    if opts.ants_settings:
        settings['ants_settings'] = opts.ants_settings

    log_dir = op.join(settings['output_dir'], 'logs')

    analysis_levels = opts.analysis_level
    if opts.participant_label is None:
        analysis_levels.append('group')
    analysis_levels = list(set(analysis_levels))
    if len(analysis_levels) > 2:
        raise RuntimeError('Error parsing analysis levels, got "%s"' %
                           ', '.join(analysis_levels))

    settings['report_dir'] = opts.report_dir
    if not settings['report_dir']:
        settings['report_dir'] = op.join(settings['output_dir'], 'reports')

    check_folder(settings['output_dir'])
    if 'participant' in analysis_levels:
        check_folder(settings['work_dir'])

    check_folder(log_dir)
    check_folder(settings['report_dir'])

    # Set nipype config
    ncfg.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': True
        },
        'execution': {
            'crashdump_dir': log_dir,
            'crashfile_format': 'txt'
        },
    })

    callback_log_path = None
    plugin_settings = {'plugin': 'Linear'}
    if opts.use_plugin is not None:
        from yaml import safe_load as loadyml
        with open(opts.use_plugin) as pfile:
            plugin_settings = loadyml(pfile)
    else:
        # Setup multiprocessing
        if settings['n_procs'] == 0:
            settings['n_procs'] = 1
            max_parallel_ants = cpu_count() // settings['ants_nthreads']
            if max_parallel_ants > 1:
                settings['n_procs'] = max_parallel_ants

        if settings['n_procs'] > 1:
            plugin_settings['plugin'] = 'MultiProc'
            plugin_settings['plugin_args'] = {'n_procs': settings['n_procs']}
            if opts.mem_gb:
                plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

    MRIQC_LOG.info(
        'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
        __version__, ', '.join(analysis_levels), opts.participant_label,
        settings)

    # Process data types
    modalities = opts.modalities

    dataset = collect_bids_data(settings['bids_dir'],
                                participant_label=opts.participant_label)

    # Set up participant level
    if 'participant' in analysis_levels:
        workflow = Workflow(name='workflow_enumerator')
        workflow.base_dir = settings['work_dir']

        wf_list = []
        for mod in modalities:
            if not dataset[mod]:
                MRIQC_LOG.warn('No %s scans were found in %s', mod,
                               settings['bids_dir'])
                continue

            wf_list.append(build_workflow(dataset[mod], mod,
                                          settings=settings))

        if wf_list:
            workflow.add_nodes(wf_list)

            if not opts.dry_run:
                if opts.profile:
                    import logging
                    from nipype.pipeline.plugins.callback_log import log_nodes_cb
                    plugin_settings['plugin_args'][
                        'status_callback'] = log_nodes_cb
                    callback_log_path = op.join(log_dir, 'run_stats.log')
                    logger = logging.getLogger('callback')
                    logger.setLevel(logging.DEBUG)
                    handler = logging.FileHandler(callback_log_path)
                    logger.addHandler(handler)

                workflow.run(**plugin_settings)
                if callback_log_path is not None:
                    from nipype.utils.draw_gantt_chart import generate_gantt_chart
                    generate_gantt_chart(callback_log_path,
                                         cores=settings['n_procs'])
        else:
            raise RuntimeError(
                'Error reading BIDS directory (%s), or the dataset is not '
                'BIDS-compliant.' % settings['bids_dir'])

    # Set up group level
    if 'group' in analysis_levels:
        from mriqc.reports import group_html
        from mriqc.utils.misc import generate_csv, generate_pred

        reports_dir = check_folder(op.join(settings['output_dir'], 'reports'))
        derivatives_dir = op.join(settings['output_dir'], 'derivatives')

        n_group_reports = 0
        for mod in modalities:
            dataframe, out_csv = generate_csv(derivatives_dir,
                                              settings['output_dir'], mod)

            # If there are no iqm.json files, nothing to do.
            if dataframe is None:
                MRIQC_LOG.warn(
                    'No IQM-JSON files were found for the %s data type in %s. The group-level '
                    'report was not generated.', mod, derivatives_dir)
                continue

            MRIQC_LOG.info('Summary CSV table for the %s data generated (%s)',
                           mod, out_csv)

            out_pred = generate_pred(derivatives_dir, settings['output_dir'],
                                     mod)
            if out_pred is not None:
                MRIQC_LOG.info(
                    'Predicted QA CSV table for the %s data generated (%s)',
                    mod, out_pred)

            out_html = op.join(reports_dir, mod + '_group.html')
            group_html(out_csv,
                       mod,
                       csv_failed=op.join(settings['output_dir'],
                                          'failed_' + mod + '.csv'),
                       out_file=out_html)
            MRIQC_LOG.info('Group-%s report generated (%s)', mod, out_html)
            n_group_reports += 1

        if n_group_reports == 0:
            raise Exception(
                "No data found. No group level reports were generated.")