Example #1
    def create(self):  #, **kwargs):
        """ Create the nodes and connections for the workflow """
        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        print("=" * 80)
        print(csvOut.outputs.__dict__)
        print("=" * 80)

        iters = {}
        label = list(csvOut.outputs.__dict__.keys())[0]
        result = getattr(csvOut.outputs, label)  # safer than eval on a dynamic attribute name
        iters['tests'], iters['trains'] = subsample_crossValidationSet(
            result, self.sample_size.default_value)
        # Main event
        out_fields = ['T1', 'T2', 'Label', 'trainindex', 'testindex']
        inputsND = Node(interface=IdentityInterface(fields=out_fields),
                        run_without_submitting=True,
                        name='inputs')
        inputsND.iterables = [('trainindex', iters['trains']),
                              ('testindex', iters['tests'])]
        if not self.hasHeader.default_value:
            inputsND.inputs.T1 = csvOut.outputs.column_0
            inputsND.inputs.Label = csvOut.outputs.column_1
            inputsND.inputs.T2 = csvOut.outputs.column_2
        else:
            inputsND.inputs.T1 = csvOut.outputs.__dict__['t1']
            inputsND.inputs.Label = csvOut.outputs.__dict__['label']
            inputsND.inputs.T2 = csvOut.outputs.__dict__['t2']
            pass  #TODO
        metaflow = Workflow(name='metaflow')
        metaflow.config['execution'] = {
            'plugin': 'Linear',
            'stop_on_first_crash': 'false',
            'stop_on_first_rerun': 'false',  # This stops at the first attempt to rerun, before running and before deleting previous results.
            'hash_method': 'timestamp',
            'single_thread_matlab': 'true',  # MATLAB 2011a+ uses multiple cores for matrix multiplication by default.
            'remove_unnecessary_outputs': 'true',
            'use_relative_paths': 'false',  # Relative paths should be on; they require a hash update when changed.
            'remove_node_directories': 'false',  # Experimental
            'local_hash_check': 'false'
        }

        metaflow.add_nodes([inputsND])
        """import pdb; pdb.set_trace()"""
        fusionflow = FusionLabelWorkflow()
        self.connect([
            (metaflow, fusionflow, [('inputs.trainindex', 'trainT1s.index'),
                                    ('inputs.T1', 'trainT1s.inlist')]),
            (metaflow, fusionflow, [('inputs.trainindex', 'trainLabels.index'),
                                    ('inputs.Label', 'trainLabels.inlist')]),
            (metaflow, fusionflow, [('inputs.testindex', 'testT1s.index'),
                                    ('inputs.T1', 'testT1s.inlist')])
        ])
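The core pattern above is an IdentityInterface node fanned out with iterables and registered on the workflow via add_nodes. A minimal, self-contained sketch of just that pattern (field names and iterable values are placeholders, assuming only that nipype is installed):

from nipype import Node, Workflow
from nipype.interfaces.utility import IdentityInterface

# One 'inputs' node whose fields are expanded over the iterable values,
# producing one parameterized copy of the downstream graph per element.
inputs = Node(IdentityInterface(fields=['trainindex', 'testindex']),
              name='inputs')
inputs.iterables = [('trainindex', [[0, 1], [2, 3]]),
                    ('testindex', [[2, 3], [0, 1]])]

wf = Workflow(name='metaflow', base_dir='.')
wf.add_nodes([inputs])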
Example #2
    def create(self):  # , **kwargs):
        """ Create the nodes and connections for the workflow """
        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        print(("=" * 80))
        print((csvOut.outputs.__dict__))
        print(("=" * 80))

        iters = OrderedDict()
        label = list(csvOut.outputs.__dict__.keys())[0]
        result = getattr(csvOut.outputs, label)  # safer than eval on a dynamic attribute name
        iters['tests'], iters['trains'] = subsample_crossValidationSet(result, self.sample_size.default_value)
        # Main event
        out_fields = ['T1', 'T2', 'Label', 'trainindex', 'testindex']
        inputsND = Node(interface=IdentityInterface(fields=out_fields),
                        run_without_submitting=True, name='inputs')
        inputsND.iterables = [('trainindex', iters['trains']),
                              ('testindex', iters['tests'])]
        if not self.hasHeader.default_value:
            inputsND.inputs.T1 = csvOut.outputs.column_0
            inputsND.inputs.Label = csvOut.outputs.column_1
            inputsND.inputs.T2 = csvOut.outputs.column_2
        else:
            inputsND.inputs.T1 = csvOut.outputs.__dict__['t1']
            inputsND.inputs.Label = csvOut.outputs.__dict__['label']
            inputsND.inputs.T2 = csvOut.outputs.__dict__['t2']
            pass  # TODO
        metaflow = Workflow(name='metaflow')
        metaflow.config['execution'] = {
            'plugin': 'Linear',
            'stop_on_first_crash': 'false',
            'stop_on_first_rerun': 'false',  # This stops at the first attempt to rerun, before running and before deleting previous results.
            'hash_method': 'timestamp',
            'single_thread_matlab': 'true',  # MATLAB 2011a+ uses multiple cores for matrix multiplication by default.
            'remove_unnecessary_outputs': 'true',
            'use_relative_paths': 'false',  # Relative paths should be on; they require a hash update when changed.
            'remove_node_directories': 'false',  # Experimental
            'local_hash_check': 'false'
        }

        metaflow.add_nodes([inputsND])
        """import pdb; pdb.set_trace()"""
        fusionflow = FusionLabelWorkflow()
        self.connect(
            [(metaflow, fusionflow, [('inputs.trainindex', 'trainT1s.index'), ('inputs.T1', 'trainT1s.inlist')]),
             (metaflow, fusionflow,
              [('inputs.trainindex', 'trainLabels.index'), ('inputs.Label', 'trainLabels.inlist')]),
             (metaflow, fusionflow, [('inputs.testindex', 'testT1s.index'), ('inputs.T1', 'testT1s.inlist')])
             ])
Example #3
    def create(self):  #, **kwargs):
        """ Create the nodes and connections for the workflow """
        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        iters = {}
        label = list(csvOut.outputs.__dict__.keys())[0]  # .keys() is not subscriptable in Python 3
        result = getattr(csvOut.outputs, label)
        iters['tests'], iters['samples'] = sample_test_lists(
            result, self.sample_size.default_value)
        # Main event
        out_fields = ['T1', 'T2', 'Label', 'sampleindex', 'testindex']
        inputs = Node(interface=IdentityInterface(fields=out_fields),
                      run_without_submitting=True,
                      name='inputs')
        inputs.iterables = [('sampleindex', iters['samples']),
                            ('testindex', iters['tests'])]
        if not self.hasHeader.default_value:
            inputs.inputs.T1 = csvOut.outputs.column_0
            inputs.inputs.Label = csvOut.outputs.column_1
            inputs.inputs.T2 = csvOut.outputs.column_2
        else:
            pass  #TODO
        metaflow = Workflow(name='metaflow')
        metaflow.add_nodes([inputs])
        # import pdb; pdb.set_trace()  # leftover debugging, disabled
        fusionflow = FusionLabelWorkflow()
        self.connect([
            (metaflow, fusionflow, [('inputs.sampleindex', 'sampleT1s.index'),
                                    ('inputs.T1', 'sampleT1s.inlist')]),
            (metaflow, fusionflow, [('inputs.sampleindex', 'sampleT2s.index'),
                                    ('inputs.T2', 'sampleT2s.inlist')]),
            (metaflow, fusionflow, [('inputs.sampleindex',
                                     'sampleLabels.index'),
                                    ('inputs.Label', 'sampleLabels.inlist')]),
            (metaflow, fusionflow, [('inputs.testindex', 'testT1s.index'),
                                    ('inputs.T1', 'testT1s.inlist')]),
            (metaflow, fusionflow, [('inputs.testindex', 'testT2s.index'),
                                    ('inputs.T2', 'testT2s.inlist')]),
            (metaflow, fusionflow, [('inputs.testindex', 'testLabels.index'),
                                    ('inputs.Label', 'testLabels.inlist')])
        ])
Example #4
def init_mriqc_wf():
    """Create a multi-subject MRIQC workflow."""
    from .. import config

    workflow = Workflow(name="mriqc_wf")
    workflow.base_dir = config.execution.work_dir

    if "bold" in config.workflow.inputs:
        workflow.add_nodes([fmri_qc_workflow()])

    if {"T1w", "T2w"}.intersection(config.workflow.inputs.keys()):
        workflow.add_nodes([anat_qc_workflow()])

    if not workflow._get_all_nodes():
        return None

    return workflow
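Note that _get_all_nodes() is a private Workflow method. A sketch of the same emptiness check through nipype's public API (assuming list_node_names() is available in the installed version):

    if not workflow.list_node_names():
        return None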
Example #5
def init_mriqc_wf():
    """Create a multi-subject MRIQC workflow."""
    from mriqc import config

    # Create parent workflow
    workflow = Workflow(name="mriqc_wf")
    workflow.base_dir = config.execution.work_dir

    # Create fMRI QC workflow
    if FMRI_KEY in config.workflow.inputs:
        workflow.add_nodes([fmri_qc_workflow()])

    # Create sMRI QC workflow
    input_keys = config.workflow.inputs.keys()
    anatomical_flag = any(key in input_keys for key in ANATOMICAL_KEYS)
    if anatomical_flag:
        workflow.add_nodes([anat_qc_workflow()])

    # Return non-empty workflow, else None
    if workflow._get_all_nodes():
        return workflow
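FMRI_KEY and ANATOMICAL_KEYS are module-level constants that this snippet does not show. Judging from Example #4, they would be defined roughly as follows (an assumption for illustration, not the project's actual source):

FMRI_KEY = "bold"                   # key naming functional inputs
ANATOMICAL_KEYS = ("T1w", "T2w")    # keys naming structural inputs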
Example #6
File: spm12.py Project: xgrg/alfa
def create_workflow(sources, subjects, basedir=None):
    import os.path as osp
    import tempfile
    from nipype.pipeline.engine import Workflow, Node
    if len(sources) != len(subjects):
        raise ValueError('Input files and subjects should be of equal size.')
    wf_name = 'spm12_%s' % subjects[0] if len(subjects) == 1 else 'spm12'
    if len(sources) == 1 and basedir is None:
        wf_basedir = osp.dirname(sources[0])
    elif basedir is not None:
        wf_basedir = basedir
    else:
        wf_basedir = tempfile.mkdtemp()

    w = Workflow(wf_name, base_dir=wf_basedir)
    nodes = []
    for subject, source in zip(subjects, sources):
        nodes.extend(create_nodes(source, subject))
    w.add_nodes(nodes)

    for i in range(0, len(nodes), 3):
        w.connect(nodes[i], 'coregistered_source', nodes[i+1], 'source')
        w.connect(nodes[i+1], 'coregistered_source', nodes[i+2], 'channel_files')
    return w
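A usage sketch under stated assumptions: the paths are hypothetical and create_nodes() must be importable from the same project.

wf = create_workflow(sources=['/data/sub-01/anat.nii'],
                     subjects=['sub-01'],
                     basedir='/tmp/spm12_work')
wf.run()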
Example #7
def main():
    """Entry point"""
    from nipype import config as ncfg
    from nipype.pipeline.engine import Workflow
    from mriqc import DEFAULTS
    from mriqc.utils.bids import collect_bids_data
    from mriqc.workflows.core import build_workflow
    # from mriqc.reports.utils import check_reports

    parser = ArgumentParser(description='MRI Quality Control',
                            formatter_class=RawTextHelpFormatter)

    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='mriqc v{}'.format(__version__))

    parser.add_argument('bids_dir',
                        action='store',
                        help='The directory with the input dataset '
                        'formatted according to the BIDS standard.')
    parser.add_argument(
        'output_dir',
        action='store',
        help='The directory where the output files '
        'should be stored. If you are running group level analysis, '
        'this folder should be prepopulated with the results of the '
        'participant level analysis.')
    parser.add_argument(
        'analysis_level',
        action='store',
        nargs='+',
        help='Level of the analysis that will be performed. '
        'Multiple participant level analyses can be run independently '
        '(in parallel) using the same output_dir.',
        choices=['participant', 'group'])
    parser.add_argument(
        '--participant_label',
        '--subject_list',
        '-S',
        action='store',
        help='The label(s) of the participant(s) that should be analyzed. '
        'The label corresponds to sub-<participant_label> from the '
        'BIDS spec (so it does not include "sub-"). If this parameter '
        'is not provided all subjects should be analyzed. Multiple '
        'participants can be specified with a space separated list.',
        nargs="*")

    g_input = parser.add_argument_group('mriqc specific inputs')
    g_input.add_argument('-m',
                         '--modalities',
                         action='store',
                         nargs='*',
                         choices=['T1w', 'bold', 'T2w'],
                         default=['T1w', 'bold', 'T2w'])
    g_input.add_argument('-s', '--session-id', action='store')
    g_input.add_argument('-r', '--run-id', action='store')
    g_input.add_argument('--nthreads',
                         action='store',
                         type=int,
                         help='number of threads')
    g_input.add_argument('--n_procs',
                         action='store',
                         default=0,
                         type=int,
                         help='number of threads')
    g_input.add_argument('--mem_gb',
                         action='store',
                         default=0,
                         type=int,
                         help='available total memory')
    g_input.add_argument('--write-graph',
                         action='store_true',
                         default=False,
                         help='Write workflow graph.')
    g_input.add_argument('--dry-run',
                         action='store_true',
                         default=False,
                         help='Do not run the workflow.')
    g_input.add_argument('--use-plugin',
                         action='store',
                         default=None,
                         help='nipype plugin configuration file')

    g_input.add_argument('--testing',
                         action='store_true',
                         default=False,
                         help='use testing settings for a minimal footprint')
    g_input.add_argument(
        '--hmc-afni',
        action='store_true',
        default=True,
        help='Use AFNI 3dvolreg for head motion correction (HMC)')
    g_input.add_argument(
        '--hmc-fsl',
        action='store_true',
        default=False,
        help='Use FSL MCFLIRT for head motion correction (HMC)')
    g_input.add_argument(
        '-f',
        '--float32',
        action='store_true',
        default=DEFAULTS['float32'],
        help="Cast the input data to float32 if it's represented in higher "
        "precision (saves space and improves performance)")
    g_input.add_argument('--fft-spikes-detector',
                         action='store_true',
                         default=False,
                         help='Turn on FFT based spike detector (slow).')

    g_outputs = parser.add_argument_group('mriqc specific outputs')
    g_outputs.add_argument('-w',
                           '--work-dir',
                           action='store',
                           default=op.join(os.getcwd(), 'work'))
    g_outputs.add_argument('--report-dir', action='store')
    g_outputs.add_argument('--verbose-reports',
                           default=False,
                           action='store_true')

    # ANTs options
    g_ants = parser.add_argument_group(
        'specific settings for ANTs registrations')
    g_ants.add_argument(
        '--ants-nthreads',
        action='store',
        type=int,
        default=DEFAULTS['ants_nthreads'],
        help='number of threads that will be set in ANTs processes')
    g_ants.add_argument('--ants-settings',
                        action='store',
                        help='path to JSON file with settings for ANTS')

    # AFNI head motion correction settings
    g_afni = parser.add_argument_group(
        'specific settings for AFNI head motion correction')
    g_afni.add_argument(
        '--deoblique',
        action='store_true',
        default=False,
        help='Deoblique the functional scans during head motion '
        'correction preprocessing')
    g_afni.add_argument(
        '--despike',
        action='store_true',
        default=False,
        help='Despike the functional scans during head motion correction '
        'preprocessing')
    g_afni.add_argument(
        '--start-idx',
        action='store',
        type=int,
        help='Initial volume in functional timeseries that should be '
        'considered for preprocessing')
    g_afni.add_argument(
        '--stop-idx',
        action='store',
        type=int,
        help='Final volume in functional timeseries that should be '
        'considered for preprocessing')
    g_afni.add_argument('--correct-slice-timing',
                        action='store_true',
                        default=False,
                        help='Perform slice timing correction')

    opts = parser.parse_args()

    # Build settings dict
    bids_dir = op.abspath(opts.bids_dir)

    # Number of processes
    n_procs = 0
    if opts.nthreads is not None:
        MRIQC_LOG.warn('Option --nthreads has been deprecated in mriqc 0.8.8. '
                       'Please use --n_procs instead.')
        n_procs = opts.nthreads
    if opts.n_procs:  # default is 0; avoid clobbering a deprecated --nthreads value
        n_procs = opts.n_procs

    # Check physical memory
    total_memory = opts.mem_gb
    if total_memory < 0:
        try:
            from psutil import virtual_memory
            total_memory = virtual_memory().total // (1024**3) + 1
        except ImportError:
            MRIQC_LOG.warn(
                'Total physical memory could not be estimated, using %d '
                'GB as default', DEFAULT_MEM_GB)
            total_memory = DEFAULT_MEM_GB

    if total_memory > 0:
        av_procs = total_memory // 4
        if av_procs < 1:
            MRIQC_LOG.warn(
                'Total physical memory is less than 4GB, memory allocation'
                ' problems are likely to occur.')
            n_procs = 1
        elif n_procs > av_procs:
            n_procs = av_procs

    settings = {
        'bids_dir': bids_dir,
        'write_graph': opts.write_graph,
        'testing': opts.testing,
        'hmc_afni': opts.hmc_afni,
        'hmc_fsl': opts.hmc_fsl,
        'fft_spikes_detector': opts.fft_spikes_detector,
        'n_procs': n_procs,
        'ants_nthreads': opts.ants_nthreads,
        'output_dir': op.abspath(opts.output_dir),
        'work_dir': op.abspath(opts.work_dir),
        'verbose_reports': opts.verbose_reports or opts.testing,
        'float32': opts.float32
    }

    if opts.hmc_afni:
        settings['deoblique'] = opts.deoblique
        settings['despike'] = opts.despike
        settings['correct_slice_timing'] = opts.correct_slice_timing
        if opts.start_idx:
            settings['start_idx'] = opts.start_idx
        if opts.stop_idx:
            settings['stop_idx'] = opts.stop_idx

    if opts.ants_settings:
        settings['ants_settings'] = opts.ants_settings

    log_dir = op.join(settings['output_dir'], 'logs')

    analysis_levels = opts.analysis_level
    if opts.participant_label is None:
        analysis_levels.append('group')
    analysis_levels = list(set(analysis_levels))
    if len(analysis_levels) > 2:
        raise RuntimeError('Error parsing analysis levels, got "%s"' %
                           ', '.join(analysis_levels))

    settings['report_dir'] = opts.report_dir
    if not settings['report_dir']:
        settings['report_dir'] = op.join(settings['output_dir'], 'reports')

    check_folder(settings['output_dir'])
    if 'participant' in analysis_levels:
        check_folder(settings['work_dir'])

    check_folder(log_dir)
    check_folder(settings['report_dir'])

    # Set nipype config
    ncfg.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': True
        },
        'execution': {
            'crashdump_dir': log_dir
        }
    })

    plugin_settings = {'plugin': 'Linear'}
    if opts.use_plugin is not None:
        from yaml import safe_load
        with open(opts.use_plugin) as pfile:
            plugin_settings = safe_load(pfile)
    else:
        # Setup multiprocessing
        if settings['n_procs'] == 0:
            settings['n_procs'] = 1
            max_parallel_ants = cpu_count() // settings['ants_nthreads']
            if max_parallel_ants > 1:
                settings['n_procs'] = max_parallel_ants

        if settings['n_procs'] > 1:
            plugin_settings['plugin'] = 'MultiProc'
            plugin_settings['plugin_args'] = {'n_procs': settings['n_procs']}

    MRIQC_LOG.info(
        'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
        __version__, ', '.join(analysis_levels), opts.participant_label,
        settings)

    # Process data types
    modalities = opts.modalities

    dataset = collect_bids_data(settings['bids_dir'],
                                participant_label=opts.participant_label)

    # Set up participant level
    if 'participant' in analysis_levels:
        workflow = Workflow(name='workflow_enumerator')
        workflow.base_dir = settings['work_dir']

        wf_list = []
        for mod in modalities:
            if not dataset[mod]:
                MRIQC_LOG.warn('No %s scans were found in %s', mod,
                               settings['bids_dir'])
                continue

            wf_list.append(build_workflow(dataset[mod], mod,
                                          settings=settings))

        if wf_list:
            workflow.add_nodes(wf_list)

            if not opts.dry_run:
                workflow.run(**plugin_settings)
        else:
            raise RuntimeError(
                'Error reading BIDS directory (%s), or the dataset is not '
                'BIDS-compliant.' % settings['bids_dir'])

    # Set up group level
    if 'group' in analysis_levels:
        from mriqc.reports import group_html
        from mriqc.utils.misc import generate_csv, generate_pred

        reports_dir = check_folder(op.join(settings['output_dir'], 'reports'))
        derivatives_dir = op.join(settings['output_dir'], 'derivatives')

        n_group_reports = 0
        for mod in modalities:
            dataframe, out_csv = generate_csv(derivatives_dir,
                                              settings['output_dir'], mod)

            # If there are no iqm.json files, nothing to do.
            if dataframe is None:
                MRIQC_LOG.warn(
                    'No IQM-JSON files were found for the %s data type in %s. The group-level '
                    'report was not generated.', mod, derivatives_dir)
                continue

            MRIQC_LOG.info('Summary CSV table for the %s data generated (%s)',
                           mod, out_csv)

            out_pred = generate_pred(derivatives_dir, settings['output_dir'],
                                     mod)
            if out_pred is not None:
                MRIQC_LOG.info(
                    'Predicted QA CSV table for the %s data generated (%s)',
                    mod, out_pred)

            out_html = op.join(reports_dir, mod + '_group.html')
            group_html(out_csv,
                       mod,
                       csv_failed=op.join(settings['output_dir'],
                                          'failed_' + mod + '.csv'),
                       out_file=out_html)
            MRIQC_LOG.info('Group-%s report generated (%s)', mod, out_html)
            n_group_reports += 1

        if n_group_reports == 0:
            raise Exception(
                "No data found. No group level reports were generated.")
Example #8
    def create(self):  # , **kwargs):
        """ Create the nodes and connections for the workflow """

        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        print(("=" * 80))
        print((csvOut.outputs.__dict__))
        print(("=" * 80))

        iters = OrderedDict()
        label = list(csvOut.outputs.__dict__.keys())[0]
        result = getattr(csvOut.outputs, label)  # safer than eval on a dynamic attribute name
        iters["tests"], iters["trains"] = sample_crossvalidation_set(
            result, self.sample_size.default_value
        )
        # Main event
        out_fields = ["T1", "T2", "Label", "trainindex", "testindex"]
        inputsND = Node(
            interface=IdentityInterface(fields=out_fields),
            run_without_submitting=True,
            name="inputs",
        )
        inputsND.iterables = [
            ("trainindex", iters["trains"]),
            ("testindex", iters["tests"]),
        ]
        if not self.hasHeader.default_value:
            inputsND.inputs.T1 = csvOut.outputs.column_0
            inputsND.inputs.Label = csvOut.outputs.column_1
            inputsND.inputs.T2 = csvOut.outputs.column_2
        else:
            inputsND.inputs.T1 = csvOut.outputs.__dict__["t1"]
            inputsND.inputs.Label = csvOut.outputs.__dict__["label"]
            inputsND.inputs.T2 = csvOut.outputs.__dict__["t2"]
            pass  # TODO
        metaflow = Workflow(name="metaflow")
        metaflow.config["execution"] = {
            "plugin": "Linear",
            "stop_on_first_crash": "false",
            "stop_on_first_rerun": "false",
            # This stops at first attempt to rerun, before running, and before deleting previous results.
            "hash_method": "timestamp",
            "single_thread_matlab": "true",  # Multi-core 2011a  multi-core for matrix multiplication.
            "remove_unnecessary_outputs": "true",
            "use_relative_paths": "false",  # relative paths should be on, require hash update when changed.
            "remove_node_directories": "false",  # Experimental
            "local_hash_check": "false",
        }

        metaflow.add_nodes([inputsND])
        """import pdb; pdb.set_trace()"""
        fusionflow = FusionLabelWorkflow()
        self.connect(
            [
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.trainindex", "trainT1s.index"),
                        ("inputs.T1", "trainT1s.inlist"),
                    ],
                ),
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.trainindex", "trainLabels.index"),
                        ("inputs.Label", "trainLabels.inlist"),
                    ],
                ),
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.testindex", "testT1s.index"),
                        ("inputs.T1", "testT1s.inlist"),
                    ],
                ),
            ]
        )
Example #9
def init_mriqc(opts, retval):
    """Build the workflow enumerator"""

    from bids.grabbids import BIDSLayout
    from nipype import config as ncfg
    from nipype.pipeline.engine import Workflow

    from ..utils.bids import collect_bids_data
    from ..workflows.core import build_workflow

    retval['workflow'] = None
    retval['plugin_settings'] = None

    # Build settings dict
    bids_dir = Path(opts.bids_dir).expanduser()
    output_dir = Path(opts.output_dir).expanduser()

    # Number of processes
    n_procs = opts.n_procs or cpu_count()

    settings = {
        'bids_dir': bids_dir.resolve(),
        'output_dir': output_dir.resolve(),
        'work_dir': opts.work_dir.expanduser().resolve(),
        'write_graph': opts.write_graph,
        'n_procs': n_procs,
        'testing': opts.testing,
        'hmc_afni': opts.hmc_afni,
        'hmc_fsl': opts.hmc_fsl,
        'fft_spikes_detector': opts.fft_spikes_detector,
        'ants_nthreads': opts.ants_nthreads,
        'ants_float': opts.ants_float,
        'verbose_reports': opts.verbose_reports or opts.testing,
        'float32': opts.float32,
        'ica': opts.ica,
        'no_sub': opts.no_sub,
        'email': opts.email,
        'fd_thres': opts.fd_thres,
        'webapi_url': opts.webapi_url,
        'webapi_port': opts.webapi_port,
        'upload_strict': opts.upload_strict,
    }

    if opts.hmc_afni:
        settings['deoblique'] = opts.deoblique
        settings['despike'] = opts.despike
        settings['correct_slice_timing'] = opts.correct_slice_timing
        if opts.start_idx:
            settings['start_idx'] = opts.start_idx
        if opts.stop_idx:
            settings['stop_idx'] = opts.stop_idx

    if opts.ants_settings:
        settings['ants_settings'] = opts.ants_settings

    if opts.dsname:
        settings['dataset_name'] = opts.dsname

    log_dir = settings['output_dir'] / 'logs'

    # Create directories
    log_dir.mkdir(parents=True, exist_ok=True)
    settings['work_dir'].mkdir(parents=True, exist_ok=True)

    # Set nipype config
    ncfg.update_config({
        'logging': {'log_directory': str(log_dir), 'log_to_file': True},
        'execution': {
            'crashdump_dir': str(log_dir), 'crashfile_format': 'txt',
            'resource_monitor': opts.profile},
    })

    # Plugin configuration
    plugin_settings = {}
    if n_procs == 1:
        plugin_settings['plugin'] = 'Linear'

        if settings['ants_nthreads'] == 0:
            settings['ants_nthreads'] = 1
    else:
        plugin_settings['plugin'] = 'MultiProc'
        plugin_settings['plugin_args'] = {'n_procs': n_procs}
        if opts.mem_gb:
            plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

        if settings['ants_nthreads'] == 0:
            # always leave one extra thread for non ANTs work,
            # don't use more than 8 threads - the speed up is minimal
            settings['ants_nthreads'] = min(settings['n_procs'] - 1, 8)

    # Overwrite options if --use-plugin provided
    if opts.use_plugin and opts.use_plugin.exists():
        from yaml import safe_load
        with opts.use_plugin.open() as pfile:
            plugin_settings.update(safe_load(pfile))

    # Process data types
    modalities = opts.modalities

    layout = BIDSLayout(str(settings['bids_dir']),
                        exclude=['derivatives', 'sourcedata'])
    dataset = collect_bids_data(
        layout,
        participant_label=opts.participant_label,
        session=opts.session_id,
        run=opts.run_id,
        task=opts.task_id,
        bids_type=modalities,
    )

    workflow = Workflow(name='workflow_enumerator')
    workflow.base_dir = settings['work_dir']

    wf_list = []
    subject_list = []
    for mod in modalities:
        if dataset[mod]:
            wf_list.append(build_workflow(dataset[mod], mod, settings=settings))
            subject_list += dataset[mod]

    retval['subject_list'] = subject_list
    if not wf_list:
        retval['return_code'] = 1
        return retval

    workflow.add_nodes(wf_list)
    retval['plugin_settings'] = plugin_settings
    retval['workflow'] = workflow
    retval['return_code'] = 0
    return retval
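A sketch of how a caller might consume the retval contract above (assumed, mirroring how Example #15 launches its workflow):

retval = {}
init_mriqc(opts, retval)   # opts: an argparse.Namespace as parsed elsewhere
if retval['return_code'] == 0:
    retval['workflow'].run(**retval['plugin_settings'])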
Example #10
                        (subject_id))[0]
        fssubjects_dir = "/scr/adenauer1/internet_study/freesurfer/"

        output_file = "/scr/adenauer1/internet_study/results/%s/report.pdf" % (
            subject_id)

        report = Node(Function(input_names=[
            'subject_id', 'tsnr_file', 'realignment_parameters_file',
            'mean_epi_file', 'mask_file', 'reg_file', 'fssubjects_dir',
            'similarity_distribution', 'mean_FD_distribution',
            'tsnr_distributions', 'output_file'
        ],
                               output_names=['out'],
                               function=create_report),
                      name="report_%s" % (subject_id).replace(".", "_"))
        report.inputs.subject_id = subject_id
        report.inputs.tsnr_file = tsnr_file
        report.inputs.realignment_parameters_file = realignment_parameters_file
        report.inputs.mean_epi_file = mean_epi_file
        report.inputs.mask_file = mask_file
        report.inputs.reg_file = reg_file
        report.inputs.fssubjects_dir = fssubjects_dir
        report.inputs.similarity_distribution = similarity_distribution
        report.inputs.mean_FD_distribution = mean_FD_distribution
        report.inputs.tsnr_distributions = tsnr_distributions
        report.inputs.output_file = output_file
        report.plugin_args = {'override_specs': 'request_memory = 4000'}
        wf.add_nodes([report])

    wf.run(plugin="CondorDAGMan")
Example #11
# In[48]:

# Establish input/output stream

infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = [('subject_id', subject_list_test)]

lhtemplate_files = opj('lhtemplate*.nii.gz')
label_files = opj('{subject_id}-lab.nii.gz')
t1_files = opj('{subject_id}-t1-mask.nii.gz')
t2_files = opj('{subject_id}-t2s-bfc-mask.nii.gz')

templates = {'lhtemplate': lhtemplate_files,
             'label_files': label_files,
             't1_files': t1_files,
             't2_files': t2_files}
selectfiles = Node(SelectFiles(templates, base_directory=datadir),
                   name="selectfiles")


# In[55]:

# Create pipeline and connect nodes
workflow = Workflow(name='normflow')
workflow.base_dir = '.'
workflow.add_nodes([test_antsreg_rigid])
#workflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id')]),
#                (selectfiles, test_antsreg_rigid, [('lhtemplate','moving_image')]),])
workflow.write_graph()
workflow.run()

Example #12
        # CHECK IF ALL FILES EXIST
        def check_if_out_files_exist(check_file_dict):
            for file in check_file_dict.values():
                if not os.path.exists(file):
                    raise Exception('file missing: %s' % file)

        check_file_dict = file_dict.copy()
        check_file_dict.pop('report_file')
        check_if_out_files_exist(check_file_dict)

        report = Node(util.Function(input_names=['subject_id', 'file_dict', 'df'],
                                    output_names=[],
                                    function=create_qc_report_pdf),
                      name='report_%s_%s'%(TR, subject_id))
        report.inputs.subject_id = subject_id
        report.inputs.file_dict = file_dict
        report.inputs.df = df

        wf.add_nodes([report])

# fixme
# ignore warning from np.rank
import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
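Inside the same warnings.catch_warnings() block, the two run branches collapse to a single call; a compact equivalent (a sketch; the plugin names must match nipype's registered plugin names):

    plugin_args = {'n_procs': use_n_procs} if plugin_name == 'MultiProc' else None
    wf.run(plugin=plugin_name, plugin_args=plugin_args)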
Example #14
            acq_str = "0 -1 0 0.0684"
            study = "camcan"
        elif "olm" in subject:
            acq_str = "0 1 0 {TotalReadoutTime}"
            study = "olm"
        else:
            raise ValueError("Cannot determine study")
        wfs = []
        wfs.append(run_process_dwi(wf_dir, subject, sessions, args, study, prep_pipe="mrtrix", acq_str=acq_str,
                                   ants_quick=args.ants_reg_quick))

        wf = Workflow(name=subject)
        wf.base_dir = wf_dir
        wf.config['execution']['crashdump_dir'] = os.path.join(args.output_dir, "crash")

        wf.add_nodes(wfs)
        wf.write_graph(graph2use='colored')

        try:
            wf.run(plugin='MultiProc', plugin_args={'n_procs': args.n_cpus})
        except Exception:
            print("Something went wrong")
            dump_dir = os.path.join(args.output_dir, "crash_dump_wdir", subject)
            shutil.copytree(os.path.join(wf_dir, subject), dump_dir)
            print("Copied working directory to " + dump_dir)
            raise  # re-raise with the original traceback

elif args.analysis_level == "group":
    output_dir = os.path.join(args.output_dir, "00_group")
    extracted_dir = os.path.join(args.output_dir, "extracted_metrics")
    preprocessed_dir = os.path.join(args.output_dir, "dwi_preprocessed")
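The acq_str values appear to follow FSL's acqparams convention: three phase-encoding direction components followed by the total readout time in seconds. A sketch of materializing one such line (the file name and readout time are illustrative):

acq_str = "0 1 0 {TotalReadoutTime}"
with open("acqparams.txt", "w") as f:
    f.write(acq_str.format(TotalReadoutTime=0.0684) + "\n")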
Example #15
def main():
    """Entry point"""
    from nipype import config as ncfg
    from nipype.pipeline.engine import Workflow
    from mriqc.utils.bids import collect_bids_data
    from mriqc.workflows.core import build_workflow

    # Run parser
    opts = get_parser().parse_args()

    # Build settings dict
    bids_dir = op.abspath(opts.bids_dir)

    # Number of processes
    n_procs = opts.n_procs

    settings = {
        'bids_dir': bids_dir,
        'write_graph': opts.write_graph,
        'testing': opts.testing,
        'hmc_afni': opts.hmc_afni,
        'hmc_fsl': opts.hmc_fsl,
        'fft_spikes_detector': opts.fft_spikes_detector,
        'n_procs': n_procs,
        'ants_nthreads': opts.ants_nthreads,
        'output_dir': op.abspath(opts.output_dir),
        'work_dir': op.abspath(opts.work_dir),
        'verbose_reports': opts.verbose_reports or opts.testing,
        'float32': opts.float32,
        'ica': opts.ica,
        'no_sub': opts.no_sub or opts.testing,
        'email': opts.email,
        'fd_thres': opts.fd_thres,
    }

    if not settings['no_sub']:
        MRIQC_LOG.warn('Anonymized quality metrics will be submitted'
                       ' to MRIQC\'s metrics repository.'
                       ' Use --no-sub to disable submission.')

    if opts.hmc_afni:
        settings['deoblique'] = opts.deoblique
        settings['despike'] = opts.despike
        settings['correct_slice_timing'] = opts.correct_slice_timing
        if opts.start_idx:
            settings['start_idx'] = opts.start_idx
        if opts.stop_idx:
            settings['stop_idx'] = opts.stop_idx

    if opts.ants_settings:
        settings['ants_settings'] = opts.ants_settings

    log_dir = op.join(settings['output_dir'], 'logs')

    analysis_levels = opts.analysis_level
    if opts.participant_label is None:
        analysis_levels.append('group')
    analysis_levels = list(set(analysis_levels))
    if len(analysis_levels) > 2:
        raise RuntimeError('Error parsing analysis levels, got "%s"' % ', '.join(analysis_levels))

    settings['report_dir'] = opts.report_dir
    if not settings['report_dir']:
        settings['report_dir'] = op.join(settings['output_dir'], 'reports')

    check_folder(settings['output_dir'])
    if 'participant' in analysis_levels:
        check_folder(settings['work_dir'])

    check_folder(log_dir)
    check_folder(settings['report_dir'])

    # Set nipype config
    ncfg.update_config({
        'logging': {'log_directory': log_dir, 'log_to_file': True},
        'execution': {'crashdump_dir': log_dir, 'crashfile_format': 'txt'},
    })

    callback_log_path = None
    plugin_settings = {'plugin': 'Linear'}
    if opts.use_plugin is not None:
        from yaml import safe_load
        with open(opts.use_plugin) as pfile:
            plugin_settings = safe_load(pfile)
    else:
        # Setup multiprocessing
        if settings['n_procs'] == 0:
            settings['n_procs'] = cpu_count()

        if settings['ants_nthreads'] == 0:
            if settings['n_procs'] > 1:
                # always leave one extra thread for non ANTs work,
                # don't use more than 8 threads - the speed up is minimal
                settings['ants_nthreads'] = min(settings['n_procs'] - 1, 8)
            else:
                settings['ants_nthreads'] = 1

        if settings['n_procs'] > 1:
            plugin_settings['plugin'] = 'MultiProc'
            plugin_settings['plugin_args'] = {'n_procs': settings['n_procs']}
            if opts.mem_gb:
                plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

    MRIQC_LOG.info(
        'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
        __version__, ', '.join(analysis_levels), opts.participant_label, settings)

    # Process data types
    modalities = opts.modalities

    dataset = collect_bids_data(
        settings['bids_dir'],
        modalities=modalities,
        participant_label=opts.participant_label,
        session=opts.session_id,
        run=opts.run_id,
        task=opts.task_id,
    )

    # Set up participant level
    if 'participant' in analysis_levels:
        workflow = Workflow(name='workflow_enumerator')
        workflow.base_dir = settings['work_dir']

        wf_list = []
        for mod in modalities:
            if not dataset[mod]:
                MRIQC_LOG.warn('No %s scans were found in %s', mod, settings['bids_dir'])
                continue

            wf_list.append(build_workflow(dataset[mod], mod, settings=settings))

        if wf_list:
            workflow.add_nodes(wf_list)

            if not opts.dry_run:
                if plugin_settings['plugin'] == 'MultiProc' and opts.profile:
                    import logging
                    from nipype.pipeline.plugins.callback_log import log_nodes_cb
                    plugin_settings['plugin_args']['status_callback'] = log_nodes_cb
                    callback_log_path = op.join(log_dir, 'run_stats.log')
                    logger = logging.getLogger('callback')
                    logger.setLevel(logging.DEBUG)
                    handler = logging.FileHandler(callback_log_path)
                    logger.addHandler(handler)

                workflow.run(**plugin_settings)
                if not settings['no_sub']:
                    MRIQC_LOG.warn(
                        'Anonymized quality metrics have been submitted'
                        ' to MRIQC\'s metrics repository.'
                        ' Use --no-sub to disable submission.')
                if callback_log_path is not None:
                    from nipype.utils.draw_gantt_chart import generate_gantt_chart
                    generate_gantt_chart(callback_log_path, cores=settings['n_procs'])
        else:
            raise RuntimeError('Error reading BIDS directory (%s), or the dataset is not '
                               'BIDS-compliant.' % settings['bids_dir'])

    # Set up group level
    if 'group' in analysis_levels:
        from mriqc.reports import group_html
        from mriqc.utils.misc import generate_csv, generate_pred

        reports_dir = check_folder(op.join(settings['output_dir'], 'reports'))
        derivatives_dir = op.join(settings['output_dir'], 'derivatives')

        n_group_reports = 0
        for mod in modalities:
            dataframe, out_csv = generate_csv(derivatives_dir,
                                              settings['output_dir'], mod)

            # If there are no iqm.json files, nothing to do.
            if dataframe is None:
                MRIQC_LOG.warn(
                    'No IQM-JSON files were found for the %s data type in %s. The group-level '
                    'report was not generated.', mod, derivatives_dir)
                continue

            MRIQC_LOG.info('Summary CSV table for the %s data generated (%s)', mod, out_csv)

            # out_pred = generate_pred(derivatives_dir, settings['output_dir'], mod)
            # if out_pred is not None:
            #     MRIQC_LOG.info('Predicted QA CSV table for the %s data generated (%s)',
            #                    mod, out_pred)

            out_html = op.join(reports_dir, mod + '_group.html')
            group_html(out_csv, mod,
                       csv_failed=op.join(settings['output_dir'], 'failed_' + mod + '.csv'),
                       out_file=out_html)
            MRIQC_LOG.info('Group-%s report generated (%s)', mod, out_html)
            n_group_reports += 1

        if n_group_reports == 0:
            raise Exception("No data found. No group level reports were generated.")
Example #16
def main():
    """Entry point"""
    from nipype import config as ncfg, logging as nlog
    from nipype.pipeline.engine import Workflow

    from .. import logging
    from ..utils.bids import collect_bids_data
    from ..workflows.core import build_workflow
    from ..utils.misc import check_folder

    # Run parser
    opts = get_parser().parse_args()

    # Retrieve logging level
    log_level = int(max(3 - opts.verbose_count, 0) * 10)
    if opts.verbose_count > 1:
        log_level = int(max(25 - 5 * opts.verbose_count, 1))
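    # e.g. no -v -> 30 (WARNING), -v -> 20 (INFO), -vv -> 15, -vvv -> 10 (DEBUG)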

    logging.getLogger().setLevel(log_level)
    log = logging.getLogger('mriqc.cli')

    # Build settings dict
    bids_dir = op.abspath(opts.bids_dir)

    # Number of processes
    n_procs = opts.n_procs

    settings = {
        'bids_dir': bids_dir,
        'write_graph': opts.write_graph,
        'testing': opts.testing,
        'hmc_afni': opts.hmc_afni,
        'hmc_fsl': opts.hmc_fsl,
        'fft_spikes_detector': opts.fft_spikes_detector,
        'n_procs': n_procs,
        'ants_nthreads': opts.ants_nthreads,
        'ants_float': opts.ants_float,
        'output_dir': op.abspath(opts.output_dir),
        'work_dir': op.abspath(opts.work_dir),
        'verbose_reports': opts.verbose_reports or opts.testing,
        'float32': opts.float32,
        'ica': opts.ica,
        'no_sub': opts.no_sub,
        'email': opts.email,
        'fd_thres': opts.fd_thres,
        'webapi_url': opts.webapi_url,
        'webapi_port': opts.webapi_port,
        'upload_strict': opts.upload_strict,
    }

    if opts.hmc_afni:
        settings['deoblique'] = opts.deoblique
        settings['despike'] = opts.despike
        settings['correct_slice_timing'] = opts.correct_slice_timing
        if opts.start_idx:
            settings['start_idx'] = opts.start_idx
        if opts.stop_idx:
            settings['stop_idx'] = opts.stop_idx

    if opts.ants_settings:
        settings['ants_settings'] = opts.ants_settings

    log_dir = op.join(settings['output_dir'], 'logs')

    analysis_levels = opts.analysis_level
    if opts.participant_label is None:
        analysis_levels.append('group')
    analysis_levels = list(set(analysis_levels))
    if len(analysis_levels) > 2:
        raise RuntimeError('Error parsing analysis levels, got "%s"' %
                           ', '.join(analysis_levels))

    settings['report_dir'] = opts.report_dir
    if not settings['report_dir']:
        settings['report_dir'] = op.join(settings['output_dir'], 'reports')

    check_folder(settings['output_dir'])
    if 'participant' in analysis_levels:
        check_folder(settings['work_dir'])

    check_folder(log_dir)
    check_folder(settings['report_dir'])

    # Set nipype config
    ncfg.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': True
        },
        'execution': {
            'crashdump_dir': log_dir,
            'crashfile_format': 'txt',
            'resource_monitor': opts.profile
        },
    })

    # Set nipype logging level
    nlog.getLogger('workflow').setLevel(log_level)
    nlog.getLogger('interface').setLevel(log_level)
    nlog.getLogger('utils').setLevel(log_level)

    plugin_settings = {'plugin': 'Linear'}
    if opts.use_plugin is not None:
        from yaml import safe_load
        with open(opts.use_plugin) as pfile:
            plugin_settings = safe_load(pfile)
    else:
        # Setup multiprocessing
        if settings['n_procs'] == 0:
            settings['n_procs'] = cpu_count()

        if settings['ants_nthreads'] == 0:
            if settings['n_procs'] > 1:
                # always leave one extra thread for non ANTs work,
                # don't use more than 8 threads - the speed up is minimal
                settings['ants_nthreads'] = min(settings['n_procs'] - 1, 8)
            else:
                settings['ants_nthreads'] = 1

        if settings['n_procs'] > 1:
            plugin_settings['plugin'] = 'MultiProc'
            plugin_settings['plugin_args'] = {'n_procs': settings['n_procs']}
            if opts.mem_gb:
                plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

    # Process data types
    modalities = opts.modalities

    # Set up participant level
    if 'participant' in analysis_levels:
        log.info('Participant level started. Checking BIDS dataset...')
        dataset = collect_bids_data(
            settings['bids_dir'],
            modalities=modalities,
            participant_label=opts.participant_label,
            session=opts.session_id,
            run=opts.run_id,
            task=opts.task_id,
        )

        log.info(
            'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
            __version__, ', '.join(analysis_levels), opts.participant_label,
            settings)

        workflow = Workflow(name='workflow_enumerator')
        workflow.base_dir = settings['work_dir']

        wf_list = []
        for mod in modalities:
            if not dataset[mod]:
                log.warning('No %s scans were found in %s', mod,
                            settings['bids_dir'])
                continue

            wf_list.append(build_workflow(dataset[mod], mod,
                                          settings=settings))

        if wf_list:
            workflow.add_nodes(wf_list)

            if not opts.dry_run:
                # Warn about submitting measures BEFORE
                if not settings['no_sub']:
                    log.warning('Anonymized quality metrics will be submitted'
                                ' to MRIQC\'s metrics repository.'
                                ' Use --no-sub to disable submission.')

                # run MRIQC
                workflow.run(**plugin_settings)

                # Warn about submitting measures AFTER
                if not settings['no_sub']:
                    log.warning(
                        'Anonymized quality metrics have been submitted'
                        ' to MRIQC\'s metrics repository.'
                        ' Use --no-sub to disable submission.')
        else:
            msg = 'Error reading BIDS directory ({}), or the dataset is not ' \
                  'BIDS-compliant.'

            if opts.participant_label or opts.session_id or opts.run_id or opts.task_id:

                msg = 'The combination of supplied labels'

                if opts.participant_label is not None:
                    msg += ' (--participant_label {})'.format(" ".join(
                        opts.participant_label))
                if opts.session_id is not None:
                    msg += ' (--session-id {})'.format(" ".join(
                        opts.session_id))
                if opts.run_id is not None:
                    msg += ' (--run-id {})'.format(" ".join(opts.run_id))
                if opts.task_id is not None:
                    msg += ' (--task-id {})'.format(" ".join(opts.task_id))

                msg += ' did not result in matches within the BIDS directory ({}).'

            raise RuntimeError(msg.format(settings['bids_dir']))

        log.info('Participant level finished successfully.')

    # Set up group level
    if 'group' in analysis_levels:
        from ..reports import group_html
        from ..utils.misc import generate_csv  # , generate_pred

        log.info('Group level started...')
        log.info(
            'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
            __version__, ', '.join(analysis_levels), opts.participant_label,
            settings)

        reports_dir = check_folder(op.join(settings['output_dir'], 'reports'))
        derivatives_dir = op.join(settings['output_dir'], 'derivatives')

        n_group_reports = 0
        for mod in modalities:
            dataframe, out_csv = generate_csv(derivatives_dir,
                                              settings['output_dir'], mod)

            # If there are no iqm.json files, nothing to do.
            if dataframe is None:
                log.warning(
                    'No IQM-JSON files were found for the %s data type in %s. The group-level '
                    'report was not generated.', mod, derivatives_dir)
                continue

            log.info('Summary CSV table for the %s data generated (%s)', mod,
                     out_csv)

            # out_pred = generate_pred(derivatives_dir, settings['output_dir'], mod)
            # if out_pred is not None:
            #     log.info('Predicted QA CSV table for the %s data generated (%s)',
            #                    mod, out_pred)

            out_html = op.join(reports_dir, mod + '_group.html')
            group_html(out_csv,
                       mod,
                       csv_failed=op.join(settings['output_dir'],
                                          'failed_' + mod + '.csv'),
                       out_file=out_html)
            log.info('Group-%s report generated (%s)', mod, out_html)
            n_group_reports += 1

        if n_group_reports == 0:
            raise Exception(
                "No data found. No group level reports were generated.")

        log.info('Group level finished successfully.')