Example 1
import typing as t

# Assumed imports (the original excerpt omits them); the fmridenoise interface
# classes used by run() below (Confounds, Denoise, Smooth) are left out because
# their exact modules are not shown in the excerpt.
from nipype import Node, Workflow
from fmridenoise.pipelines import (get_pipeline_path, get_pipelines_names,
                                   get_pipelines_paths, is_IcaAROMA,
                                   load_pipeline_from_json)
from fmridenoise.utils.json_validator import is_valid


def parse_pipelines(pipelines_args: t.Union[str, set] = "all") -> set:
    """
    Parses all possible pipeline options.
    :param pipelines_args: set or str; the only valid string argument is 'all'.
    If the argument is a set, it may contain both names of pipelines from the
    fmridenoise.pipelines directory and path(s) to valid JSON file(s)
    containing a valid pipeline description.
    :return: set of valid pipeline paths.
    """
    if isinstance(pipelines_args, str):
        if pipelines_args != "all":
            raise ValueError("The only valid string argument is 'all'")
        return get_pipelines_paths()
    known_pipelines = get_pipelines_names()
    pipelines_args = set(pipelines_args)
    if pipelines_args <= known_pipelines:
        return get_pipelines_paths(pipelines_args)
    ret = set()
    for p in pipelines_args:
        if p in known_pipelines:
            ret.add(get_pipeline_path(p))
        elif is_valid(load_pipeline_from_json(p)):
            ret.add(p)
        else:
            raise ValueError(f"File: '{p} is not a valid pipeline")
    return ret
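
# Usage sketch (an illustration, not part of the original module):
# 'pipeline-Null' is one of the pipeline names bundled with
# fmridenoise.pipelines.
all_paths = parse_pipelines()                    # every bundled pipeline path
null_path = parse_pipelines({"pipeline-Null"})   # a single pipeline by name

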
def run(output_dir: str, pipeline_name: str, fmri_file: str, conf_raw: str,
        conf_json: str):
    pipeline = load_pipeline_from_json(get_pipeline_path(pipeline_name))
    workflow = Workflow(name="test_workflow", base_dir=output_dir)
    conf_node = Node(Confounds(pipeline=pipeline,
                               conf_raw=conf_raw,
                               conf_json=conf_json,
                               subject="test",
                               task="test",
                               session="test",
                               output_dir=output_dir),
                     name="Confprep")
    denoising_node = Node(Denoise(pipeline=pipeline,
                                  task="test",
                                  output_dir=output_dir),
                          name="Denoise")
    if not is_IcaAROMA(pipeline):
        smoothing_node = Node(Smooth(fmri_prep=fmri_file,
                                     output_directory=output_dir),
                              name="Smooth")
        workflow.connect([(smoothing_node, denoising_node, [("fmri_smoothed",
                                                             "fmri_prep")])])
    else:
        denoising_node.inputs.fmri_prep_aroma = fmri_file
    workflow.connect([(conf_node, denoising_node,
                       [("conf_prep", "conf_prep")])])
    workflow.run()
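

# Hypothetical invocation of run() above; the concrete paths are placeholders
# in fMRIPrep-style naming, not files shipped with fmridenoise.
run(output_dir="/tmp/denoise_out",
    pipeline_name="pipeline-Null",
    fmri_file="/data/sub-01_task-test_desc-preproc_bold.nii.gz",
    conf_raw="/data/sub-01_task-test_desc-confounds_regressors.tsv",
    conf_json="/data/sub-01_task-test_desc-confounds_regressors.json")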
Example 3
class QualityMeasuresAsNodeTestBase:
    group_conf_summary: pd.DataFrame = ...
    distance_matrix: np.ndarray = ...
    group_corr_mat: np.ndarray = ...
    pipeline = load_pipeline_from_json(get_pipeline_path('pipeline-Null'))

    @classmethod
    def tearDownClass(cls) -> None:
        shutil.rmtree(cls.tempdir)

    @classmethod
    def setUpClass(cls) -> None:
        cls.tempdir = tempfile.mkdtemp()
        group_conf_summary_file = join(
            cls.tempdir, 'task-test_pipeline-Null_group_conf_summary.tsv')
        cls.group_conf_summary.to_csv(group_conf_summary_file,
                                      sep="\t",
                                      index=False)
        distance_matrix_file = join(
            cls.tempdir, "task-test_pipeline-Null_distance_matrix.npy")
        np.save(distance_matrix_file, cls.distance_matrix)
        group_corr_mat_file = join(
            cls.tempdir, "task-test_pipeline-Null_group_corr_mat.npy")
        np.save(group_corr_mat_file, cls.group_corr_mat)
        cls.quality_measures_node = Node(QualityMeasures(),
                                         name="QualityMeasures")
        cls.quality_measures_node.inputs.group_conf_summary = group_conf_summary_file
        cls.quality_measures_node.inputs.distance_matrix = distance_matrix_file
        cls.quality_measures_node.inputs.group_corr_mat = group_corr_mat_file
        cls.quality_measures_node.inputs.pipeline = cls.pipeline
        cls.quality_measures_node.inputs.output_dir = cls.tempdir
        cls.result = cls.quality_measures_node.run()
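
# A minimal concrete subclass sketch (not part of the original excerpt): the
# `...` class attributes above are filled with dummy data. The column names in
# group_conf_summary are illustrative assumptions; the real schema expected by
# QualityMeasures may differ.
class QualityMeasuresSmokeTest(QualityMeasuresAsNodeTestBase, ut.TestCase):
    group_conf_summary = pd.DataFrame({'subject': ['sub-1', 'sub-2'],
                                       'mean_fd': [0.12, 0.08]})  # hypothetical columns
    distance_matrix = np.zeros((4, 4))
    group_corr_mat = np.zeros((2, 4, 4))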
Example 4
    def _run_interface(self, runtime):
        js = load_pipeline_from_json(self.inputs.pipeline_path)
        if not is_valid(js):
            raise ValueError(
                f"JSON file {os.path.basename(self.inputs.pipeline_path)} is "
                f"not a valid pipeline; check the schema in "
                f"fmridenoise.utils.json_validator.py")

        self._results['pipeline'] = js
        self._results['pipeline_name'] = js['name']

        return runtime
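
# For context, _run_interface above is the core of a nipype SimpleInterface.
# A minimal sketch of the enclosing class follows; the spec and class names
# are illustrative assumptions, not fmridenoise's actual layout.
from nipype.interfaces.base import (BaseInterfaceInputSpec, File,
                                    SimpleInterface, TraitedSpec, traits)


class LoadPipelineInputSpec(BaseInterfaceInputSpec):
    pipeline_path = File(exists=True, desc="path to a pipeline JSON file")


class LoadPipelineOutputSpec(TraitedSpec):
    pipeline = traits.Dict(desc="parsed pipeline description")
    pipeline_name = traits.Str(desc="value of the pipeline's 'name' field")


class LoadPipeline(SimpleInterface):
    input_spec = LoadPipelineInputSpec
    output_spec = LoadPipelineOutputSpec
    # the _run_interface shown above would live here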
Example 5
def run(output_dir: str, pipeline_name: str, group_corr_mat: str,
        group_conf_summary: str):
    workflow = Workflow(name="test_workflow", base_dir=output_dir)
    identity_node = Node(IdentityInterface(fields=[
        "pipeline", "group_corr_mat", "distance_matrix", "group_conf_summary"
    ]),
                         name="SomeInputSource")
    identity_node.inputs.pipeline = load_pipeline_from_json(
        get_pipeline_path(pipeline_name))
    identity_node.inputs.group_corr_mat = group_corr_mat
    identity_node.inputs.distance_matrix = get_distance_matrix_file_path()
    identity_node.inputs.group_conf_summary = group_conf_summary
    quality_node = Node(QualityMeasures(output_dir=output_dir),
                        name="QualityMeasures")
    workflow.connect([(identity_node, quality_node,
                       [("pipeline", "pipeline"),
                        ("group_corr_mat", "group_corr_mat"),
                        ("distance_matrix", "distance_matrix"),
                        ("group_conf_summary", "group_conf_summary")])])
    workflow.run()
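
# Hypothetical invocation; the file paths are placeholders that follow the
# naming scheme used in the tests above.
run(output_dir="/tmp/qm_out",
    pipeline_name="pipeline-Null",
    group_corr_mat="/data/task-test_pipeline-Null_group_corr_mat.npy",
    group_conf_summary="/data/task-test_pipeline-Null_group_conf_summary.tsv")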
Example 6
class TestPipelinesParser(ut.TestCase):
    """
    Test for parse_pipelines form __main__
    """
    pipelines_dir = dirname(pipe.__file__)
    all_pipelines_valid = set(glob(join(pipelines_dir, "*.json")))
    noicaaroma_pipelines_valid = {
        p for p in all_pipelines_valid
        if not pipe.is_IcaAROMA(pipe.load_pipeline_from_json(p))
    }

    def test_parse_pipelines_without_custom(self):
        """Checks if parser accepts all pipelines from fmridenoise.pipeline"""
        paths = parse_pipelines("all")
        self.assertSetEqual(self.all_pipelines_valid, paths)

    def test_parse_pipelines_custom(self):
        """Checks if parser accepts only custom pipeline"""
        custom = {join(dirname(__file__), "custom_pipeline.json")}
        paths = parse_pipelines(custom)
        self.assertSetEqual(custom, paths)

    def test_parse_pipelines_mix(self):
        """Checks if parser accepts custom pipeline mixed with fmridenise.pipeline"""
        addition = join(dirname(__file__), "custom_pipeline.json")
        custom = self.all_pipelines_valid.copy()
        custom.add(addition)
        paths = parse_pipelines(custom)
        self.assertSetEqual(paths, custom)

    def test_parse_pipelines_known_pipeline(self):
        """Checks if parser accepts pipeline from fmridenoise selected by name"""
        selected = "pipeline-24HMP_8Phys_SpikeReg_4GS"
        selected_path = {(join(self.pipelines_dir, selected) + ".json")}
        paths = parse_pipelines([
            selected
        ])  # __main__ always passes a list of paths / selected pipeline names
        self.assertSetEqual(paths, selected_path)
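
    def test_parse_pipelines_invalid_string(self):
        """Sketch of a complementary negative test (not in the original
        suite): any string other than 'all' must raise ValueError."""
        with self.assertRaises(ValueError):
            parse_pipelines("pipeline-Null")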
Example 7
excluded_subjects = [
    ExcludedSubjects(pipeline_name='Null',
                     task='rest',
                     session='1',
                     run=1,
                     excluded={'sub-1', 'sub-2', 'sub-3'}),
    ExcludedSubjects(pipeline_name='Null',
                     task='rest',
                     session='1',
                     run=2,
                     excluded={'sub-1', 'sub-2', 'sub-3'})
]
pipelines = []
for pipeline_name in pipelines_dict.values():
    pipelines.append(
        load_pipeline_from_json(get_pipeline_path(pipeline_name)))
# Input arguments for the ReportCreator interface
plots_dict = create_dummy_plots(entity_list=entity_list,
                                pipeline_dict=pipelines_dict,
                                path_out=os.path.join(report_dir, 'tmp'))
# Create & run interface
interface = ReportCreator(runtime_info=RuntimeInfo(
    input_args=" ".join(sys.argv),
    version=get_versions().get('version')),
                          pipelines=pipelines,
                          tasks=['rest', 'tapping'],
                          sessions=['1', '2'],
                          runs=[1, 2],
                          output_dir=report_dir,
                          warnings=warnings,
                          excluded_subjects=excluded_subjects,
                          **plots_dict)  # assumed ending; the source excerpt is truncated mid-call
interface.run()
Example 8
def create_report_data(entity_list, pipelines_dict, plots_dict):
    '''Creates a list of dicts representing all data used to create reports.

    Args:
        entity_list (list):
            List of dictionaries, each with either a single key 'task' or two
            keys 'task' and 'ses', describing a single denoising entity.
        pipelines_dict (dict):
            Dictionary with keys corresponding to pipeline abbreviations used
            as filename suffixes and values corresponding to pipeline
            filenames (JSON).
        plots_dict (dict):
            Dict mimicking the input of the CreateReport interface. Keys
            correspond to variable names and values correspond to variable
            values (lists of paths).

    Returns:
        List with a structured dictionary for each entity.

        Dictionary keys and values:
            'entity_name':
                Name of the task / task+session entity, used to name the
                report tab.
            'entity_id':
                Id of the task / task+session entity, used for HTML element
                ids.
            'plot_all_pipelines_<plot_name>':
                Path to the plot aggregating all pipelines.
            'pipeline':
                List of dicts, one per pipeline. Each pipeline dictionary has
                the key, value pairs:

                'pipeline_dict':
                    Parsed pipeline JSON.
                'plot_pipeline_<plot_name>':
                    Path to the plot for a single pipeline.

    Note:
        This functionality should be reimplemented within the ReportCreator
        interface. The output of this function is the only required argument
        for the function generating the HTML report.
    '''
    report_data = []

    plots_all_pipelines = {
        k: v for k, v in plots_dict.items() 
        if '_all_pipelines_' in k}
    plots_pipeline = {
        k: v for k, v in plots_dict.items() 
        if '_pipeline_' in k}

    for entity in entity_list:

        # General information
        if 'ses' in entity:
            entity_data = {
                'entity_name': f'task-{entity["task"]} ses-{entity["ses"]}', 
                'entity_id': f'task-{entity["task"]}-ses-{entity["ses"]}'            
                }
        else:
            entity_data = {
                'entity_name': f'task-{entity["task"]}', 
                'entity_id': f'task-{entity["task"]}'            
                }

        # Plots for all pipelines
        for plot_name, plots_list in plots_all_pipelines.items():

            entity_data[plot_name] = next(filter(
                lambda path: all(v in path for v in entity.values()), 
                plots_list
                ))

        # Plots for single pipeline
        entity_data['pipeline'] = []
        for pipeline, pipeline_file in pipelines_dict.items():

            entity_pipeline_data = {
                'pipeline_dict': load_pipeline_from_json(
                    os.path.join('fmridenoise/pipelines', pipeline_file))
                    }

            for plot_name, plots_list in plots_pipeline.items():

                entity_pipeline_data[plot_name] = next(filter(
                    lambda path: (all(v in path for v in entity.values()) 
                                  and pipeline in path), 
                    plots_list
                    ))

            entity_data['pipeline'].append(entity_pipeline_data)

        report_data.append(entity_data)

    return report_data
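
# Toy invocation sketch (hypothetical file names): the plot paths only need to
# contain the entity values and the pipeline abbreviation for the filters in
# create_report_data to match, and 'fmridenoise/pipelines/pipeline-Null.json'
# must be resolvable from the working directory.
entity_list = [{'task': 'rest'}, {'task': 'rest', 'ses': '1'}]
pipelines_dict = {'Null': 'pipeline-Null.json'}
plots_dict = {
    'plot_all_pipelines_fc_fd': ['plots/task-rest_fc_fd.svg',
                                 'plots/task-rest_ses-1_fc_fd.svg'],
    'plot_pipeline_fc_matrix': ['plots/task-rest_pipeline-Null_fc.svg',
                                'plots/task-rest_ses-1_pipeline-Null_fc.svg'],
}
report_data = create_report_data(entity_list, pipelines_dict, plots_dict)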
Example 9
class ValidateFilesOnMissingTestCase(ut.TestCase):
    """
    TODO: Consider adding mutating test covering all possible parameter configurations
    """
    derivatives = ["fmriprep"]
    tasks = ["audionback", "dualnback", "rest", "spatialnback"]
    sessions = ["1", "2", "3", "4"]
    subjects = ["01", "02"]
    pipelines = aromaPipelinesPaths + noAromaPipelinePaths
    pipelinesDicts = [pipe.load_pipeline_from_json(p) for p in pipelines]
    bids_dir = dummyMissing
    maxDiff = None

    @classmethod
    def setUpClass(cls) -> None:
        # Validate derivatives argument
        derivatives, scope = BIDSValidate.validate_derivatives(
            bids_dir=cls.bids_dir,
            derivatives=cls.derivatives
        )

        # Load layout
        cls.layout = BIDSLayout(
            root=cls.bids_dir,
            derivatives=derivatives,
            validate=True,
            index_metadata=False
        )

    def parametrizedTest(self,
                         tasks: List[str],
                         sessions: List[str],
                         subjects: List[str],
                         runs: List[str],
                         include_aroma: bool,
                         include_no_aroma: bool,
                         should_pass: bool):
        if not should_pass:
            with self.assertRaises(MissingFile):
                BIDSValidate.validate_files(
                    layout=self.layout,
                    tasks=tasks,
                    sessions=sessions,
                    subjects=subjects,
                    runs=runs,
                    include_aroma=include_aroma,
                    include_no_aroma=include_no_aroma
                )
        else:
            BIDSValidate.validate_files(
                layout=self.layout,
                tasks=tasks,
                sessions=sessions,
                subjects=subjects,
                runs=runs,
                include_aroma=include_aroma,
                include_no_aroma=include_no_aroma
            )

    def test_task_dualnback_sub_01_ses_1_aroma_t_PASS(self):
        self.parametrizedTest(
            tasks=['dualnback'],
            sessions=['1'],
            subjects=['01'],
            runs=[],
            include_no_aroma=True,
            include_aroma=True,
            should_pass=True
        )

    def test_task_audionback_sub_01_ses_1_aroma_f_PASS(self):
        self.parametrizedTest(
            tasks=['audionback'],
            sessions=['1'],
            subjects=['01'],
            runs=[],
            include_no_aroma=True,
            include_aroma=False,
            should_pass=True
        )

    # test against missing aroma files
    def test_task_audionback_sub_01_ses_1_aroma_t_FAIL(self):
        self.parametrizedTest(
            tasks=['audionback'],
            sessions=['1'],
            subjects=['01'],
            runs=[],
            include_no_aroma=True,
            include_aroma=True,
            should_pass=False
        )

    def test_task_audionback_sub_01_ses_2_aroma_t_PASS(self):
        self.parametrizedTest(
            tasks=['audionback'],
            sessions=['2'],
            subjects=['01'],
            runs=[],
            include_no_aroma=True,
            include_aroma=True,
            should_pass=True
        )

    # test against missing session
    def test_task_audionback_sub_01_02_ses_2_aroma_t_FAIL(self):
        self.parametrizedTest(
            tasks=['audionback'],
            sessions=['2'],
            subjects=['01', '02'],
            runs=[],
            include_no_aroma=True,
            include_aroma=True,
            should_pass=False
        )

    # test against missing subject
    def test_task_audionback_sub_03_ses_2_aroma_t_FAIL(self):
        self.parametrizedTest(
            tasks=['audionback'],
            sessions=['2'],
            subjects=['03'],
            runs=[],
            include_no_aroma=True,
            include_aroma=True,
            should_pass=False
        )
Example 10
    def _run_interface(self, runtime):

        # Validate derivatives argument
        derivatives, _ = BIDSValidate.validate_derivatives(
            bids_dir=self.inputs.bids_dir,
            derivatives=self.inputs.derivatives
        )

        # Load layout
        layout = BIDSLayout(
            root=self.inputs.bids_dir,
            derivatives=derivatives,
            validate=False
        )

        # Load pipelines
        pipelines_dicts = []
        for pipeline in self.inputs.pipelines:
            pipelines_dicts.append(load_pipeline_from_json(pipeline))

        # Check if there is at least one pipeline requiring AROMA files
        include_aroma = any(map(is_IcaAROMA, pipelines_dicts))

        # Check if there is at least one pipeline requiring non-AROMA files
        include_no_aroma = not all(map(is_IcaAROMA, pipelines_dicts))

        # Check missing files and act accordingly
        entities_files, (tasks, sessions, subjects, runs) = BIDSValidate.validate_files(
            layout=layout,
            tasks=self.inputs.tasks,
            sessions=self.inputs.sessions,
            subjects=self.inputs.subjects,
            runs=self.inputs.runs,
            include_aroma=include_aroma,
            include_no_aroma=include_no_aroma
        )

        # Convert entities_files into separate lists of BIDSImageFile Objects
        def filter_entity(entity_files: t.List[t.Dict[str, t.Any]], key: str) -> t.List[str]:
            return list(entity[key].path for entity in entity_files if entity.get(key) is not None)
        
        conf_raw = filter_entity(entities_files, 'conf_raw')
        conf_json = filter_entity(entities_files, 'conf_json')

        if include_no_aroma:
            fmri_prep = filter_entity(entities_files, 'fmri_prep')
        else:
            fmri_prep = []

        if include_aroma:
            fmri_prep_aroma = filter_entity(entities_files, 'fmri_prep_aroma')
        else:
            fmri_prep_aroma = []

        # Extract TR for specific tasks
        tr_dict = {}

        # TODO: this is just a funny workaround, look for a better solution later
        layout_for_tr = BIDSLayout(
            root=self.inputs.bids_dir,
            derivatives=derivatives,
            validate=True,
            index_metadata=True
        )

        for task in tasks:
            filter_fmri_tr = {
                'extension': ['nii', 'nii.gz'],
                'suffix': 'bold',
                'desc': 'preproc',
                'space': 'MNI152NLin2009cAsym',
                'task': task
            }

            try:
                example_file = layout_for_tr.get(**filter_fmri_tr)[0]
            except IndexError:
                raise MissingFile(f'no imaging file found for task {task}')
            tr_dict[task] = layout_for_tr.get_metadata(example_file.path)[
                'RepetitionTime']

        # check space
        # TODO:
        # spaces = layout.get_entities()['space'].unique()
        # for space in spaces:
        #     get_parcellation_file_path(space)

        # Prepare outputs
        self._results['fmri_prep'] = fmri_prep
        self._results['fmri_prep_aroma'] = fmri_prep_aroma
        self._results['conf_raw'] = conf_raw
        self._results['conf_json'] = conf_json
        self._results['tasks'] = tasks
        self._results['sessions'] = sessions
        self._results['subjects'] = subjects
        self._results['runs'] = runs
        self._results['pipelines'] = pipelines_dicts
        self._results['tr_dict'] = tr_dict

        return runtime
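
# Standalone sketch of the TR-extraction trick used above; '/data/bids' is a
# placeholder for any fMRIPrep-style dataset root, and 'rest' an example task.
from bids import BIDSLayout

layout = BIDSLayout('/data/bids', derivatives=True, index_metadata=True)
bold = layout.get(task='rest', suffix='bold', extension=['nii', 'nii.gz'])[0]
tr = layout.get_metadata(bold.path)['RepetitionTime']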