def __init__(self, paradigm, datasets=None, random_state=None, n_jobs=1,
             overwrite=False, error_score='raise', suffix='',
             hdf5_path=None):
    """Initialize the evaluation.

    Validates the paradigm and dataset arguments, drops datasets that
    are incompatible with either the paradigm or this evaluation, and
    prepares the results container.

    Parameters
    ----------
    paradigm : BaseParadigm
        Paradigm used to extract data from the datasets.
    datasets : list of BaseDataset | BaseDataset | None
        Datasets to evaluate on. If None, all datasets declared by the
        paradigm are used.
    random_state : int | None
        Seed stored for reproducibility.
    n_jobs : int
        Number of parallel jobs.
    overwrite : bool
        Whether previously stored results should be overwritten.
    error_score : 'raise' | numeric
        Passed through to scoring; behavior on pipeline failure.
    suffix : str
        Suffix appended to the results storage name.
    hdf5_path : str | None
        Location of the HDF5 results file.

    Raises
    ------
    ValueError
        If ``paradigm`` or ``datasets`` has the wrong type.
    Exception
        If no dataset remains after the compatibility checks.
    """
    self.random_state = random_state
    self.n_jobs = n_jobs
    self.error_score = error_score
    self.hdf5_path = hdf5_path

    # check paradigm
    if not isinstance(paradigm, BaseParadigm):
        raise ValueError("paradigm must be an Paradigm instance")
    self.paradigm = paradigm

    # if no dataset provided, then we get the list from the paradigm
    if datasets is None:
        datasets = self.paradigm.datasets

    if not isinstance(datasets, list):
        if isinstance(datasets, BaseDataset):
            datasets = [datasets]
        else:
            raise ValueError("datasets must be a list or a dataset "
                             "instance")

    for dataset in datasets:
        if not isinstance(dataset, BaseDataset):
            raise ValueError("datasets must only contains dataset "
                             "instance")

    # fixme, we might want to drop dataset that are not compatible
    rm = []
    for dataset in datasets:
        valid_for_paradigm = self.paradigm.is_valid(dataset)
        valid_for_eval = self.is_valid(dataset)
        if not valid_for_paradigm:
            log.warning(f"{dataset} not compatible with "
                        "paradigm. Removing this dataset from the list.")
            rm.append(dataset)
        elif not valid_for_eval:
            log.warning(f"{dataset} not compatible with evaluation. "
                        "Removing this dataset from the list.")
            rm.append(dataset)

    # Plain loop for the removal side effect; a list comprehension used
    # only for its side effects builds a throwaway list (anti-pattern).
    for r in rm:
        datasets.remove(r)

    if len(datasets) > 0:
        self.datasets = datasets
    else:
        raise Exception("No datasets left after paradigm "
                        "and evaluation checks")

    self.results = Results(type(self),
                           type(self.paradigm),
                           overwrite=overwrite,
                           suffix=suffix,
                           hdf5_path=self.hdf5_path)
def process(self, pipelines, overwrite=False, suffix=''):
    """Run every pipeline on every dataset/subject and collect scores.

    Parameters
    ----------
    pipelines : dict
        Mapping from pipeline name to a sklearn ``BaseEstimator``.
    overwrite : bool
        Whether previously stored results should be recomputed.
    suffix : str
        Suffix for the results storage.

    Returns
    -------
    Results
        Container holding the scores computed during this run.

    Raises
    ------
    ValueError
        If ``pipelines`` is not a dict of estimators.
    """
    # validate the pipelines argument before doing any work
    if not isinstance(pipelines, dict):
        raise ValueError("pipelines must be a dict")
    for pipeline in pipelines.values():
        if not isinstance(pipeline, BaseEstimator):
            raise ValueError("pipelines must only contains Pipelines "
                             "instance")

    results = Results(type(self), type(self.paradigm),
                      overwrite=overwrite, suffix=suffix)

    for dataset in self.datasets:
        log.info('Processing dataset: {}'.format(dataset.code))
        self.preprocess_data(dataset)

        for subject in dataset.subject_list:
            # skip subjects whose results are already stored
            run_pipes = results.not_yet_computed(pipelines, dataset,
                                                 subject)
            if not run_pipes:
                continue
            try:
                res = self.evaluate(dataset, subject, run_pipes)
                for pipe, records in res.items():
                    for r in records:
                        log.info(f"{pipe} | {r['dataset'].code} | "
                                 f"{r['id']} "
                                 f": Score {r['score']:.3f}")
                results.add(res, pipelines=pipelines)
            except Exception as e:
                # best effort: a failing subject must not abort the run
                log.error(e)
                log.debug(traceback.format_exc())
                log.warning('Skipping subject {}'.format(subject))

    return results
def __init__(self, paradigm, datasets=None, random_state=None, n_jobs=1,
             overwrite=False, suffix=''):
    """Initialize the evaluation.

    Validates the paradigm and dataset arguments, removes datasets that
    fail the paradigm/evaluation verification, and prepares the results
    container.

    Parameters
    ----------
    paradigm : BaseParadigm
        Paradigm used to extract data from the datasets.
    datasets : list of BaseDataset | BaseDataset | None
        Datasets to evaluate on. If None, all datasets declared by the
        paradigm are used.
    random_state : int | None
        Seed stored for reproducibility.
    n_jobs : int
        Number of parallel jobs.
    overwrite : bool
        Whether previously stored results should be overwritten.
    suffix : str
        Suffix appended to the results storage name.

    Raises
    ------
    ValueError
        If ``paradigm`` or ``datasets`` has the wrong type.
    """
    self.random_state = random_state
    self.n_jobs = n_jobs

    # check paradigm
    if not isinstance(paradigm, BaseParadigm):
        raise ValueError("paradigm must be an Paradigm instance")
    self.paradigm = paradigm

    # if no dataset provided, then we get the list from the paradigm
    if datasets is None:
        datasets = self.paradigm.datasets

    if not isinstance(datasets, list):
        if isinstance(datasets, BaseDataset):
            datasets = [datasets]
        else:
            raise ValueError("datasets must be a list or a dataset "
                             "instance")

    for dataset in datasets:
        if not isinstance(dataset, BaseDataset):
            raise ValueError("datasets must only contains dataset "
                             "instance")

    # BUG FIX: the original called ``datasets.remove(dataset)`` inside
    # ``for dataset in datasets``, mutating the list while iterating it;
    # that silently skips the element following each removed dataset.
    # Build the list of survivors instead.
    kept = []
    for dataset in datasets:
        # fixme, we might want to drop dataset that are not compatible
        try:
            self.paradigm.verify(dataset)
            self.verify(dataset)
            kept.append(dataset)
        except AssertionError:
            log.warning(f"{dataset} not compatible with evaluation or "
                        "paradigm. Removing this dataset from the list.")
    self.datasets = kept

    self.results = Results(type(self), type(self.paradigm),
                           overwrite=overwrite, suffix=suffix)
def setUp(self):
    """Create a fresh Results container backed by the dummy classes."""
    # The 'test' suffix keeps this store separate from real result files.
    self.obj = Results(
        evaluation_class=DummyEvaluation,
        paradigm_class=DummyParadigm,
        suffix='test',
    )
def __init__(
    self,
    paradigm,
    datasets=None,
    random_state=None,
    n_jobs=1,
    overwrite=False,
    error_score="raise",
    suffix="",
    hdf5_path=None,
    additional_columns=None,
    return_epochs=False,
    mne_labels=False,
):
    """Initialize the evaluation.

    Validates the paradigm, label options, and dataset arguments, drops
    datasets incompatible with the paradigm or this evaluation, and
    prepares the results container.

    Parameters
    ----------
    paradigm : BaseParadigm
        Paradigm used to extract data from the datasets.
    datasets : list of BaseDataset | BaseDataset | None
        Datasets to evaluate on. If None, all datasets declared by the
        paradigm are used.
    random_state : int | None
        Seed stored for reproducibility.
    n_jobs : int
        Number of parallel jobs.
    overwrite : bool
        Whether previously stored results should be overwritten.
    error_score : 'raise' | numeric
        Passed through to scoring; behavior on pipeline failure.
    suffix : str
        Suffix appended to the results storage name.
    hdf5_path : str | None
        Location of the HDF5 results file.
    additional_columns : list | None
        Extra columns to record in the results, forwarded to ``Results``.
    return_epochs : bool
        Whether the paradigm should return MNE epochs instead of arrays.
    mne_labels : bool
        Whether to keep MNE label encoding; requires ``return_epochs``.

    Raises
    ------
    ValueError
        If an argument has the wrong type, or ``mne_labels`` is set
        without ``return_epochs``.
    Exception
        If no dataset remains after the compatibility checks.
    """
    self.random_state = random_state
    self.n_jobs = n_jobs
    self.error_score = error_score
    self.hdf5_path = hdf5_path
    self.return_epochs = return_epochs
    self.mne_labels = mne_labels

    # check paradigm
    if not isinstance(paradigm, BaseParadigm):
        raise ValueError("paradigm must be an Paradigm instance")
    self.paradigm = paradigm

    # check labels
    if self.mne_labels and not self.return_epochs:
        raise ValueError("mne_labels could only be set with return_epochs")

    # if no dataset provided, then we get the list from the paradigm
    if datasets is None:
        datasets = self.paradigm.datasets

    if not isinstance(datasets, list):
        if isinstance(datasets, BaseDataset):
            datasets = [datasets]
        else:
            raise ValueError("datasets must be a list or a dataset "
                             "instance")

    for dataset in datasets:
        if not isinstance(dataset, BaseDataset):
            raise ValueError("datasets must only contains dataset "
                             "instance")

    rm = []
    for dataset in datasets:
        valid_for_paradigm = self.paradigm.is_valid(dataset)
        valid_for_eval = self.is_valid(dataset)
        if not valid_for_paradigm:
            log.warning(f"{dataset} not compatible with "
                        "paradigm. Removing this dataset from the list.")
            rm.append(dataset)
        elif not valid_for_eval:
            log.warning(f"{dataset} not compatible with evaluation. "
                        "Removing this dataset from the list.")
            rm.append(dataset)

    # Plain loop for the removal side effect; a list comprehension used
    # only for its side effects builds a throwaway list (anti-pattern).
    for r in rm:
        datasets.remove(r)

    if len(datasets) > 0:
        self.datasets = datasets
    else:
        raise Exception("No datasets left after paradigm "
                        "and evaluation checks")

    self.results = Results(
        type(self),
        type(self.paradigm),
        overwrite=overwrite,
        suffix=suffix,
        hdf5_path=self.hdf5_path,
        additional_columns=additional_columns,
    )
def setUp(self):
    """Create a fresh Results container from dummy instances' classes."""
    # Instantiate the dummies, then hand their concrete classes to
    # Results; the 'test' suffix isolates this store from real results.
    evaluation = DummyEvaluation(DummyParadigm())
    paradigm = DummyParadigm()
    self.obj = Results(
        evaluation_class=type(evaluation),
        paradigm_class=type(paradigm),
        suffix='test',
    )