Example #1
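All snippets on this page reference module-level fixtures (`ev`, `np`, `dataset`, `pipelines`, `FakeImageryParadigm`) that are not shown. A minimal sketch of what they might look like, assuming MOABB's fake-dataset test utilities and a pyriemann-based pipeline (the exact import paths and constructor arguments are assumptions, not verbatim from the test module):

import numpy as np
from collections import OrderedDict

from pyriemann.estimation import Covariances
from pyriemann.spatialfilters import CSP
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.pipeline import make_pipeline

# Assumed MOABB import paths for the test fixtures.
from moabb.datasets.fake import FakeDataset
from moabb.evaluations import evaluations as ev
from moabb.paradigms.motor_imagery import FakeImageryParadigm

# Shared fixtures: a small synthetic imagery dataset and one pipeline.
dataset = FakeDataset(event_list=["left_hand", "right_hand"], n_subjects=2)
pipelines = OrderedDict()
pipelines["C"] = make_pipeline(Covariances("oas"), CSP(8), LDA())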
def test_all_policies_work(self):
    kwargs = dict(
        paradigm=FakeImageryParadigm(),
        datasets=[dataset],
        n_perms=[2, 2],
    )
    # The "per_class" and "ratio" policies are valid and should
    # construct without raising.
    ev.WithinSessionEvaluation(
        data_size={"policy": "per_class", "value": [5, 10]}, **kwargs
    )
    ev.WithinSessionEvaluation(
        data_size={"policy": "ratio", "value": [0.2, 0.5]}, **kwargs
    )
    # An unknown policy name must raise a ValueError.
    self.assertRaises(
        ValueError,
        ev.WithinSessionEvaluation,
        data_size={"policy": "does_not_exist", "value": [0.2, 0.5]},
        **kwargs,
    )
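For context, the two valid policies differ in how the requested values are interpreted: `per_class` asks for absolute sample counts per class, while `ratio` asks for fractions of the available training data. A hypothetical helper illustrating that interpretation (this is not MOABB code; the semantics are inferred from these tests):

def data_size_steps(n_available, policy, values):
    # Hypothetical illustration of the data_size semantics, not MOABB API.
    if policy == "per_class":
        # Absolute number of training samples per class.
        return [int(v) for v in values]
    if policy == "ratio":
        # Fraction of the available training data.
        return [int(round(v * n_available)) for v in values]
    raise ValueError(f"Unknown policy: {policy}")

data_size_steps(40, "ratio", [0.2, 0.5])  # -> [8, 20]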
Example #2
def test_correct_results_integrity(self):
    learning_curve_eval = ev.WithinSessionEvaluation(
        paradigm=FakeImageryParadigm(),
        datasets=[dataset],
        data_size={"policy": "ratio", "value": np.array([0.2, 0.5])},
        n_perms=np.array([2, 2]),
    )
    results = list(learning_curve_eval.evaluate(dataset, pipelines))
    keys = results[0].keys()
    self.assertEqual(len(keys), 10)  # 8 standard + 2 learning-curve fields
    self.assertIn("permutation", keys)
    self.assertIn("data_size", keys)
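Since each record yielded by `evaluate` behaves like a plain dict, the result stream is easy to collect into a pandas DataFrame for inspection. A minimal sketch, assuming dict records; only the two columns asserted above are guaranteed by this test:

import pandas as pd

df = pd.DataFrame(results)
# Inspect the learning-curve columns asserted by the test above.
print(df[["data_size", "permutation"]])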
Example #3
def test_data_sanity(self):
    # Helper to exhaust the generator returned by evaluate().
    def run_evaluation(evaluation, dataset, pipelines):
        list(evaluation.evaluate(dataset, pipelines))

    kwargs = dict(
        paradigm=FakeImageryParadigm(),
        datasets=[dataset],
        n_perms=[2, 2],
    )
    should_work = ev.WithinSessionEvaluation(
        data_size={"policy": "per_class", "value": [5, 10]}, **kwargs
    )
    # Requesting far more samples per class than the dataset contains
    # should raise a ValueError at evaluation time.
    too_many_samples = ev.WithinSessionEvaluation(
        data_size={"policy": "per_class", "value": [5, 100000]}, **kwargs
    )
    # The well-sized request should run cleanly.
    run_evaluation(should_work, dataset, pipelines)
    self.assertRaises(
        ValueError, run_evaluation, too_many_samples, dataset, pipelines
    )
Example #4
def setUp(self):
    self.eval = ev.WithinSessionEvaluation(
        paradigm=FakeImageryParadigm(), datasets=[dataset]
    )
Example #5
def setUp(self):
    self.eval = ev.WithinSessionEvaluation(
        paradigm=FakeImageryParadigm(),
        datasets=[dataset],
        additional_columns=["one", "two"],
    )
Example #6
def return_eval(self):
    return ev.WithinSessionEvaluation(
        paradigm=FakeImageryParadigm(), datasets=[dataset]
    )
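The `setUp` and test methods above live on `unittest.TestCase` subclasses. A minimal harness for running them might look like the sketch below; the class name and the smoke test are assumptions, not part of the original suite:

import unittest

class TestWithinSession(unittest.TestCase):
    def setUp(self):
        self.eval = ev.WithinSessionEvaluation(
            paradigm=FakeImageryParadigm(), datasets=[dataset]
        )

    def test_is_valid(self):
        # Smoke check that the evaluation object was constructed.
        self.assertIsInstance(self.eval, ev.WithinSessionEvaluation)

if __name__ == "__main__":
    unittest.main()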