def test_averages(self):
    """Averaging two performance records should give the element-wise mean."""
    first = SleepWakePerformance(
        accuracy=0,
        wake_correct=0.1,
        sleep_correct=0.2,
        auc=0.3,
        kappa=0.4,
        wake_predictive_value=0.5,
        sleep_predictive_value=0.6,
    )
    second = SleepWakePerformance(
        accuracy=0.2,
        wake_correct=0.3,
        sleep_correct=0.4,
        auc=0.5,
        kappa=0.6,
        wake_predictive_value=0.7,
        sleep_predictive_value=0.8,
    )
    # Every field of the expected record is the midpoint of the two inputs.
    expected = SleepWakePerformance(
        accuracy=0.1,
        wake_correct=0.2,
        sleep_correct=0.3,
        auc=0.4,
        kappa=0.5,
        wake_predictive_value=0.6,
        sleep_predictive_value=0.7,
    )

    averaged = PerformanceSummarizer.average([first, second])

    TestHelper.assert_models_equal(self, expected, averaged)
def test_leave_one_out(self):
    """Each subject is held out for testing exactly once, in input order,
    while all remaining subjects form the training set."""
    subject_ids = ["subjectA", "subjectB", "subjectC"]

    splits = TrainTestSplitter.leave_one_out(subject_ids)

    # Split i holds out subject i; training set preserves the original order
    # of the remaining subjects.
    for index, held_out in enumerate(subject_ids):
        training = [subject for subject in subject_ids if subject != held_out]
        TestHelper.assert_models_equal(
            self,
            DataSplit(training_set=training, testing_set=[held_out]),
            splits[index],
        )