def test_eval_test_all_loss_functions(self):
    """Run a test-set evaluation with all scoring functions enabled and
    compare every reported metric against known-good fixture values.

    ``info[3]`` (the additional-info string) is expected to be a
    ``;``-separated list of ``name: value`` pairs.
    """
    eval_t(queue=self.queue,
           backend=self.backend,
           config=self.configuration,
           data=self.data,
           seed=1,
           num_run=1,
           subsample=None,
           with_predictions=True,
           all_scoring_functions=True,
           output_y_test=True,
           include=None,
           exclude=None,
           disable_file_output=False)
    info = get_last_result(self.queue)
    # Known-good values for each entry expected in the info string.
    fixture = {'f1_metric': 0.0511508951407,
               'pac_metric': 0.185257565321,
               'acc_metric': 0.06,
               'auc_metric': 0.00917546505782,
               'bac_metric': 0.0416666666667,
               'num_run': -1}
    # Parse 'name: value;name: value;...' into a name -> float mapping.
    # float() tolerates the leading space left after splitting on ':'.
    rval = {i.split(':')[0]: float(i.split(':')[1])
            for i in info[3].split(';')}
    for key, value in fixture.items():
        # Use the unpacked value directly instead of re-indexing fixture.
        self.assertAlmostEqual(rval[key], value)
    # 'duration' is reported but not pinned to an exact value.
    self.assertIn('duration', rval)
    self.assertAlmostEqual(info[1], 0.041666666666666852)
    self.assertEqual(info[2], 1)
def test_eval_test(self):
    """Test-set evaluation with a single metric: the loss and num_run are
    checked, and 'bac_metric' must not appear in the additional info."""
    eval_t(self.queue, self.configuration, self.data, self.tmp_dir,
           1, 1, None, True, False, True)
    result = self.queue.get()
    self.assertAlmostEqual(result[1], 0.041666666666666852)
    self.assertEqual(result[2], 1)
    self.assertNotIn('bac_metric', result[3])
def test_eval_test_all_loss_functions(self):
    """Test-set evaluation with all scoring functions enabled: the full
    metric string, the loss, and num_run are checked."""
    eval_t(self.queue, self.configuration, self.data, self.tmp_dir,
           1, 1, None, True, True, True)
    result = self.queue.get()
    expected_metrics = ('f1_metric: 0.0511508951407;pac_metric: 0.273385527265;'
                        'acc_metric: 0.06;auc_metric: 0.00917546505782;'
                        'bac_metric: 0.0416666666667;duration: ')
    self.assertIn(expected_metrics, result[3])
    self.assertAlmostEqual(result[1], 0.041666666666666852)
    self.assertEqual(result[2], 1)
def test_eval_test(self):
    """Test-set evaluation via the keyword-argument API with a single
    scoring function; 'bac_metric' must be absent from the info string."""
    eval_t(queue=self.queue,
           backend=self.backend,
           config=self.configuration,
           data=self.data,
           seed=1,
           num_run=1,
           subsample=None,
           with_predictions=True,
           all_scoring_functions=False,
           output_y_test=True,
           include=None,
           exclude=None,
           disable_file_output=False)
    result = get_last_result(self.queue)
    self.assertAlmostEqual(result[1], 0.041666666666666852)
    self.assertEqual(result[2], 1)
    self.assertNotIn('bac_metric', result[3])