Example #1
    def test_eval_test_all_loss_functions(self):
        eval_t(
            queue=self.queue,
            backend=self.backend,
            config=self.configuration,
            metric=accuracy,
            seed=1,
            num_run=1,
            scoring_functions=SCORER_LIST,
            output_y_hat_optimization=False,
            include=None,
            exclude=None,
            disable_file_output=False,
            instance=self.dataset_name,
            port=self.port,
        )
        rval = read_queue(self.queue)
        self.assertEqual(len(rval), 1)

        # Note: all metrics here are losses and should be minimized
        fixture = {
            'accuracy': 0.040000000000000036,
            'balanced_accuracy': 0.02777777777777779,
            'f1_macro': 0.0341005967604433,
            'f1_micro': 0.040000000000000036,
            'f1_weighted': 0.039693094629155934,
            'log_loss': 0.13966929787769913,
            'precision_macro': 0.03703703703703709,
            'precision_micro': 0.040000000000000036,
            'precision_weighted': 0.03555555555555556,
            'recall_macro': 0.02777777777777779,
            'recall_micro': 0.040000000000000036,
            'recall_weighted': 0.040000000000000036,
            'num_run': -1
        }

        additional_run_info = rval[0]['additional_run_info']
        for key, value in fixture.items():
            self.assertAlmostEqual(additional_run_info[key],
                                   fixture[key],
                                   msg=key)
        self.assertEqual(len(additional_run_info),
                         len(fixture) + 1,
                         msg=sorted(additional_run_info.items()))
        self.assertIn('duration', additional_run_info)
        self.assertAlmostEqual(rval[0]['loss'], 0.040000000000000036)
        self.assertEqual(rval[0]['status'], StatusType.SUCCESS)
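
Example #1 drains the evaluator's result dictionaries with read_queue, and the later snippets use get_last_result. The real helpers ship with the project's evaluation test utilities; the sketch below only illustrates the behavior the assertions assume (drain the multiprocessing queue until it is empty, or take the most recent entry), so the names and signatures here are assumptions rather than the library's actual API.

# Hypothetical sketch of the queue helpers used throughout these examples;
# the project's own implementations may differ in details such as timeouts.
import multiprocessing
import queue as queue_module


def read_queue(q, timeout=1.0):
    """Drain every result dict the evaluator pushed onto the queue."""
    results = []
    while True:
        try:
            results.append(q.get(timeout=timeout))
        except queue_module.Empty:
            break
    return results


def get_last_result(q):
    """Return only the most recent result (used by the simpler tests below)."""
    return read_queue(q)[-1]


if __name__ == '__main__':
    q = multiprocessing.Queue()
    q.put({'loss': 0.04, 'status': 'SUCCESS', 'additional_run_info': {'duration': 0.1}})
    print(get_last_result(q))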
Example #2
    def test_eval_test_all_loss_functions(self):
        eval_t(
            queue=self.queue,
            backend=self.backend,
            config=self.configuration,
            metric=accuracy,
            seed=1,
            num_run=1,
            all_scoring_functions=True,
            output_y_hat_optimization=False,
            include=None,
            exclude=None,
            disable_file_output=False,
            instance=self.dataset_name,
        )
        rval = read_queue(self.queue)
        self.assertEqual(len(rval), 1)

        fixture = {
            'accuracy': 0.08,
            'balanced_accuracy': 0.05555555555555547,
            'f1_macro': 0.06734006734006737,
            'f1_micro': 0.08,
            'f1_weighted': 0.07919191919191915,
            'log_loss': 1.128776115477085,
            'pac_score': 0.187005982641133,
            'precision_macro': 0.06666666666666676,
            'precision_micro': 0.08,
            'precision_weighted': 0.064,
            'recall_macro': 0.05555555555555547,
            'recall_micro': 0.08,
            'recall_weighted': 0.08,
            'num_run': -1
        }

        additional_run_info = rval[0]['additional_run_info']
        for key, value in fixture.items():
            self.assertAlmostEqual(additional_run_info[key],
                                   fixture[key],
                                   msg=key)
        self.assertEqual(len(additional_run_info),
                         len(fixture) + 1,
                         msg=sorted(additional_run_info.items()))
        self.assertIn('duration', additional_run_info)
        self.assertAlmostEqual(rval[0]['loss'], 0.08)
        self.assertEqual(rval[0]['status'], StatusType.SUCCESS)
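
The fixture dictionaries in both examples hold losses rather than raw scores: every entry is converted so that lower is better. For a score with an optimum of 1.0 (accuracy, the F1 and precision/recall variants) the loss is simply one minus the score, while a quantity that is already minimized, such as log_loss, passes through essentially unchanged. The snippet below only illustrates that assumed convention, not the library's exact conversion code.

# Illustration of the assumed score-to-loss convention behind the fixtures above.
from sklearn.metrics import accuracy_score, balanced_accuracy_score

y_true = [0, 1, 1, 0, 1, 2, 2, 1, 0, 2]
y_pred = [0, 1, 0, 0, 1, 2, 2, 1, 0, 1]

# For "higher is better" scores with optimum 1.0, loss = 1.0 - score.
accuracy_loss = 1.0 - accuracy_score(y_true, y_pred)
balanced_accuracy_loss = 1.0 - balanced_accuracy_score(y_true, y_pred)

print(accuracy_loss, balanced_accuracy_loss)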
Example #3
    def test_eval_test(self):
        eval_t(queue=self.queue,
               backend=self.backend,
               config=self.configuration,
               metric=accuracy,
               seed=1,
               num_run=1,
               all_scoring_functions=False,
               output_y_hat_optimization=False,
               include=None,
               exclude=None,
               disable_file_output=False,
               instance=self.dataset_name)
        rval = get_last_result(self.queue)
        self.assertAlmostEqual(rval['loss'], 0.04)
        self.assertEqual(rval['status'], StatusType.SUCCESS)
        self.assertNotIn('bac_metric', rval['additional_run_info'])
Example #4
    def test_eval_test_all_loss_functions(self):
        eval_t(
            queue=self.queue,
            backend=self.backend,
            config=self.configuration,
            metric=accuracy,
            seed=1,
            num_run=1,
            all_scoring_functions=True,
            output_y_hat_optimization=False,
            include=None,
            exclude=None,
            disable_file_output=False,
            instance=self.dataset_name,
        )
        rval = get_last_result(self.queue)
        fixture = {
            'accuracy': 0.04,
            'balanced_accuracy': 0.0277777777778,
            'f1_macro': 0.0341005967604,
            'f1_micro': 0.04,
            'f1_weighted': 0.0396930946292,
            'log_loss': 1.1352229526638984,
            'pac_score': 0.19574985585209126,
            'precision_macro': 0.037037037037,
            'precision_micro': 0.04,
            'precision_weighted': 0.0355555555556,
            'recall_macro': 0.0277777777778,
            'recall_micro': 0.04,
            'recall_weighted': 0.04,
            'num_run': -1
        }

        additional_run_info = rval['additional_run_info']
        for key, value in fixture.items():
            self.assertAlmostEqual(additional_run_info[key],
                                   fixture[key],
                                   msg=key)
        self.assertEqual(len(additional_run_info),
                         len(fixture) + 1,
                         msg=sorted(additional_run_info.items()))
        self.assertIn('duration', additional_run_info)
        self.assertAlmostEqual(rval['loss'], 0.04)
        self.assertEqual(rval['status'], StatusType.SUCCESS)
Example #5
    def test_eval_test(self):
        eval_t(
            queue=self.queue,
            backend=self.backend,
            config=self.configuration,
            metric=accuracy,
            seed=1,
            num_run=1,
            scoring_functions=None,
            output_y_hat_optimization=False,
            include=None,
            exclude=None,
            disable_file_output=False,
            instance=self.dataset_name,
            port=self.port,
            additional_components=dict(),
        )
        rval = read_queue(self.queue)
        self.assertEqual(len(rval), 1)
        self.assertAlmostEqual(rval[0]['loss'], 0.040000000000000036)
        self.assertEqual(rval[0]['status'], StatusType.SUCCESS)
        self.assertNotIn('bac_metric', rval[0]['additional_run_info'])
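
All of the methods above reference attributes prepared elsewhere in their test class: self.queue, self.backend, self.configuration, self.dataset_name, and, for the newer signature shown in Example #1 and Example #5, self.port (the older snippets instead pass all_scoring_functions). A minimal sketch of such a fixture, using mocked stand-ins rather than the project's real setup, might look like the following; every concrete value here is an assumption for illustration only.

# Hypothetical fixture sketch; the real test class builds an actual backend,
# a sampled configuration, and a logging port rather than these stand-ins.
import multiprocessing
import unittest
from unittest import mock


class EvalTFixtureSketch(unittest.TestCase):
    def setUp(self):
        self.queue = multiprocessing.Queue()   # evaluator pushes result dicts here
        self.backend = mock.Mock()             # stands in for the data/output backend
        self.configuration = mock.Mock()       # stands in for a sampled pipeline configuration
        self.dataset_name = 'iris'             # instance identifier passed to eval_t
        self.port = 9020                       # logging-server port used by the newer signature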