    def test_file_output(self):
        shutil.rmtree(self.working_directory, ignore_errors=True)
        os.mkdir(self.working_directory)

        queue_mock = unittest.mock.Mock()

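        # Create a backend context that keeps its tmp and output folders inside
        # the test's working directory and removes them again on terminate.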
        context = BackendContext(
            temporary_directory=os.path.join(self.working_directory, 'tmp'),
            output_directory=os.path.join(self.working_directory, 'out'),
            delete_tmp_folder_after_terminate=True,
            delete_output_folder_after_terminate=True,
        )
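        # Patch Backend.load_datamanager so the evaluator gets a dummy
        # multiclass datamanager instead of reading real data from disk.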
        with unittest.mock.patch.object(
                Backend, 'load_datamanager') as load_datamanager_mock:
            load_datamanager_mock.return_value = (
                get_multiclass_classification_datamanager()
            )

            backend = Backend(context)

            ae = AbstractEvaluator(
                backend=backend,
                output_y_hat_optimization=False,
                queue=queue_mock,
                metric=accuracy,
                port=self.port,
            )
            ae.model = sklearn.dummy.DummyClassifier()

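            # Random arrays stand in for the optimization, validation and test
            # predictions (3 classes; the shapes only need to be plausible here).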
            rs = np.random.RandomState()
            ae.Y_optimization = rs.rand(33, 3)
            predictions_ensemble = rs.rand(33, 3)
            predictions_test = rs.rand(25, 3)
            predictions_valid = rs.rand(25, 3)

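            # file_output should persist these predictions into the backend's
            # run directory under the temporary directory.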
            ae.file_output(
                Y_optimization_pred=predictions_ensemble,
                Y_valid_pred=predictions_valid,
                Y_test_pred=predictions_test,
            )

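            # The run folder should now exist; its name presumably encodes the
            # seed, the run number and the budget (here 1, 0 and None).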
            self.assertTrue(
                os.path.exists(
                    os.path.join(self.working_directory, 'tmp',
                                 '.auto-sklearn', 'runs', '1_0_None')))

            shutil.rmtree(self.working_directory, ignore_errors=True)
    def test_disable_file_output(self):
        queue_mock = unittest.mock.Mock()

        rs = np.random.RandomState(1)

        ae = AbstractEvaluator(
            backend=self.backend_mock,
            queue=queue_mock,
            disable_file_output=True,
            metric=accuracy,
            port=self.port,
        )

        predictions_ensemble = rs.rand(33, 3)
        predictions_test = rs.rand(25, 3)
        predictions_valid = rs.rand(25, 3)

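        # With disable_file_output=True, file_output should return (None, {})
        # without writing anything through the backend.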
        loss_, additional_run_info_ = (ae.file_output(
            predictions_ensemble,
            predictions_valid,
            predictions_test,
        ))

        self.assertIsNone(loss_)
        self.assertEqual(additional_run_info_, {})
        # save_numrun_to_dir is never called because file_output returns
        # before reaching it when file output is fully disabled
        self.assertEqual(self.backend_mock.save_numrun_to_dir.call_count, 0)

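        # Disable exactly one of 'model' / 'cv_model' per iteration and check
        # that only the disabled artifact is passed as None to
        # save_numrun_to_dir while the predictions are still saved.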
        for call_count, disable in enumerate(['model', 'cv_model'], start=1):
            ae = AbstractEvaluator(
                backend=self.backend_mock,
                output_y_hat_optimization=False,
                queue=queue_mock,
                disable_file_output=[disable],
                metric=accuracy,
                port=self.port,
            )
            ae.Y_optimization = predictions_ensemble
            ae.model = unittest.mock.Mock()
            ae.models = [unittest.mock.Mock()]

            loss_, additional_run_info_ = (ae.file_output(
                predictions_ensemble,
                predictions_valid,
                predictions_test,
            ))

            self.assertIsNone(loss_)
            self.assertEqual(additional_run_info_, {})
            self.assertEqual(self.backend_mock.save_numrun_to_dir.call_count,
                             call_count)
            if disable == 'model':
                self.assertIsNone(
                    self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]
                    ['model'])
                self.assertIsNotNone(
                    self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]
                    ['cv_model'])
            else:
                self.assertIsNotNone(
                    self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]
                    ['model'])
                self.assertIsNone(
                    self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]
                    ['cv_model'])
            self.assertIsNotNone(
                self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]
                ['ensemble_predictions'])
            self.assertIsNotNone(
                self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]
                ['valid_predictions'])
            self.assertIsNotNone(
                self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]
                ['test_predictions'])

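        # Disabling 'y_optimization' should suppress only the ensemble
        # (optimization) predictions; validation and test predictions are kept.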
        ae = AbstractEvaluator(
            backend=self.backend_mock,
            output_y_hat_optimization=False,
            queue=queue_mock,
            metric=accuracy,
            disable_file_output=['y_optimization'],
            port=self.port,
        )
        ae.Y_optimization = predictions_ensemble
        ae.model = 'model'
        ae.models = [unittest.mock.Mock()]

        loss_, additional_run_info_ = (ae.file_output(
            predictions_ensemble,
            predictions_valid,
            predictions_test,
        ))

        self.assertIsNone(loss_)
        self.assertEqual(additional_run_info_, {})

        self.assertIsNone(
            self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]
            ['ensemble_predictions'])
        self.assertIsNotNone(
            self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]
            ['valid_predictions'])
        self.assertIsNotNone(
            self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]
            ['test_predictions'])

    @unittest.mock.patch('os.path.exists')
    def test_disable_file_output(self, exists_mock):
        backend_mock = unittest.mock.Mock()
        backend_mock.get_model_dir.return_value = 'abc'
        D = get_multiclass_classification_datamanager()
        backend_mock.load_datamanager.return_value = D
        queue_mock = unittest.mock.Mock()

        rs = np.random.RandomState(1)

        ae = AbstractEvaluator(
            backend=backend_mock,
            queue=queue_mock,
            disable_file_output=True,
            metric=accuracy,
        )

        predictions_ensemble = rs.rand(33, 3)
        predictions_test = rs.rand(25, 3)
        predictions_valid = rs.rand(25, 3)

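        # With disable_file_output=True nothing should be written at all.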
        loss_, additional_run_info_ = ae.file_output(predictions_ensemble,
                                                     predictions_valid,
                                                     predictions_test)

        self.assertIsNone(loss_)
        self.assertEqual(additional_run_info_, {})
        # This function is not guarded by an if statement
        self.assertEqual(backend_mock.save_predictions_as_npy.call_count, 0)
        self.assertEqual(backend_mock.save_model.call_count, 0)

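        # Disabling only 'model' should still save the three prediction files
        # but skip saving the model itself.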
        ae = AbstractEvaluator(
            backend=backend_mock,
            output_y_hat_optimization=False,
            queue=queue_mock,
            disable_file_output=['model'],
            metric=accuracy,
        )
        ae.Y_optimization = predictions_ensemble

        loss_, additional_run_info_ = ae.file_output(predictions_ensemble,
                                                     predictions_valid,
                                                     predictions_test)

        self.assertIsNone(loss_)
        self.assertIsNone(additional_run_info_)
        # This function is not guarded by an if statement
        self.assertEqual(backend_mock.save_predictions_as_npy.call_count, 3)
        self.assertEqual(backend_mock.save_model.call_count, 0)

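        # Disabling 'y_optimization' skips the ensemble predictions, so only
        # the validation and test predictions are added (3 -> 5 calls), while
        # the model is now saved once.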
        ae = AbstractEvaluator(
            backend=backend_mock,
            output_y_hat_optimization=False,
            queue=queue_mock,
            metric=accuracy,
            disable_file_output=['y_optimization'],
        )
        exists_mock.return_value = True
        ae.Y_optimization = predictions_ensemble
        ae.model = 'model'

        loss_, additional_run_info_ = ae.file_output(predictions_ensemble,
                                                     predictions_valid,
                                                     predictions_test)

        self.assertIsNone(loss_)
        self.assertIsNone(additional_run_info_)
        # This function is not guarded by an if statement
        self.assertEqual(backend_mock.save_predictions_as_npy.call_count, 5)
        self.assertEqual(backend_mock.save_model.call_count, 1)

    @unittest.mock.patch('os.path.exists')
    def test_disable_file_output(self, exists_mock):
        backend_mock = unittest.mock.Mock()
        backend_mock.get_model_dir.return_value = 'abc'
        D = get_multiclass_classification_datamanager()
        backend_mock.load_datamanager.return_value = D
        queue_mock = unittest.mock.Mock()

        rs = np.random.RandomState(1)

        ae = AbstractEvaluator(
            backend=backend_mock,
            queue=queue_mock,
            disable_file_output=True,
            metric=accuracy,
        )

        predictions_train = rs.rand(66, 3)
        predictions_ensemble = rs.rand(33, 3)
        predictions_test = rs.rand(25, 3)
        predictions_valid = rs.rand(25, 3)

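        # This variant also passes the training-fold predictions to
        # file_output; with disable_file_output=True nothing is written.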
        loss_, additional_run_info_ = (
            ae.file_output(
                predictions_train,
                predictions_ensemble,
                predictions_valid,
                predictions_test,
            )
        )

        self.assertIsNone(loss_)
        self.assertEqual(additional_run_info_, {})
        # This function is not guarded by an if statement
        self.assertEqual(backend_mock.save_predictions_as_npy.call_count, 0)
        self.assertEqual(backend_mock.save_model.call_count, 0)

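        # As above, disabling only 'model' keeps the three prediction saves
        # but skips saving the model.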
        ae = AbstractEvaluator(
            backend=backend_mock,
            output_y_hat_optimization=False,
            queue=queue_mock,
            disable_file_output=['model'],
            metric=accuracy,
        )
        ae.Y_optimization = predictions_ensemble

        loss_, additional_run_info_ = (
            ae.file_output(
                predictions_train,
                predictions_ensemble,
                predictions_valid,
                predictions_test,
            )
        )

        self.assertIsNone(loss_)
        self.assertEqual(additional_run_info_, {})
        # This function is not guarded by an if statement
        self.assertEqual(backend_mock.save_predictions_as_npy.call_count, 3)
        self.assertEqual(backend_mock.save_model.call_count, 0)

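        # Disabling 'y_optimization' again skips the ensemble predictions
        # (3 -> 5 prediction saves) and saves the model once.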
        ae = AbstractEvaluator(
            backend=backend_mock,
            output_y_hat_optimization=False,
            queue=queue_mock,
            metric=accuracy,
            disable_file_output=['y_optimization'],
        )
        exists_mock.return_value = True
        ae.Y_optimization = predictions_ensemble
        ae.model = 'model'

        loss_, additional_run_info_ = (
            ae.file_output(
                predictions_train,
                predictions_ensemble,
                predictions_valid,
                predictions_test,
            )
        )

        self.assertIsNone(loss_)
        self.assertEqual(additional_run_info_, {})
        # This function is not guarded by an if statement
        self.assertEqual(backend_mock.save_predictions_as_npy.call_count, 5)
        self.assertEqual(backend_mock.save_model.call_count, 1)