Example #1
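    # HoldoutEvaluator with a stub model whose predict_proba always returns
    # [0.1, 0.9]: every optimization prediction must report 0.9 for the
    # positive class.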
    def test_predict_proba_binary_classification(self):
        self.output_dir = os.path.join(
            os.getcwd(), '.test_predict_proba_binary_classification')
        D = get_binary_classification_datamanager()

        class Dummy2(object):
            def predict_proba(self, y, batch_size=200):
                return np.array([[0.1, 0.9]] * 23)

            def fit(self, X, y):
                return self

        model = Dummy2()

        configuration_space = get_configuration_space(
            D.info,
            include_estimators=['extra_trees'],
            include_preprocessors=['select_rates'])
        configuration = configuration_space.sample_configuration()

        evaluator = HoldoutEvaluator(D, self.output_dir, configuration)
        evaluator.model = model
        loss, Y_optimization_pred, Y_valid_pred, Y_test_pred = \
            evaluator.fit_predict_and_loss()

        for i in range(23):
            self.assertEqual(0.9, Y_optimization_pred[i][1])
Example #2
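    # TrainEvaluator with a mocked pipeline: the ensemble predictions handed to
    # the backend must reproduce the mocked probability of 0.9 for the positive
    # class.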
    def test_predict_proba_binary_classification(self, mock):
        D = get_binary_classification_datamanager()
        self.backend_mock.load_datamanager.return_value = D
        mock.predict_proba.side_effect = lambda y, batch_size=None: np.array(
            [[0.1, 0.9]] * y.shape[0])
        mock.side_effect = lambda **kwargs: mock

        configuration = unittest.mock.Mock(spec=Configuration)
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(self.backend_mock,
                                   queue_,
                                   configuration=configuration,
                                   metric=accuracy,
                                   budget=0,
                                   pipeline_config={
                                       'budget_type': 'epochs',
                                       'epochs': 50
                                   })

        evaluator.fit_predict_and_loss()
        Y_optimization_pred = self.backend_mock.save_numrun_to_dir.call_args_list[
            0][1]['ensemble_predictions']

        for i in range(7):
            self.assertEqual(0.9, Y_optimization_pred[i][1])
Example #4
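    # get_last_result must return only the most recent entry placed on the
    # result queue.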
    def test_get_results(self):
        backend_mock = unittest.mock.Mock(spec=backend.Backend)
        backend_mock.get_model_dir.return_value = 'dutirapbdxvltcrpbdlcatepdeau'
        D = get_binary_classification_datamanager()
        queue_ = multiprocessing.Queue()
        for i in range(5):
            queue_.put((i * 1, 1 - (i * 0.2), 0, "", StatusType.SUCCESS))
        result = get_last_result(queue_)
        self.assertEqual(result[0], 4)
        self.assertAlmostEqual(result[1], 0.2)
Example #5
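    # partial-cv evaluates a single fold: nothing is written to disk, the
    # pipeline is fit once, and predict_proba is called three times.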
    def test_partial_cv(self, pipeline_mock):
        D = get_binary_classification_datamanager()

        pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile(
            [0.6, 0.4], (len(X), 1))
        pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
        pipeline_mock.get_additional_run_info.return_value = None
        output_dir = os.path.join(os.getcwd(), '.test_partial_cv')
        D = get_binary_classification_datamanager()
        D.name = 'test'

        configuration = unittest.mock.Mock(spec=Configuration)
        backend_api = backend.create(output_dir, output_dir)
        backend_api.load_datamanager = lambda: D
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(backend_api,
                                   queue_,
                                   configuration=configuration,
                                   resampling_strategy='partial-cv',
                                   resampling_strategy_args={'folds': 5},
                                   all_scoring_functions=False,
                                   output_y_hat_optimization=True,
                                   metric=accuracy)

        evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
        evaluator.file_output.return_value = (None, None)

        evaluator.partial_fit_predict_and_loss(1)

        rval = evaluator.queue.get(timeout=1)
        self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)

        self.assertEqual(evaluator.file_output.call_count, 0)
        self.assertEqual(rval['loss'], 0.46666666666666667)
        self.assertEqual(pipeline_mock.fit.call_count, 1)
        self.assertEqual(pipeline_mock.predict_proba.call_count, 3)
        # The model prior to fitting is saved; this cannot be tested directly
        # because of the way the mock module is used. Instead, we test whether
        # the if block in which the model assignment is done is reached
        self.assertTrue(evaluator._added_empty_model)
Example #6
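    # With all_supported_metrics=True, the queued result must carry an
    # 'opt_loss' dictionary reporting more than one metric.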
    def test_additional_metrics_during_training(self, pipeline_mock):
        pipeline_mock.fit_dictionary = {'budget_type': 'epochs', 'epochs': 50}
        # Binary iris, contains 69 train samples, 31 test samples
        D = get_binary_classification_datamanager()
        pipeline_mock.predict_proba.side_effect = \
            lambda X, batch_size=None: np.tile([0.6, 0.4], (len(X), 1))
        pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
        pipeline_mock.get_additional_run_info.return_value = None

        # Binary iris, contains 69 train samples, 31 test samples
        D = get_binary_classification_datamanager()

        configuration = unittest.mock.Mock(spec=Configuration)
        backend_api = create(self.tmp_dir,
                             self.output_dir,
                             prefix='autoPyTorch')
        backend_api.load_datamanager = lambda: D
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(backend_api,
                                   queue_,
                                   configuration=configuration,
                                   metric=accuracy,
                                   budget=0,
                                   pipeline_config={
                                       'budget_type': 'epochs',
                                       'epochs': 50
                                   },
                                   all_supported_metrics=True)
        evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
        evaluator.file_output.return_value = (None, {})

        evaluator.fit_predict_and_loss()

        rval = read_queue(evaluator.queue)
        self.assertEqual(len(rval), 1)
        result = rval[0]
        self.assertIn('additional_run_info', result)
        self.assertIn('opt_loss', result['additional_run_info'])
        self.assertGreater(
            len(result['additional_run_info']['opt_loss'].keys()), 1)
Example #7
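    # k-fold cross-validation: one fit and three predict_proba calls per fold;
    # the optimization predictions are the concatenation of the five
    # validation folds.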
    def test_cv(self, pipeline_mock):
        D = get_binary_classification_datamanager(
            resampling_strategy=CrossValTypes.k_fold_cross_validation)

        pipeline_mock.predict_proba.side_effect = \
            lambda X, batch_size=None: np.tile([0.6, 0.4], (len(X), 1))
        pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
        pipeline_mock.get_additional_run_info.return_value = None

        configuration = unittest.mock.Mock(spec=Configuration)
        backend_api = create(self.tmp_dir,
                             self.output_dir,
                             prefix='autoPyTorch')
        backend_api.load_datamanager = lambda: D
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(backend_api,
                                   queue_,
                                   configuration=configuration,
                                   metric=accuracy,
                                   budget=0,
                                   pipeline_config={
                                       'budget_type': 'epochs',
                                       'epochs': 50
                                   })
        evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
        evaluator.file_output.return_value = (None, {})

        evaluator.fit_predict_and_loss()

        rval = read_queue(evaluator.queue)
        self.assertEqual(len(rval), 1)
        result = rval[0]['loss']
        self.assertEqual(len(rval[0]), 3)
        self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)

        self.assertEqual(evaluator.file_output.call_count, 1)
        self.assertEqual(result, 0.46235467431119603)
        self.assertEqual(pipeline_mock.fit.call_count, 5)
        # 15 calls because of the training, holdout and
        # test set (3 sets x 5 folds = 15)
        self.assertEqual(pipeline_mock.predict_proba.call_count, 15)
        # The optimisation predictions in cv are the concatenation of the
        # 5 folds, so the length is 5 * the fold size
        self.assertEqual(
            evaluator.file_output.call_args[0][0].shape[0],
            # Note the - 1: the dataset D has shape (69,),
            # which is not divisible by 5
            5 * len(D.splits[0][1]) - 1,
            evaluator.file_output.call_args)
        self.assertIsNone(evaluator.file_output.call_args[0][1])
        self.assertEqual(evaluator.file_output.call_args[0][2].shape[0],
                         D.test_tensors[1].shape[0])
Example #8
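    # 'holdout-iterative-fit' falls back to a single regular fit when the
    # pipeline does not support iterative fitting (iterative_fit is never
    # called).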
    def test_iterative_holdout_not_iterative(self, pipeline_mock):
        # Regular fitting
        D = get_binary_classification_datamanager()
        D.name = 'test'

        Xt_fixture = 'Xt_fixture'
        pipeline_mock.estimator_supports_iterative_fit.return_value = False
        pipeline_mock.fit_transformer.return_value = Xt_fixture, {}
        pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile(
            [0.6, 0.4], (len(X), 1))
        pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
        pipeline_mock.get_additional_run_info.return_value = None
        output_dir = os.path.join(os.getcwd(),
                                  '.test_iterative_holdout_not_iterative')

        configuration = unittest.mock.Mock(spec=Configuration)
        backend_api = backend.create(output_dir, output_dir)
        backend_api.load_datamanager = lambda: D
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(backend_api,
                                   queue_,
                                   configuration=configuration,
                                   resampling_strategy='holdout-iterative-fit',
                                   all_scoring_functions=False,
                                   output_y_hat_optimization=True,
                                   metric=accuracy)
        evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
        evaluator.file_output.return_value = (None, None)

        evaluator.fit_predict_and_loss(iterative=True)
        self.assertEqual(evaluator.file_output.call_count, 1)

        rval = evaluator.queue.get(timeout=1)
        self.assertAlmostEqual(rval['loss'], 0.47826086956521741)
        self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)

        self.assertEqual(pipeline_mock.iterative_fit.call_count, 0)
        # three calls because of the holdout, the validation and the test set
        self.assertEqual(evaluator.model.predict_proba.call_count, 3)
        self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], 23)
        self.assertEqual(evaluator.file_output.call_args[0][1].shape[0],
                         D.data['Y_valid'].shape[0])
        self.assertEqual(evaluator.file_output.call_args[0][2].shape[0],
                         D.data['Y_test'].shape[0])
        self.assertEqual(evaluator.file_output.call_count, 1)
        self.assertEqual(evaluator.model.fit.call_count, 1)
Example #9
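    # Holdout evaluation: the pipeline is fit once, one result is queued, and
    # predict_proba is called for the train, holdout and test sets.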
    def test_holdout(self, pipeline_mock):
        pipeline_mock.fit_dictionary = {'budget_type': 'epochs', 'epochs': 50}
        # Binary iris, contains 69 train samples, 31 test samples
        D = get_binary_classification_datamanager()
        pipeline_mock.predict_proba.side_effect = \
            lambda X, batch_size=None: np.tile([0.6, 0.4], (len(X), 1))
        pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
        pipeline_mock.get_additional_run_info.return_value = None

        configuration = unittest.mock.Mock(spec=Configuration)
        backend_api = create(self.tmp_dir,
                             self.output_dir,
                             prefix='autoPyTorch')
        backend_api.load_datamanager = lambda: D
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(backend_api,
                                   queue_,
                                   configuration=configuration,
                                   metric=accuracy,
                                   budget=0,
                                   pipeline_config={
                                       'budget_type': 'epochs',
                                       'epochs': 50
                                   })
        evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
        evaluator.file_output.return_value = (None, {})

        evaluator.fit_predict_and_loss()

        rval = read_queue(evaluator.queue)
        self.assertEqual(len(rval), 1)
        result = rval[0]['loss']
        self.assertEqual(len(rval[0]), 3)
        self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)

        self.assertEqual(evaluator.file_output.call_count, 1)
        self.assertEqual(result, 0.5652173913043479)
        self.assertEqual(pipeline_mock.fit.call_count, 1)
        # 3 calls because of train, holdout and test set
        self.assertEqual(pipeline_mock.predict_proba.call_count, 3)
        self.assertEqual(evaluator.file_output.call_count, 1)
        self.assertEqual(evaluator.file_output.call_args[0][0].shape[0],
                         len(D.splits[0][1]))
        self.assertIsNone(evaluator.file_output.call_args[0][1])
        self.assertEqual(evaluator.file_output.call_args[0][2].shape[0],
                         D.test_tensors[1].shape[0])
        self.assertEqual(evaluator.pipeline.fit.call_count, 1)
Example #10
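    # Holdout with train_size=0.66 and a subsample of 50 training points:
    # checks the reported loss and the shapes of the saved predictions.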
    def test_holdout(self, pipeline_mock):
        D = get_binary_classification_datamanager()
        D.name = 'test'

        pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile(
            [0.6, 0.4], (len(X), 1))
        pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
        pipeline_mock.get_additional_run_info.return_value = None
        output_dir = os.path.join(os.getcwd(), '.test_holdout')

        configuration = unittest.mock.Mock(spec=Configuration)
        backend_api = backend.create(output_dir, output_dir)
        backend_api.load_datamanager = lambda: D
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(
            backend_api,
            queue_,
            configuration=configuration,
            resampling_strategy='holdout',
            resampling_strategy_args={'train_size': 0.66},
            all_scoring_functions=False,
            output_y_hat_optimization=True,
            metric=accuracy,
            subsample=50)
        evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
        evaluator.file_output.return_value = (None, None)

        evaluator.fit_predict_and_loss()

        rval = get_last_result(evaluator.queue)
        result = rval['loss']
        self.assertEqual(len(rval), 3)
        self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)

        self.assertEqual(evaluator.file_output.call_count, 1)
        self.assertEqual(result, 0.45833333333333337)
        self.assertEqual(pipeline_mock.fit.call_count, 1)
        # three calls because of the holdout, the validation and the test set
        self.assertEqual(pipeline_mock.predict_proba.call_count, 3)
        self.assertEqual(evaluator.file_output.call_count, 1)
        self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], 24)
        self.assertEqual(evaluator.file_output.call_args[0][1].shape[0],
                         D.data['Y_valid'].shape[0])
        self.assertEqual(evaluator.file_output.call_args[0][2].shape[0],
                         D.data['Y_test'].shape[0])
        self.assertEqual(evaluator.model.fit.call_count, 1)
Example #11
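    # Cross-validation with an explicit StratifiedKFold: five fits, fifteen
    # predict_proba calls, and optimization predictions covering the whole
    # training set.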
    def test_cv(self, pipeline_mock):
        D = get_binary_classification_datamanager()
        kfold = StratifiedKFold(random_state=1, n_splits=5, shuffle=True)

        pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile(
            [0.6, 0.4], (len(X), 1))
        pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
        output_dir = os.path.join(os.getcwd(), '.test_cv')

        configuration = unittest.mock.Mock(spec=Configuration)
        backend_api = backend.create(output_dir, output_dir)
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(D,
                                   backend_api,
                                   queue_,
                                   configuration=configuration,
                                   cv=kfold,
                                   all_scoring_functions=False,
                                   output_y_hat_optimization=True,
                                   metric=accuracy)
        evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
        evaluator.file_output.return_value = (None, None)

        evaluator.fit_predict_and_loss()

        rval = get_last_result(evaluator.queue)
        result = rval['loss']
        self.assertEqual(len(rval), 3)
        self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)

        self.assertEqual(evaluator.file_output.call_count, 1)
        self.assertEqual(result, 0.46376811594202894)
        self.assertEqual(pipeline_mock.fit.call_count, 5)
        # Fifteen calls because of the holdout, the validation and the test set
        self.assertEqual(pipeline_mock.predict_proba.call_count, 15)
        self.assertEqual(evaluator.file_output.call_args[0][0].shape[0],
                         D.data['Y_train'].shape[0])
        self.assertEqual(evaluator.file_output.call_args[0][1].shape[0],
                         D.data['Y_valid'].shape[0])
        self.assertEqual(evaluator.file_output.call_args[0][2].shape[0],
                         D.data['Y_test'].shape[0])
        # The model prior to fitting is saved; this cannot be tested directly
        # because of the way the mock module is used. Instead, we test whether
        # the if block in which the model assignment is done is reached
        self.assertTrue(evaluator._added_empty_model)
Example #12
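    # subsample_indices must return nested index sets as the subsample grows
    # and raise ValueError when either side of the split would hold fewer
    # samples than there are classes.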
    def test_subsample_indices_classification(self, mock, backend_mock):

        configuration = unittest.mock.Mock(spec=Configuration)
        queue_ = multiprocessing.Queue()
        D = get_binary_classification_datamanager()
        backend_mock.load_datamanager.return_value = D
        evaluator = TrainEvaluator(backend_mock,
                                   queue_,
                                   configuration=configuration,
                                   resampling_strategy='cv',
                                   resampling_strategy_args={'folds': 10},
                                   subsample=10,
                                   metric=accuracy)
        train_indices = np.arange(69, dtype=int)
        train_indices1 = evaluator.subsample_indices(train_indices)
        evaluator.subsample = 20
        train_indices2 = evaluator.subsample_indices(train_indices)
        evaluator.subsample = 30
        train_indices3 = evaluator.subsample_indices(train_indices)
        evaluator.subsample = 67
        train_indices4 = evaluator.subsample_indices(train_indices)
        # Common cases
        for ti in train_indices1:
            self.assertIn(ti, train_indices2)
        for ti in train_indices2:
            self.assertIn(ti, train_indices3)
        for ti in train_indices3:
            self.assertIn(ti, train_indices4)

        # Corner cases
        evaluator.subsample = 0
        self.assertRaisesRegex(
            ValueError, 'The train_size = 0 should be '
            'greater or equal to the number '
            'of classes = 2', evaluator.subsample_indices, train_indices)
        # With equal or greater it should return a non-shuffled array of indices
        evaluator.subsample = 69
        train_indices5 = evaluator.subsample_indices(train_indices)
        self.assertTrue(np.all(train_indices5 == train_indices))
        evaluator.subsample = 68
        self.assertRaisesRegex(
            ValueError, 'The test_size = 1 should be greater'
            ' or equal to the number of '
            'classes = 2', evaluator.subsample_indices, train_indices)
Example #13
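    # Cross-validation with a mocked backend: the optimization predictions
    # saved via save_predictions_as_npy must reproduce the mocked probability
    # of 0.9.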
    def test_predict_proba_binary_classification(self, mock, backend_mock):
        D = get_binary_classification_datamanager()
        backend_mock.load_datamanager.return_value = D
        mock.predict_proba.side_effect = lambda y, batch_size: np.array(
            [[0.1, 0.9]] * y.shape[0])
        mock.side_effect = lambda **kwargs: mock

        configuration = unittest.mock.Mock(spec=Configuration)
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(backend_mock,
                                   queue_,
                                   configuration=configuration,
                                   resampling_strategy='cv',
                                   resampling_strategy_args={'folds': 10},
                                   output_y_hat_optimization=False,
                                   metric=accuracy)
        evaluator.fit_predict_and_loss()
        Y_optimization_pred = backend_mock.save_predictions_as_npy.call_args_list[
            0][0][0]

        for i in range(7):
            self.assertEqual(0.9, Y_optimization_pred[i][1])
Example #14
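    # Same check as Example #13, but with a single ShuffleSplit passed as the
    # cv object.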
    def test_predict_proba_binary_classification(self, mock, backend_mock):
        D = get_binary_classification_datamanager()
        mock.predict_proba.side_effect = lambda y, batch_size: np.array(
            [[0.1, 0.9]] * 7)
        mock.side_effect = lambda **kwargs: mock

        configuration = unittest.mock.Mock(spec=Configuration)
        queue_ = multiprocessing.Queue()
        kfold = ShuffleSplit(random_state=1, n_splits=1)

        evaluator = TrainEvaluator(D,
                                   backend_mock,
                                   queue_,
                                   configuration=configuration,
                                   cv=kfold,
                                   output_y_hat_optimization=False,
                                   metric=accuracy)
        evaluator.fit_predict_and_loss()
        Y_optimization_pred = backend_mock.save_predictions_as_npy.call_args_list[
            0][0][0]

        for i in range(7):
            self.assertEqual(0.9, Y_optimization_pred[i][1])
Example #15
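    # Iterative holdout fitting: iterative_fit is called five times with
    # doubling n_iter (2, 4, 8, 16, 32), and each intermediate result is
    # queued and written to disk.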
    def test_iterative_holdout(self, pipeline_mock):
        # Regular fitting
        D = get_binary_classification_datamanager()
        D.name = 'test'

        class SideEffect(object):
            def __init__(self):
                self.fully_fitted_call_count = 0

            def configuration_fully_fitted(self):
                self.fully_fitted_call_count += 1
                # Called twice as often as fit because we also check whether a
                # special indicator needs to be added to show that this is the
                # final call to iterative fit
                return self.fully_fitted_call_count > 10

        Xt_fixture = 'Xt_fixture'
        pipeline_mock.estimator_supports_iterative_fit.return_value = True
        pipeline_mock.configuration_fully_fitted.side_effect = SideEffect(
        ).configuration_fully_fitted
        pipeline_mock.fit_transformer.return_value = Xt_fixture, {}
        pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile(
            [0.6, 0.4], (len(X), 1))
        pipeline_mock.get_additional_run_info.return_value = None
        pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
        output_dir = os.path.join(os.getcwd(), '.test_iterative_holdout')

        configuration = unittest.mock.Mock(spec=Configuration)
        backend_api = backend.create(output_dir, output_dir)
        backend_api.load_datamanager = lambda: D
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(backend_api,
                                   queue_,
                                   configuration=configuration,
                                   resampling_strategy='holdout',
                                   all_scoring_functions=False,
                                   output_y_hat_optimization=True,
                                   metric=accuracy)
        evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
        evaluator.file_output.return_value = (None, None)

        class LossSideEffect(object):
            def __init__(self):
                self.losses = [1.0, 0.8, 0.6, 0.4, 0.2, 0.0]
                self.iteration = 0

            def side_effect(self, *args):
                self.iteration += 1
                return self.losses[self.iteration]

        evaluator._loss = unittest.mock.Mock()
        evaluator._loss.side_effect = LossSideEffect().side_effect

        evaluator.fit_predict_and_loss(iterative=True)
        self.assertEqual(evaluator.file_output.call_count, 5)

        for i in range(1, 6):
            rval = evaluator.queue.get(timeout=1)
            result = rval['loss']
            self.assertEqual(len(rval), 3)
            self.assertAlmostEqual(result, 1.0 - (0.2 * i))
        self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)

        self.assertEqual(pipeline_mock.iterative_fit.call_count, 5)
        self.assertEqual([
            cal[1]['n_iter']
            for cal in pipeline_mock.iterative_fit.call_args_list
        ], [2, 4, 8, 16, 32])
        # fifteen calls because of the holdout, the validation and the test set
        # and a total of five calls because of five iterations of fitting
        self.assertEqual(evaluator.model.predict_proba.call_count, 15)
        # 1/3 of 69
        self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], 23)
        self.assertEqual(evaluator.file_output.call_args[0][1].shape[0],
                         D.data['Y_valid'].shape[0])
        self.assertEqual(evaluator.file_output.call_args[0][2].shape[0],
                         D.data['Y_test'].shape[0])
        self.assertEqual(evaluator.file_output.call_count, 5)
        self.assertEqual(evaluator.model.fit.call_count, 0)
Example #16
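    # additional_run_info handling: extra run info from a single holdout fold
    # is accepted, but run info returned by more than one CV fold must raise
    # TAEAbortException.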
    def test_fit_predict_and_loss_additional_run_info(
        self,
        mock,
        backend_mock,
        _partial_fit_and_predict_mock,
        file_output_mock,
    ):
        D = get_binary_classification_datamanager()
        backend_mock.load_datamanager.return_value = D
        mock.side_effect = lambda **kwargs: mock
        _partial_fit_and_predict_mock.return_value = ([[0.1, 0.9]] * 23,
                                                      [[0.1, 0.9]] * 7,
                                                      [[0.1, 0.9]] * 7, {
                                                          'a': 5
                                                      })
        file_output_mock.return_value = None, None

        configuration = unittest.mock.Mock(spec=Configuration)
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(
            backend_mock,
            queue_,
            configuration=configuration,
            resampling_strategy='holdout',
            output_y_hat_optimization=False,
            metric=accuracy,
        )
        evaluator.Y_targets[0] = [1] * 23
        evaluator.fit_predict_and_loss(iterative=False)

        class SideEffect(object):
            def __init__(self):
                self.n_call = 0

            def __call__(self, *args, **kwargs):
                if self.n_call == 0:
                    self.n_call += 1
                    return ([[0.1, 0.9]] * 35, [[0.1, 0.9]] * 7,
                            [[0.1, 0.9]] * 7, {
                                'a': 5
                            })
                else:
                    return ([[0.1, 0.9]] * 34, [[0.1, 0.9]] * 7,
                            [[0.1, 0.9]] * 7, {
                                'a': 5
                            })

        _partial_fit_and_predict_mock.side_effect = SideEffect()
        evaluator = TrainEvaluator(
            backend_mock,
            queue_,
            configuration=configuration,
            resampling_strategy='cv',
            resampling_strategy_args={'folds': 2},
            output_y_hat_optimization=False,
            metric=accuracy,
        )
        evaluator.Y_targets[0] = [1] * 35
        evaluator.Y_targets[1] = [1] * 34

        self.assertRaises(TAEAbortException,
                          evaluator.fit_predict_and_loss,
                          iterative=False)