Code example #1
import signal

def make_mode_cv(data, seed, configuration, num_run, folds):
    # Evaluate one configuration with full k-fold cross-validation
    evaluator = CVEvaluator(data, configuration,
                            cv_folds=folds,
                            seed=seed,
                            num_run=num_run,
                            **_get_base_dict())
    evaluator.fit()
    # 15 == SIGTERM: swap in a no-op handler so that finish_up()
    # can write out the results without being interrupted
    signal.signal(15, empty_signal_handler)
    evaluator.finish_up()
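
For context, a minimal driver sketch for make_mode_cv, reusing the CompetitionDataManager / get_configuration_space pattern from code example #3 below; the dataset name, path, seed, and fold count here are placeholders, not taken from the original code:

import os

# Hypothetical driver; assumes the CompetitionDataManager and
# get_configuration_space imports used in code example #3.
dataset_dir = os.path.join(os.path.dirname(__file__), '.datasets')
D = CompetitionDataManager('abalone', dataset_dir)
configuration = get_configuration_space(D.info).sample_configuration()
make_mode_cv(D, seed=1, configuration=configuration, num_run=1, folds=5)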
Code example #2
import time

def make_mode_partial_cv(data, seed, configuration, num_run, metric, fold,
                         folds):
    evaluator = CVEvaluator(data, configuration,
                            all_scoring_functions=True,
                            cv_folds=folds,
                            seed=seed,
                            num_run=num_run)
    evaluator.partial_fit(fold)
    scores = evaluator.predict()
    duration = time.time() - evaluator.starttime

    score = scores[metric]
    additional_run_info = ';'.join(['%s: %s' % (m_, value)
                                    for m_, value in scores.items()])
    additional_run_info += ';duration: ' + str(duration)

    # Emit the legacy ParamILS/SMAC result line:
    # status, runtime, runlength, quality, seed, additional run info
    print('Result for ParamILS: %s, %f, 1, %f, %d, %s' %
          ('SAT', abs(duration), score, evaluator.seed,
           additional_run_info))
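
The final print emits the legacy ParamILS/SMAC result protocol, whose comma-separated fields are status, runtime, runlength, quality, seed, and additional run info. Below is a minimal sketch of the consuming side; parse_paramils_result is a hypothetical helper, not part of auto-sklearn:

def parse_paramils_result(line):
    # e.g. 'Result for ParamILS: SAT, 1.500000, 1, 0.420000, 1, bac_metric: 0.42;duration: 1.5'
    prefix = 'Result for ParamILS: '
    assert line.startswith(prefix)
    status, runtime, runlength, quality, seed, info = \
        line[len(prefix):].split(', ', 5)
    return status, float(runtime), int(runlength), float(quality), int(seed), info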
Code example #3
    def test_with_abalone(self):
        dataset = 'abalone'
        dataset_dir = os.path.join(os.path.dirname(__file__), '.datasets')
        D = CompetitionDataManager(dataset, dataset_dir)
        configuration_space = get_configuration_space(
            D.info,
            include_estimators=['extra_trees'],
            include_preprocessors=['no_preprocessing'])

        errors = []
        for i in range(N_TEST_RUNS):
            configuration = configuration_space.sample_configuration()
            D_ = copy.deepcopy(D)
            evaluator = CVEvaluator(D_, configuration, cv_folds=5)
            if not self._fit(evaluator):
                continue
            err = evaluator.predict()
            self.assertLess(err, 0.99)
            self.assertTrue(np.isfinite(err))
            errors.append(err)
        # All N_TEST_RUNS (= 10) sampled configurations are expected to fit
        # successfully, and the best of them should beat this (fairly
        # loose) error bound
        self.assertEqual(10, len(errors))
        self.assertLess(min(errors), 0.77)
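
The self._fit call above refers to a helper defined elsewhere in the test class. A plausible sketch, assuming it merely skips configurations that fail to fit; the exact exception filtering in the real suite is not shown here:

    def _fit(self, evaluator):
        # Return True if the sampled configuration could be fit;
        # treat a fit failure as a skipped run, not a test failure.
        try:
            evaluator.fit()
            return True
        except ValueError:
            return False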
Code example #4
    def test_evaluate_multiclass_classification_partial_fit(self):
        X_train, Y_train, X_test, Y_test = get_dataset('iris')

        X_valid = X_test[:25]
        Y_valid = Y_test[:25]
        X_test = X_test[25:]
        Y_test = Y_test[25:]

        D = Dummy()
        D.info = {
            'metric': 'bac_metric',
            'task': MULTICLASS_CLASSIFICATION,
            'is_sparse': False,
            'target_num': 3
        }
        D.data = {
            'X_train': X_train,
            'Y_train': Y_train,
            'X_valid': X_valid,
            'X_test': X_test
        }
        D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']

        configuration_space = get_configuration_space(
            D.info,
            include_estimators=['ridge'],
            include_preprocessors=['select_rates'])

        err = np.zeros([N_TEST_RUNS])
        num_models_better_than_random = 0
        for i in range(N_TEST_RUNS):
            print('Evaluate configuration: %d; result:' % i)
            configuration = configuration_space.sample_configuration()
            D_ = copy.deepcopy(D)
            evaluator = CVEvaluator(D_, configuration, with_predictions=True)

            if not self._partial_fit(evaluator, fold=i % 10):
                print()
                continue
            e_, Y_optimization_pred, Y_valid_pred, Y_test_pred = \
                evaluator.predict()
            err[i] = e_
            print(err[i], configuration['classifier'])

            self.assertTrue(np.isfinite(err[i]))
            self.assertGreaterEqual(err[i], 0.0)
            # The evaluator keeps one model slot per fold, but only the
            # single fold that was partially fit should hold a trained model
            self.assertEqual(len(evaluator.models), 10)
            self.assertEqual(1, np.sum([model is not None
                                        for model in evaluator.models]))
            self.assertLess(Y_optimization_pred.shape[0], 13)
            self.assertEqual(Y_valid_pred.shape[0], Y_valid.shape[0])
            self.assertEqual(Y_test_pred.shape[0], Y_test.shape[0])
            # Test some basic statistics of the predictions
            if err[i] < 0.5:
                self.assertTrue(0.3 < Y_valid_pred.mean() < 0.36666)
                self.assertGreaterEqual(Y_valid_pred.std(), 0.01)
                self.assertTrue(0.3 < Y_test_pred.mean() < 0.36666)
                self.assertGreaterEqual(Y_test_pred.std(), 0.01)
                num_models_better_than_random += 1
        self.assertGreaterEqual(num_models_better_than_random, 5)
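
As with _fit in code example #3, self._partial_fit is a helper defined elsewhere in the test class. A sketch along the same lines, with the assumption that it only guards the single-fold fit:

    def _partial_fit(self, evaluator, fold):
        # Fit only the requested CV fold, skipping configurations
        # whose fit raises a (presumed benign) ValueError.
        try:
            evaluator.partial_fit(fold)
            return True
        except ValueError:
            return False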