def test_evaluate_multiclass_classification(self):
        X_train, Y_train, X_test, Y_test = get_dataset('iris')
        X_valid = X_test[:25,]
        Y_valid = Y_test[:25,]
        X_test = X_test[25:,]
        Y_test = Y_test[25:,]

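        # Minimal data-manager stub; the test only fills in the attributes it
        # actually uses (info, data, feat_type).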
        D = Dummy()
        D.info = {'metric': 'bac_metric', 'task': MULTICLASS_CLASSIFICATION,
                  'is_sparse': False, 'target_num': 3}
        D.data = {'X_train': X_train, 'Y_train': Y_train,
                  'X_valid': X_valid, 'X_test': X_test}
        D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']

        configuration_space = get_configuration_space(D.info,
            include_estimators=['ridge'],
            include_preprocessors=['select_rates'])

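        # Sample N_TEST_RUNS random configurations and evaluate each one on a
        # holdout split; configurations that fail to fit are skipped.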
        err = np.zeros([N_TEST_RUNS])
        for i in range(N_TEST_RUNS):
            print "Evaluate configuration: %d; result:" % i,
            configuration = configuration_space.sample_configuration()
            D_ = copy.deepcopy(D)
            evaluator = HoldoutEvaluator(D_, configuration)

            if not self._fit(evaluator):
                print
                continue
            err[i] = evaluator.predict()
            print err[i]

            self.assertTrue(np.isfinite(err[i]))
            self.assertGreaterEqual(err[i], 0.0)

        print "Number of times it was worse than random guessing: " + \
            str(np.sum(err > 1))

    def test_5000_classes(self):
        weights = ([0.0002] * 4750) + ([0.0001] * 250)
        X, Y = sklearn.datasets.make_classification(n_samples=10000,
                                                    n_features=20,
                                                    n_classes=5000,
                                                    n_clusters_per_class=1,
                                                    n_informative=15,
                                                    n_redundant=5,
                                                    n_repeated=0,
                                                    weights=weights,
                                                    flip_y=0,
                                                    class_sep=1.0,
                                                    hypercube=True,
                                                    shift=None,
                                                    scale=1.0,
                                                    shuffle=True,
                                                    random_state=1)

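        # With these weights, 250 of the 5000 classes should contain exactly
        # one sample each.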
        self.assertEqual(250, np.sum(np.bincount(Y) == 1))
        D = Dummy()
        D.info = {'metric': 'r2_metric', 'task': MULTICLASS_CLASSIFICATION,
                  'is_sparse': False, 'target_num': 1}
        D.data = {'X_train': X, 'Y_train': Y,
                  'X_valid': X, 'X_test': X}
        D.feat_type = ['numerical'] * 20

        configuration_space = get_configuration_space(D.info,
            include_estimators=['extra_trees'],
            include_preprocessors=['no_preprocessing'])
        configuration = configuration_space.sample_configuration()
        D_ = copy.deepcopy(D)
        evaluator = HoldoutEvaluator(D_, configuration)
        evaluator.fit()

    def test_predict_proba_binary_classification(self):
        X_train, Y_train, X_test, Y_test = get_dataset('iris')

        eliminate_class_two = Y_train != 2
        X_train = X_train[eliminate_class_two]
        Y_train = Y_train[eliminate_class_two]

        eliminate_class_two = Y_test != 2
        X_test = X_test[eliminate_class_two]
        Y_test = Y_test[eliminate_class_two]

        X_valid = X_test[:25, ]
        Y_valid = Y_test[:25, ]
        X_test = X_test[25:, ]
        Y_test = Y_test[25:, ]

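        # Stub model whose predict_proba ignores its input and returns fixed
        # class probabilities for two samples.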
        class Dummy2(object):
            def predict_proba(self, y, batch_size=200):
                return np.array([[0.1, 0.9], [0.7, 0.3]])

        model = Dummy2()
        task_type = BINARY_CLASSIFICATION

        D = Dummy()
        D.info = {'metric': 'bac_metric', 'task': task_type,
                  'is_sparse': False, 'target_num': 2}
        D.data = {'X_train': X_train, 'Y_train': Y_train,
                  'X_valid': X_valid, 'X_test': X_test}
        D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']

        configuration_space = get_configuration_space(
            D.info, include_estimators=['ridge'],
            include_preprocessors=['select_rates'])
        configuration = configuration_space.sample_configuration()

        evaluator = HoldoutEvaluator(D, configuration)
        pred = evaluator.predict_proba(None, model, task_type)
        expected = [[0.9], [0.3]]
        for i in range(len(expected)):
            self.assertEqual(expected[i], pred[i])

    def test_with_abalone(self):
        dataset = "abalone"
        dataset_dir = os.path.join(os.path.dirname(__file__), ".datasets")
        D = CompetitionDataManager(dataset, dataset_dir)
        configuration_space = get_configuration_space(D.info,
            include_estimators=['extra_trees'],
            include_preprocessors=['no_preprocessing'])

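        # Evaluate N_TEST_RUNS random configurations on abalone; configurations
        # that fail to fit are skipped.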
        errors = []
        for i in range(N_TEST_RUNS):
            configuration = configuration_space.sample_configuration()
            D_ = copy.deepcopy(D)
            evaluator = HoldoutEvaluator(D_, configuration)
            if not self._fit(evaluator):
                print
                continue
            err = evaluator.predict()
            self.assertLess(err, 0.99)
            self.assertTrue(np.isfinite(err))
            errors.append(err)
        # All N_TEST_RUNS configurations are expected to fit successfully.
        self.assertEqual(10, len(errors))
        # The best of the sampled configurations should beat this reasonable bound.
        self.assertLess(min(errors), 0.77)

    def test_file_output(self):
        output_dir = os.path.join(os.getcwd(), ".test")

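        # Remove any output directory left over from a previous run.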
        try:
            shutil.rmtree(output_dir)
        except OSError:
            pass

        X_train, Y_train, X_test, Y_test = get_dataset('iris')
        X_valid = X_test[:25, ]
        Y_valid = Y_test[:25, ]
        X_test = X_test[25:, ]
        Y_test = Y_test[25:, ]

        D = Dummy()
        D.info = {'metric': 'bac_metric', 'task': MULTICLASS_CLASSIFICATION,
                  'is_sparse': False, 'target_num': 3}
        D.data = {'X_train': X_train, 'Y_train': Y_train,
                  'X_valid': X_valid, 'X_test': X_test}
        D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
        D.basename = "test"

        configuration_space = get_configuration_space(D.info)

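        # Keep sampling configurations until one fits, then check that the
        # optimization predictions were written to disk.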
        while True:
            configuration = configuration_space.sample_configuration()
            evaluator = HoldoutEvaluator(D, configuration,
                                         with_predictions=True,
                                         all_scoring_functions=True,
                                         output_dir=output_dir,
                                         output_y_test=True)

            if not self._fit(evaluator):
                print
                continue
            evaluator.predict()
            evaluator.file_output()

            self.assertTrue(os.path.exists(os.path.join(output_dir,
                                                        "y_optimization.npy")))
            break
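
    # Note: the ``_fit`` helper used by the tests above is not included in this
    # snippet. The sketch below is an assumption, not the original helper: it
    # wraps ``evaluator.fit()`` and reports whether fitting succeeded so that
    # runs with unusable configurations can simply be skipped.
    def _fit(self, evaluator):
        try:
            evaluator.fit()
            return True
        except Exception:
            # A real implementation would probably re-raise unexpected errors
            # and only swallow known, benign fit failures.
            return False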
Example #6
def main(dataset_info, mode, seed, params, mode_args=None):
    """This command line interface has three different operation modes:

    * CV: useful for the Tweakathon
    * 1/3 test split: useful to evaluate a configuration
    * CV on the 2/3 training split: useful to optimize hyperparameters during
      training before testing a configuration on the 1/3 test split.

    It must under no circumstances be used for the Auto part of the
    competition!
    """
    if mode != "test":
        num_run = get_new_run_num()

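    # Hyperparameter values arrive as strings from the command line; convert
    # them to int or float where possible.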
    for key in params:
        try:
            params[key] = int(params[key])
        except ValueError:
            try:
                params[key] = float(params[key])
            except ValueError:
                pass

    if seed is not None:
        seed = int(float(seed))
    else:
        seed = 1

    output_dir = os.getcwd()

    D = store_and_or_load_data(dataset_info=dataset_info, outputdir=output_dir)

    cs = get_configuration_space(D.info)
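    # ``configuration_space`` here refers to the imported configuration-space
    # module (not the object built above); it turns the raw parameter dict
    # into a Configuration.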
    configuration = configuration_space.Configuration(cs, params)
    metric = D.info['metric']

    global evaluator
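    # The mode argument selects which evaluator class is used below:
    #   'holdout'           -> HoldoutEvaluator
    #   'test'              -> TestEvaluator
    #   'cv' / 'partial_cv' -> CVEvaluator
    #   'nested-cv'         -> NestedCVEvaluator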
    # Train/test split
    if mode == 'holdout':
        evaluator = HoldoutEvaluator(D,
                                     configuration,
                                     with_predictions=True,
                                     all_scoring_functions=True,
                                     output_y_test=True,
                                     seed=seed,
                                     num_run=num_run)
        evaluator.fit()
        signal.signal(signal.SIGTERM, empty_signal_handler)
        evaluator.finish_up()
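        # Persist the fitted model if a models_<seed> directory already exists.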
        model_directory = os.path.join(os.getcwd(), "models_%d" % seed)
        if os.path.exists(model_directory):
            model_filename = os.path.join(model_directory,
                                          "%s.model" % num_run)
            with open(model_filename, "wb") as fh:
                pickle.dump(evaluator.model, fh, -1)

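    # Evaluate on the 1/3 test split and report the result in ParamILS format.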
    elif mode == 'test':
        evaluator = TestEvaluator(D,
                                  configuration,
                                  all_scoring_functions=True,
                                  seed=seed)
        evaluator.fit()
        scores = evaluator.predict()
        duration = time.time() - evaluator.starttime

        score = scores[metric]
        additional_run_info = ";".join(
            ["%s: %s" % (m_, value) for m_, value in scores.items()])
        additional_run_info += ";" + "duration: " + str(duration)

        print "Result for ParamILS: %s, %f, 1, %f, %d, %s" % (
            "SAT", abs(duration), score, evaluator.seed, additional_run_info)

    # CV on the whole training set
    elif mode == 'cv':
        evaluator = CVEvaluator(D,
                                configuration,
                                with_predictions=True,
                                all_scoring_functions=True,
                                output_y_test=True,
                                cv_folds=mode_args['folds'],
                                seed=seed,
                                num_run=num_run)
        evaluator.fit()
        signal.signal(signal.SIGTERM, empty_signal_handler)
        evaluator.finish_up()

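    # CV on a single fold only; the fold index is taken from mode_args.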
    elif mode == 'partial_cv':
        evaluator = CVEvaluator(D,
                                configuration,
                                all_scoring_functions=True,
                                cv_folds=mode_args['folds'],
                                seed=seed,
                                num_run=num_run)
        evaluator.partial_fit(mode_args['fold'])
        scores = evaluator.predict()
        duration = time.time() - evaluator.starttime

        score = scores[metric]
        additional_run_info = ";".join(
            ["%s: %s" % (m_, value) for m_, value in scores.items()])
        additional_run_info += ";" + "duration: " + str(duration)

        print "Result for ParamILS: %s, %f, 1, %f, %d, %s" % (
            "SAT", abs(duration), score, evaluator.seed, additional_run_info)

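    # Nested CV: inner folds for hyperparameter optimization, outer folds for
    # evaluation.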
    elif mode == 'nested-cv':
        evaluator = NestedCVEvaluator(D,
                                      configuration,
                                      with_predictions=True,
                                      inner_cv_folds=mode_args['inner_folds'],
                                      outer_cv_folds=mode_args['outer_folds'],
                                      all_scoring_functions=True,
                                      output_y_test=True,
                                      seed=seed,
                                      num_run=num_run)
        evaluator.fit()
        signal.signal(signal.SIGTERM, empty_signal_handler)
        evaluator.finish_up()

    else:
        raise ValueError("Unknown mode %r; must be one of 'holdout', 'test', "
                         "'cv', 'partial_cv' or 'nested-cv'." % mode)