예제 #1
0
def test_same_predictions_classification(seed, min_samples_leaf, n_samples,
                                         max_leaf_nodes):
    """Check that pygbm and LightGBM make (nearly) the same predictions.

    Same as test_same_predictions_regression but for binary classification:
    fit a single-tree pygbm classifier and its LightGBM equivalent on the
    same data and require that their hard predictions agree on >89% of the
    samples, and that their training accuracies match.
    """
    rng = np.random.RandomState(seed=seed)
    max_iter = 1  # a single tree so the two libraries can be compared
    max_bins = 256

    X, y = make_classification(n_samples=n_samples,
                               n_classes=2,
                               n_features=5,
                               n_informative=5,
                               n_redundant=0,
                               random_state=0)

    if n_samples > 255:
        # bin data and convert it to float32 so that the estimator doesn't
        # treat it as pre-binned
        X = BinMapper(max_bins=max_bins).fit_transform(X).astype(np.float32)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)

    est_pygbm = GradientBoostingClassifier(loss='binary_crossentropy',
                                           max_iter=max_iter,
                                           max_bins=max_bins,
                                           learning_rate=1,
                                           n_iter_no_change=None,
                                           min_samples_leaf=min_samples_leaf,
                                           max_leaf_nodes=max_leaf_nodes)
    est_lightgbm = get_lightgbm_estimator(est_pygbm)

    est_lightgbm.fit(X_train, y_train)
    est_pygbm.fit(X_train, y_train)

    # We need X to be treated as numerical data, not pre-binned data.
    X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32)

    pred_lightgbm = est_lightgbm.predict(X_train)
    pred_pygbm = est_pygbm.predict(X_train)
    assert np.mean(pred_pygbm == pred_lightgbm) > .89

    acc_lgbm = accuracy_score(y_train, pred_lightgbm)
    acc_pygbm = accuracy_score(y_train, pred_pygbm)
    np.testing.assert_almost_equal(acc_lgbm, acc_pygbm)

    # Only compare generalization for small trees on enough data, where
    # both models should have converged to very similar structures.
    if max_leaf_nodes < 10 and n_samples >= 1000:

        pred_lightgbm = est_lightgbm.predict(X_test)
        pred_pygbm = est_pygbm.predict(X_test)
        assert np.mean(pred_pygbm == pred_lightgbm) > .89

        acc_lgbm = accuracy_score(y_test, pred_lightgbm)
        acc_pygbm = accuracy_score(y_test, pred_pygbm)
        np.testing.assert_almost_equal(acc_lgbm, acc_pygbm, decimal=2)
예제 #2
0
def test_same_predictions_classification(seed, min_samples_leaf, n_samples,
                                         max_leaf_nodes):
    """Check that pygbm and LightGBM make (nearly) the same predictions.

    Same as test_same_predictions_regression but for binary classification:
    fit a single-tree pygbm classifier and its LightGBM equivalent on the
    same data and require that their hard predictions agree on >89% of the
    samples, and that their training accuracies match.
    """
    rng = np.random.RandomState(seed=seed)
    max_iter = 1  # a single tree so the two libraries can be compared
    max_bins = 256

    X, y = make_classification(n_samples=n_samples,
                               n_classes=2,
                               n_features=5,
                               n_informative=5,
                               n_redundant=0,
                               random_state=0)

    if n_samples > 255:
        # pre-bin large datasets so both libraries see identical bins
        X = BinMapper(max_bins=max_bins).fit_transform(X)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)

    est_pygbm = GradientBoostingClassifier(loss='binary_crossentropy',
                                           max_iter=max_iter,
                                           max_bins=max_bins,
                                           learning_rate=1,
                                           validation_split=None,
                                           scoring=None,
                                           min_samples_leaf=min_samples_leaf,
                                           max_leaf_nodes=max_leaf_nodes)
    est_lightgbm = get_lightgbm_estimator(est_pygbm)

    est_lightgbm.fit(X_train, y_train)
    est_pygbm.fit(X_train, y_train)

    pred_lightgbm = est_lightgbm.predict(X_train)
    pred_pygbm = est_pygbm.predict(X_train)
    assert np.mean(pred_pygbm == pred_lightgbm) > .89

    acc_lgbm = accuracy_score(y_train, pred_lightgbm)
    acc_pygbm = accuracy_score(y_train, pred_pygbm)
    np.testing.assert_almost_equal(acc_lgbm, acc_pygbm)

    # Only compare generalization for small trees on enough data, where
    # both models should have converged to very similar structures.
    if max_leaf_nodes < 10 and n_samples >= 1000:

        pred_lightgbm = est_lightgbm.predict(X_test)
        pred_pygbm = est_pygbm.predict(X_test)
        assert np.mean(pred_pygbm == pred_lightgbm) > .89

        acc_lgbm = accuracy_score(y_test, pred_lightgbm)
        acc_pygbm = accuracy_score(y_test, pred_pygbm)
        np.testing.assert_almost_equal(acc_lgbm, acc_pygbm, decimal=2)
예제 #3
0
# Benchmark driver: warm up the Numba JIT with a tiny fit/predict, then time
# a full pygbm fit. Assumes `data_train`, `target_train`, `lr`, `n_trees`,
# `max_bins` and `n_leaf_nodes` are defined earlier in the script.
n_samples, n_features = data_train.shape
print(f"Training set with {n_samples} records with {n_features} features.")

print("JIT compiling code for the pygbm model...")
tic = time()
# Throwaway model fitted on a 100-sample slice: its only purpose is to
# trigger JIT compilation so the timed run below measures pure fit time.
pygbm_model = GradientBoostingClassifier(learning_rate=lr,
                                         max_iter=1,
                                         max_bins=max_bins,
                                         max_leaf_nodes=n_leaf_nodes,
                                         random_state=0,
                                         scoring=None,
                                         verbose=0,
                                         validation_split=None)
pygbm_model.fit(data_train[:100], target_train[:100])
pygbm_model.predict(data_train[:100])  # prediction code is also jitted
toc = time()
print(f"done in {toc - tic:.3f}s")

print("Fitting a pygbm model...")
tic = time()
# The real timed run on the full training set.
pygbm_model = GradientBoostingClassifier(loss='binary_crossentropy',
                                         learning_rate=lr,
                                         max_iter=n_trees,
                                         max_bins=max_bins,
                                         max_leaf_nodes=n_leaf_nodes,
                                         random_state=0,
                                         scoring=None,
                                         verbose=1,
                                         validation_split=None)
pygbm_model.fit(data_train, target_train)
예제 #4
0
def test_same_predictions_multiclass_classification(seed, min_samples_leaf,
                                                    n_samples, max_leaf_nodes):
    """Check that pygbm and LightGBM make (nearly) the same predictions.

    Same as test_same_predictions_regression but for 3-class classification:
    fit a single-iteration pygbm classifier and its LightGBM equivalent on
    the same data, then require >89% agreement on hard predictions, close
    predicted probabilities, and matching accuracies.
    """
    rng = np.random.RandomState(seed=seed)
    max_iter = 1  # a single boosting iteration so the models are comparable
    max_bins = 256
    lr = 1

    X, y = make_classification(n_samples=n_samples,
                               n_classes=3,
                               n_features=5,
                               n_informative=5,
                               n_redundant=0,
                               n_clusters_per_class=1,
                               random_state=0)

    if n_samples > 255:
        # pre-bin large datasets so both libraries see identical bins
        X = BinMapper(max_bins=max_bins).fit_transform(X)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)

    est_pygbm = GradientBoostingClassifier(loss='categorical_crossentropy',
                                           max_iter=max_iter,
                                           max_bins=max_bins,
                                           learning_rate=lr,
                                           validation_split=None,
                                           scoring=None,
                                           min_samples_leaf=min_samples_leaf,
                                           max_leaf_nodes=max_leaf_nodes)
    est_lightgbm = get_lightgbm_estimator(est_pygbm)

    est_lightgbm.fit(X_train, y_train)
    est_pygbm.fit(X_train, y_train)

    pred_lightgbm = est_lightgbm.predict(X_train)
    pred_pygbm = est_pygbm.predict(X_train)
    assert np.mean(pred_pygbm == pred_lightgbm) > .89

    proba_lightgbm = est_lightgbm.predict_proba(X_train)
    proba_pygbm = est_pygbm.predict_proba(X_train)
    # assert more than 75% of the predicted probabilities are the same up to
    # the second decimal
    assert np.mean(np.abs(proba_lightgbm - proba_pygbm) < 1e-2) > .75

    acc_lgbm = accuracy_score(y_train, pred_lightgbm)
    acc_pygbm = accuracy_score(y_train, pred_pygbm)
    np.testing.assert_almost_equal(acc_lgbm, acc_pygbm, decimal=2)

    # Only compare generalization for small trees on enough data, where
    # both models should have converged to very similar structures.
    if max_leaf_nodes < 10 and n_samples >= 1000:

        pred_lightgbm = est_lightgbm.predict(X_test)
        pred_pygbm = est_pygbm.predict(X_test)
        assert np.mean(pred_pygbm == pred_lightgbm) > .89

        # BUGFIX: this branch checks held-out data, so the probabilities
        # must be computed on X_test (the original mistakenly reused X_train).
        proba_lightgbm = est_lightgbm.predict_proba(X_test)
        proba_pygbm = est_pygbm.predict_proba(X_test)
        # assert more than 75% of the predicted probabilities are the same up
        # to the second decimal
        assert np.mean(np.abs(proba_lightgbm - proba_pygbm) < 1e-2) > .75

        acc_lgbm = accuracy_score(y_test, pred_lightgbm)
        acc_pygbm = accuracy_score(y_test, pred_pygbm)
        np.testing.assert_almost_equal(acc_lgbm, acc_pygbm, decimal=2)