def test_cv_early_stopping(self):
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits

    digits = load_digits(2)
    X = digits['data']
    y = digits['target']
    dm = xgb.DMatrix(X, label=y)
    params = {'max_depth': 2, 'eta': 1, 'silent': 1,
              'objective': 'binary:logistic'}

    cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
                early_stopping_rounds=10)
    assert cv.shape[0] == 10
    cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
                early_stopping_rounds=5)
    assert cv.shape[0] == 3
    cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
                early_stopping_rounds=1)
    assert cv.shape[0] == 1
    cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
                feval=self.evalerror, early_stopping_rounds=10)
    assert cv.shape[0] == 10
    cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
                feval=self.evalerror, early_stopping_rounds=1)
    assert cv.shape[0] == 5
    cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
                feval=self.evalerror, maximize=True,
                early_stopping_rounds=1)
    assert cv.shape[0] == 1
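# `self.evalerror` is referenced above but defined elsewhere in the test
# class. A minimal sketch consistent with the assertions (an assumption, not
# the verbatim helper): rmse decreases as boosting proceeds, so with
# maximize=True early stopping triggers on the first round.
def evalerror(self, preds, dtrain):
    tm._skip_if_no_sklearn()
    from sklearn.metrics import mean_squared_error
    labels = dtrain.get_label()
    return 'rmse', mean_squared_error(labels, preds)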
def test_early_stopping_nonparallel(self):
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits
    try:
        from sklearn.model_selection import train_test_split
    except ImportError:
        from sklearn.cross_validation import train_test_split

    digits = load_digits(2)
    X = digits['data']
    y = digits['target']
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf1 = xgb.XGBClassifier()
    clf1.fit(X_train, y_train, early_stopping_rounds=5, eval_metric="auc",
             eval_set=[(X_test, y_test)])
    clf2 = xgb.XGBClassifier()
    clf2.fit(X_train, y_train, early_stopping_rounds=4, eval_metric="auc",
             eval_set=[(X_test, y_test)])
    # should be the same
    assert clf1.best_score == clf2.best_score
    assert clf1.best_score != 1
    # check overfit
    clf3 = xgb.XGBClassifier()
    clf3.fit(X_train, y_train, early_stopping_rounds=10, eval_metric="auc",
             eval_set=[(X_test, y_test)])
    assert clf3.best_score == 1
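# `tm` is xgboost's shared testing helper module, which is not part of this
# excerpt. A minimal sketch of the guard used throughout this file, assuming
# it simply skips the test when scikit-learn is missing; the real helper may
# differ:
def _skip_if_no_sklearn():
    try:
        import sklearn  # noqa: F401
    except ImportError:
        import unittest
        raise unittest.SkipTest('scikit-learn is not installed')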
def test_boston_housing_regression_with_sample_weights():
    tm._skip_if_no_sklearn()
    from sklearn.metrics import mean_squared_error
    from sklearn.datasets import load_boston
    # sklearn.cross_validation was removed in scikit-learn 0.20; use the
    # model_selection API instead
    from sklearn.model_selection import KFold

    boston = load_boston()
    y = boston['target']
    X = boston['data']
    sample_weight = np.ones_like(y, 'float')
    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
    for train_index, test_index in kf.split(X):
        xgb_model = xgb.XGBRegressor().fit(
            X[train_index], y[train_index],
            sample_weight=sample_weight[train_index])
        preds = xgb_model.predict(X[test_index])
        # test other params in XGBRegressor().fit
        preds2 = xgb_model.predict(X[test_index], output_margin=True,
                                   ntree_limit=3)
        preds3 = xgb_model.predict(X[test_index], output_margin=True,
                                   ntree_limit=0)
        preds4 = xgb_model.predict(X[test_index], output_margin=False,
                                   ntree_limit=3)
        labels = y[test_index]

        assert mean_squared_error(preds, labels) < 25
        assert mean_squared_error(preds2, labels) < 370
        assert mean_squared_error(preds3, labels) < 25
        assert mean_squared_error(preds4, labels) < 370
def evalerror_04(self, preds, dtrain):
    tm._skip_if_no_sklearn()
    from sklearn.metrics import mean_squared_error

    labels = dtrain.get_label()
    return [('error', float(sum(labels != (preds > 0.0))) / len(labels)),
            ('rmse', mean_squared_error(labels, preds))]
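# Note: when a feval returns several (name, value) pairs, xgboost early-stops
# on the last metric in the list, so evalerror_04 stops on rmse while a
# variant listing the error rate last would stop on classification error.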
def test_ranking():
    tm._skip_if_no_sklearn()
    # generate random data
    x_train = np.random.rand(1000, 10)
    y_train = np.random.randint(5, size=1000)
    # 20 query groups of 50 rows each (group sizes must sum to the row count)
    train_group = np.repeat(50, 20)
    x_valid = np.random.rand(200, 10)
    y_valid = np.random.randint(5, size=200)
    valid_group = np.repeat(50, 4)
    x_test = np.random.rand(100, 10)

    params = {'objective': 'rank:pairwise', 'learning_rate': 0.1,
              'gamma': 1.0, 'min_child_weight': 0.1,
              'max_depth': 6, 'n_estimators': 4}
    model = xgb.sklearn.XGBRanker(**params)
    model.fit(x_train, y_train, train_group,
              eval_set=[(x_valid, y_valid)], eval_group=[valid_group])
    pred = model.predict(x_test)

    train_data = xgb.DMatrix(x_train, y_train)
    valid_data = xgb.DMatrix(x_valid, y_valid)
    test_data = xgb.DMatrix(x_test)
    train_data.set_group(train_group)
    valid_data.set_group(valid_group)

    params_orig = {'objective': 'rank:pairwise', 'eta': 0.1, 'gamma': 1.0,
                   'min_child_weight': 0.1, 'max_depth': 6}
    xgb_model_orig = xgb.train(params_orig, train_data, num_boost_round=4,
                               evals=[(valid_data, 'validation')])
    pred_orig = xgb_model_orig.predict(test_data)
    # the sklearn wrapper and the native API should give identical scores
    np.testing.assert_almost_equal(pred, pred_orig)
def test_feature_importances_gain():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits

    digits = load_digits(2)
    y = digits['target']
    X = digits['data']
    xgb_model = xgb.XGBClassifier(
        random_state=0, importance_type="gain").fit(X, y)

    exp = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.00326159, 0.,
                    0., 0., 0., 0., 0., 0., 0., 0.00297238, 0.00988034, 0.,
                    0., 0., 0., 0., 0., 0.03512521, 0.41123885, 0., 0., 0.,
                    0., 0.01326332, 0.00160674, 0., 0.4206952, 0., 0., 0.,
                    0., 0.00616747, 0.01237546, 0., 0., 0., 0., 0., 0., 0.,
                    0.08240705, 0., 0., 0., 0., 0., 0., 0., 0.00100649, 0.,
                    0., 0., 0., 0.], dtype=np.float32)

    np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)

    # numeric columns
    import pandas as pd
    y = pd.Series(digits['target'])
    X = pd.DataFrame(digits['data'])
    xgb_model = xgb.XGBClassifier(
        random_state=0, importance_type="gain").fit(X, y)
    np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)

    # string columns, the feature order must be kept
    chars = list('abcdefghijklmnopqrstuvwxyz')
    X.columns = ["".join(random.sample(chars, 5)) for x in range(64)]
    xgb_model = xgb.XGBClassifier(
        random_state=0, importance_type="gain").fit(X, y)
    np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)
def test_multiclass_classification():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_iris
    from sklearn.model_selection import KFold

    def check_pred(preds, labels, output_margin):
        if output_margin:
            # margin output is a per-class score vector: pick the argmax
            err = sum(1 for i in range(len(preds))
                      if preds[i].argmax() != labels[i]) / float(len(preds))
        else:
            err = sum(1 for i in range(len(preds))
                      if preds[i] != labels[i]) / float(len(preds))
        assert err < 0.4

    iris = load_iris()
    y = iris['target']
    X = iris['data']
    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
    for train_index, test_index in kf.split(X, y):
        xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index])
        preds = xgb_model.predict(X[test_index])
        # test other params in XGBClassifier().fit
        preds2 = xgb_model.predict(X[test_index], output_margin=True,
                                   ntree_limit=3)
        preds3 = xgb_model.predict(X[test_index], output_margin=True,
                                   ntree_limit=0)
        preds4 = xgb_model.predict(X[test_index], output_margin=False,
                                   ntree_limit=3)
        labels = y[test_index]

        check_pred(preds, labels, output_margin=False)
        check_pred(preds2, labels, output_margin=True)
        check_pred(preds3, labels, output_margin=True)
        check_pred(preds4, labels, output_margin=False)
def test_save_load_model():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits
    try:
        from sklearn.model_selection import KFold
    except ImportError:
        from sklearn.cross_validation import KFold

    digits = load_digits(2)
    y = digits['target']
    X = digits['data']
    try:
        kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
    except TypeError:  # sklearn.model_selection.KFold uses n_splits
        kf = KFold(n_splits=2, shuffle=True,
                   random_state=rng).split(np.arange(y.shape[0]))
    # TemporaryDirectory is assumed to come from tempfile (Python 3)
    with TemporaryDirectory() as tempdir:
        model_path = os.path.join(tempdir, 'digits.model')
        for train_index, test_index in kf:
            xgb_model = xgb.XGBClassifier().fit(X[train_index],
                                                y[train_index])
            xgb_model.save_model(model_path)
            xgb_model = xgb.XGBModel()
            xgb_model.load_model(model_path)
            preds = xgb_model.predict(X[test_index])
            labels = y[test_index]
            err = sum(1 for i in range(len(preds))
                      if int(preds[i] > 0.5) != labels[i]) / float(len(preds))
            assert err < 0.1
def test_feature_importances():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits

    digits = load_digits(2)
    y = digits['target']
    X = digits['data']
    xgb_model = xgb.XGBClassifier(seed=0).fit(X, y)

    exp = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.00833333, 0.,
                    0., 0., 0., 0., 0., 0., 0., 0.025, 0.14166667, 0., 0.,
                    0., 0., 0., 0., 0.00833333, 0.25833333, 0., 0., 0., 0.,
                    0.03333334, 0.03333334, 0., 0.32499999, 0., 0., 0., 0.,
                    0.05, 0.06666667, 0., 0., 0., 0., 0., 0., 0.,
                    0.04166667, 0., 0., 0., 0., 0., 0., 0., 0.00833333, 0.,
                    0., 0., 0., 0.], dtype=np.float32)

    np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)

    # numeric columns
    import pandas as pd
    y = pd.Series(digits['target'])
    X = pd.DataFrame(digits['data'])
    xgb_model = xgb.XGBClassifier(seed=0).fit(X, y)
    np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)

    # string columns, the feature order must be kept
    chars = list('abcdefghijklmnopqrstuvwxyz')
    X.columns = ["".join(random.sample(chars, 5)) for x in range(64)]
    xgb_model = xgb.XGBClassifier(seed=0).fit(X, y)
    np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)
def test_sklearn_clone():
    tm._skip_if_no_sklearn()
    from sklearn.base import clone

    clf = xgb.XGBClassifier(n_jobs=2, nthread=3)
    clf.n_jobs = -1
    clone(clf)
def test_sklearn_plotting():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_iris

    iris = load_iris()
    classifier = xgb.XGBClassifier()
    classifier.fit(iris.data, iris.target)

    import matplotlib
    matplotlib.use('Agg')  # headless backend so the test runs without a display

    from matplotlib.axes import Axes
    from graphviz import Digraph

    ax = xgb.plot_importance(classifier)
    assert isinstance(ax, Axes)
    assert ax.get_title() == 'Feature importance'
    assert ax.get_xlabel() == 'F score'
    assert ax.get_ylabel() == 'Features'
    assert len(ax.patches) == 4

    g = xgb.to_graphviz(classifier, num_trees=0)
    assert isinstance(g, Digraph)

    ax = xgb.plot_tree(classifier, num_trees=0)
    assert isinstance(ax, Axes)
def test_regression_with_custom_objective():
    tm._skip_if_no_sklearn()
    from sklearn.metrics import mean_squared_error
    from sklearn.datasets import load_boston
    from sklearn.model_selection import KFold

    def objective_ls(y_true, y_pred):
        # gradient and hessian of squared error: grad = y_pred - y_true, hess = 1
        grad = (y_pred - y_true)
        hess = np.ones(len(y_true))
        return grad, hess

    boston = load_boston()
    y = boston['target']
    X = boston['data']
    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
    for train_index, test_index in kf.split(X):
        xgb_model = xgb.XGBRegressor(objective=objective_ls).fit(
            X[train_index], y[train_index])
        preds = xgb_model.predict(X[test_index])
        labels = y[test_index]
        assert mean_squared_error(preds, labels) < 25

    # Test that the custom objective function is actually used
    class XGBCustomObjectiveException(Exception):
        pass

    def dummy_objective(y_true, y_pred):
        raise XGBCustomObjectiveException()

    xgb_model = xgb.XGBRegressor(objective=dummy_objective)
    np.testing.assert_raises(XGBCustomObjectiveException,
                             xgb_model.fit, X, y)
def test_boston_housing_regression():
    tm._skip_if_no_sklearn()
    from sklearn.metrics import mean_squared_error
    from sklearn.datasets import load_boston
    from sklearn.model_selection import KFold

    boston = load_boston()
    y = boston['target']
    X = boston['data']
    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
    for train_index, test_index in kf.split(X):
        xgb_model = xgb.XGBRegressor().fit(X[train_index], y[train_index])
        preds = xgb_model.predict(X[test_index])
        # test other params in XGBRegressor().fit
        preds2 = xgb_model.predict(X[test_index], output_margin=True,
                                   ntree_limit=3)
        preds3 = xgb_model.predict(X[test_index], output_margin=True,
                                   ntree_limit=0)
        preds4 = xgb_model.predict(X[test_index], output_margin=False,
                                   ntree_limit=3)
        labels = y[test_index]

        assert mean_squared_error(preds, labels) < 25
        assert mean_squared_error(preds2, labels) < 350
        assert mean_squared_error(preds3, labels) < 25
        assert mean_squared_error(preds4, labels) < 350
def test_classification_with_custom_objective():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits
    from sklearn.model_selection import KFold

    def logregobj(y_true, y_pred):
        # gradient/hessian of the logistic loss: grad = p - y, hess = p * (1 - p)
        y_pred = 1.0 / (1.0 + np.exp(-y_pred))
        grad = y_pred - y_true
        hess = y_pred * (1.0 - y_pred)
        return grad, hess

    digits = load_digits(2)
    y = digits['target']
    X = digits['data']
    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
    for train_index, test_index in kf.split(X):
        xgb_model = xgb.XGBClassifier(objective=logregobj)
        xgb_model.fit(X[train_index], y[train_index])
        preds = xgb_model.predict(X[test_index])
        labels = y[test_index]
        err = sum(1 for i in range(len(preds))
                  if int(preds[i] > 0.5) != labels[i]) / float(len(preds))
        assert err < 0.1

    # Test that the custom objective function is actually used
    class XGBCustomObjectiveException(Exception):
        pass

    def dummy_objective(y_true, y_preds):
        raise XGBCustomObjectiveException()

    xgb_model = xgb.XGBClassifier(objective=dummy_objective)
    np.testing.assert_raises(XGBCustomObjectiveException,
                             xgb_model.fit, X, y)
def test_fast_histmaker(self):
    tm._skip_if_no_sklearn()
    variable_param = {'tree_method': ['hist'],
                      'max_depth': [2, 8],
                      'max_bin': [2, 256],
                      'grow_policy': ['depthwise', 'lossguide'],
                      'max_leaves': [64, 0],
                      'silent': [1]}
    for param in parameter_combinations(variable_param):
        result = run_suite(param)
        assert_results_non_increasing(result, 1e-2)

    # hist must give the same result as exact on all-categorical data
    dpath = 'demo/data/'
    ag_dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
    ag_dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
    ag_param = {'max_depth': 2,
                'tree_method': 'hist',
                'eta': 1,
                'silent': 1,
                'objective': 'binary:logistic',
                'eval_metric': 'auc'}
    hist_res = {}
    exact_res = {}

    xgb.train(ag_param, ag_dtrain, 10,
              [(ag_dtrain, 'train'), (ag_dtest, 'test')],
              evals_result=hist_res)
    ag_param["tree_method"] = "exact"
    xgb.train(ag_param, ag_dtrain, 10,
              [(ag_dtrain, 'train'), (ag_dtest, 'test')],
              evals_result=exact_res)
    assert hist_res['train']['auc'] == exact_res['train']['auc']
    assert hist_res['test']['auc'] == exact_res['test']['auc']
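# `parameter_combinations`, `run_suite` and `assert_results_non_increasing`
# come from shared test utilities outside this excerpt. A minimal sketch of
# the grid-expansion helper, assuming it enumerates the Cartesian product of
# the per-key value lists (behavior inferred from how it is called here):
def parameter_combinations(variable_param):
    import itertools
    names = sorted(variable_param)
    combos = itertools.product(*(variable_param[name] for name in names))
    return [dict(zip(names, combo)) for combo in combos]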
def test_split_value_histograms():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits

    digits_2class = load_digits(2)
    X = digits_2class['data']
    y = digits_2class['target']

    dm = xgb.DMatrix(X, label=y)
    params = {'max_depth': 6, 'eta': 0.01, 'silent': 1,
              'objective': 'binary:logistic'}

    gbdt = xgb.train(params, dm, num_boost_round=10)
    assert gbdt.get_split_value_histogram("not_there",
                                          as_pandas=True).shape[0] == 0
    assert gbdt.get_split_value_histogram("not_there",
                                          as_pandas=False).shape[0] == 0
    assert gbdt.get_split_value_histogram("f28", bins=0).shape[0] == 1
    assert gbdt.get_split_value_histogram("f28", bins=1).shape[0] == 1
    assert gbdt.get_split_value_histogram("f28", bins=2).shape[0] == 2
    assert gbdt.get_split_value_histogram("f28", bins=5).shape[0] == 2
    assert gbdt.get_split_value_histogram("f28", bins=None).shape[0] == 2
def test_sklearn_nfolds_cv():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits
    from sklearn.model_selection import StratifiedKFold

    digits = load_digits(3)
    X = digits['data']
    y = digits['target']
    dm = xgb.DMatrix(X, label=y)

    params = {'max_depth': 2, 'eta': 1, 'silent': 1,
              'objective': 'multi:softprob', 'num_class': 3}

    seed = 2016
    nfolds = 5
    skf = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=seed)

    cv1 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds, seed=seed)
    cv2 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds,
                 folds=skf, seed=seed)
    cv3 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds,
                 stratified=True, seed=seed)
    assert cv1.shape[0] == cv2.shape[0] and cv2.shape[0] == cv3.shape[0]
    # passing an explicit StratifiedKFold and stratified=True must agree
    assert cv2.iloc[-1, 0] == cv3.iloc[-1, 0]
def test_sklearn_n_jobs():
    tm._skip_if_no_sklearn()

    clf = xgb.XGBClassifier(n_jobs=1)
    assert clf.get_xgb_params()['nthread'] == 1

    clf = xgb.XGBClassifier(nthread=2)
    assert clf.get_xgb_params()['nthread'] == 2
def test_kwargs():
    tm._skip_if_no_sklearn()

    params = {'updater': 'grow_gpu', 'subsample': .5, 'n_jobs': -1}
    clf = xgb.XGBClassifier(n_estimators=1000, **params)
    assert clf.get_params()['updater'] == 'grow_gpu'
    assert clf.get_params()['subsample'] == .5
    assert clf.get_params()['n_estimators'] == 1000
def test_sklearn_random_state():
    tm._skip_if_no_sklearn()

    clf = xgb.XGBClassifier(random_state=402)
    assert clf.get_xgb_params()['seed'] == 402

    clf = xgb.XGBClassifier(seed=401)
    assert clf.get_xgb_params()['seed'] == 401
def test_gpu_coordinate(self):
    tm._skip_if_no_sklearn()
    variable_param = {'booster': ['gblinear'],
                      'updater': ['coord_descent'],
                      'eta': [0.5],
                      'top_k': [10],
                      'tolerance': [1e-5],
                      'nthread': [2],
                      'alpha': [.005, .1],
                      'lambda': [0.005],
                      'coordinate_selection': ['cyclic', 'random', 'greedy'],
                      'n_gpus': [-1]}
    for param in test_linear.parameter_combinations(variable_param):
        results = test_linear.run_suite(param, 200, None,
                                        scale_features=True)
        test_linear.assert_regression_result(results, 1e-2)
        test_linear.assert_classification_result(results)
def test_shotgun(self):
    tm._skip_if_no_sklearn()
    variable_param = {'booster': ['gblinear'],
                      'updater': ['shotgun'],
                      'eta': [0.5],
                      'top_k': [10],
                      'tolerance': [1e-5],
                      'nthread': [2],
                      'alpha': [.005, .1],
                      'lambda': [.005],
                      'feature_selector': ['cyclic', 'shuffle']}
    for param in parameter_combinations(variable_param):
        results = run_suite(param, 200, None, True)
        assert_regression_result(results, 1e-2)
        assert_classification_result(results)
def test_parameter_tuning():
    tm._skip_if_no_sklearn()
    # sklearn.grid_search was removed in scikit-learn 0.20
    from sklearn.model_selection import GridSearchCV
    from sklearn.datasets import load_boston

    boston = load_boston()
    y = boston['target']
    X = boston['data']
    xgb_model = xgb.XGBRegressor()
    clf = GridSearchCV(xgb_model,
                       {'max_depth': [2, 4, 6],
                        'n_estimators': [50, 100, 200]},
                       verbose=1)
    clf.fit(X, y)
    assert clf.best_score_ < 0.7
    assert clf.best_params_ == {'n_estimators': 100, 'max_depth': 4}
def test_validation_weights_xgbclassifier():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import make_hastie_10_2

    # prepare training and test data
    X, y = make_hastie_10_2(n_samples=2000, random_state=42)
    labels, y = np.unique(y, return_inverse=True)
    X_train, X_test = X[:1600], X[1600:]
    y_train, y_test = y[:1600], y[1600:]

    # instantiate model
    param_dist = {'objective': 'binary:logistic', 'n_estimators': 2,
                  'random_state': 123}
    clf = xgb.sklearn.XGBClassifier(**param_dist)

    # train it using instance weights only in the training set
    weights_train = np.random.choice([1, 2], len(X_train))
    clf.fit(X_train, y_train,
            sample_weight=weights_train,
            eval_set=[(X_test, y_test)],
            eval_metric='logloss',
            verbose=False)

    # evaluate logloss metric on test set *without* using weights
    evals_result_without_weights = clf.evals_result()
    logloss_without_weights = evals_result_without_weights[
        "validation_0"]["logloss"]

    # now use weights for the test set
    np.random.seed(0)
    weights_test = np.random.choice([1, 2], len(X_test))
    clf.fit(X_train, y_train,
            sample_weight=weights_train,
            eval_set=[(X_test, y_test)],
            sample_weight_eval_set=[weights_test],
            eval_metric='logloss',
            verbose=False)
    evals_result_with_weights = clf.evals_result()
    logloss_with_weights = evals_result_with_weights["validation_0"]["logloss"]

    # check that the logloss in the test set is actually different
    # when using weights than when not using them
    assert all((logloss_with_weights[i] != logloss_without_weights[i]
                for i in [0, 1]))
def test_RFECV():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_boston
    from sklearn.datasets import load_breast_cancer
    from sklearn.datasets import load_iris
    from sklearn.feature_selection import RFECV

    # Regression (use the regressor wrapper for a continuous target)
    X, y = load_boston(return_X_y=True)
    bst = xgb.XGBRegressor(booster='gblinear', learning_rate=0.1,
                           n_estimators=10, n_jobs=1,
                           objective='reg:linear',
                           random_state=0, silent=True)
    rfecv = RFECV(estimator=bst, step=1, cv=3,
                  scoring='neg_mean_squared_error')
    rfecv.fit(X, y)

    # Binary classification
    X, y = load_breast_cancer(return_X_y=True)
    bst = xgb.XGBClassifier(booster='gblinear', learning_rate=0.1,
                            n_estimators=10, n_jobs=1,
                            objective='binary:logistic',
                            random_state=0, silent=True)
    rfecv = RFECV(estimator=bst, step=1, cv=3, scoring='roc_auc')
    rfecv.fit(X, y)

    # Multi-class classification
    X, y = load_iris(return_X_y=True)
    bst = xgb.XGBClassifier(base_score=0.4, booster='gblinear',
                            learning_rate=0.1, n_estimators=10,
                            n_jobs=1, objective='multi:softprob',
                            random_state=0, reg_alpha=0.001,
                            reg_lambda=0.01, scale_pos_weight=0.5,
                            silent=True)
    rfecv = RFECV(estimator=bst, step=1, cv=3, scoring='neg_log_loss')
    rfecv.fit(X, y)
def test_sklearn_api_gblinear():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split

    iris = load_iris()
    tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target,
                                              train_size=120)
    classifier = xgb.XGBClassifier(booster='gblinear', n_estimators=100)
    classifier.fit(tr_d, tr_l)
    preds = classifier.predict(te_d)
    labels = te_l
    err = sum([1 for p, l in zip(preds, labels) if p != l]) * 1.0 / len(te_l)
    assert err < 0.2
def test_eval_metrics(self):
    tm._skip_if_no_sklearn()
    try:
        from sklearn.model_selection import train_test_split
    except ImportError:
        from sklearn.cross_validation import train_test_split
    from sklearn.datasets import load_digits

    digits = load_digits(2)
    X = digits['data']
    y = digits['target']
    Xt, Xv, yt, yv = train_test_split(X, y, test_size=0.2, random_state=0)

    dtrain = xgb.DMatrix(Xt, label=yt)
    dvalid = xgb.DMatrix(Xv, label=yv)

    watchlist = [(dtrain, 'train'), (dvalid, 'val')]

    gbdt_01 = xgb.train(self.xgb_params_01, dtrain, num_boost_round=10)
    gbdt_02 = xgb.train(self.xgb_params_02, dtrain, num_boost_round=10)
    gbdt_03 = xgb.train(self.xgb_params_03, dtrain, num_boost_round=10)
    assert gbdt_01.predict(dvalid)[0] == gbdt_02.predict(dvalid)[0]
    assert gbdt_01.predict(dvalid)[0] == gbdt_03.predict(dvalid)[0]

    gbdt_01 = xgb.train(self.xgb_params_01, dtrain, 10, watchlist,
                        early_stopping_rounds=2)
    gbdt_02 = xgb.train(self.xgb_params_02, dtrain, 10, watchlist,
                        early_stopping_rounds=2)
    gbdt_03 = xgb.train(self.xgb_params_03, dtrain, 10, watchlist,
                        early_stopping_rounds=2)
    gbdt_04 = xgb.train(self.xgb_params_04, dtrain, 10, watchlist,
                        early_stopping_rounds=2)
    assert gbdt_01.predict(dvalid)[0] == gbdt_02.predict(dvalid)[0]
    assert gbdt_01.predict(dvalid)[0] == gbdt_03.predict(dvalid)[0]
    # params_04 early-stops on a different (last-listed) metric
    assert gbdt_03.predict(dvalid)[0] != gbdt_04.predict(dvalid)[0]

    gbdt_01 = xgb.train(self.xgb_params_01, dtrain, 10, watchlist,
                        early_stopping_rounds=2, feval=self.evalerror_01)
    gbdt_02 = xgb.train(self.xgb_params_02, dtrain, 10, watchlist,
                        early_stopping_rounds=2, feval=self.evalerror_02)
    gbdt_03 = xgb.train(self.xgb_params_03, dtrain, 10, watchlist,
                        early_stopping_rounds=2, feval=self.evalerror_03)
    gbdt_04 = xgb.train(self.xgb_params_04, dtrain, 10, watchlist,
                        early_stopping_rounds=2, feval=self.evalerror_04)
    assert gbdt_01.predict(dvalid)[0] == gbdt_02.predict(dvalid)[0]
    assert gbdt_01.predict(dvalid)[0] == gbdt_03.predict(dvalid)[0]
    assert gbdt_03.predict(dvalid)[0] != gbdt_04.predict(dvalid)[0]
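# The parameter dicts and fevals used above live outside this excerpt. A
# plausible reconstruction, inferred from the assertions (01/02/03 agree
# because each effectively early-stops on the error metric, while 04 lists
# rmse last and so stops differently); treat these as assumptions, not the
# original definitions:
xgb_params_01 = {'silent': 1, 'nthread': 1, 'eval_metric': 'error'}
xgb_params_02 = {'silent': 1, 'nthread': 1, 'eval_metric': ['error']}
xgb_params_03 = {'silent': 1, 'nthread': 1, 'eval_metric': ['rmse', 'error']}
xgb_params_04 = {'silent': 1, 'nthread': 1, 'eval_metric': ['error', 'rmse']}

def evalerror_01(self, preds, dtrain):
    labels = dtrain.get_label()
    return 'error', float(sum(labels != (preds > 0.0))) / len(labels)

def evalerror_02(self, preds, dtrain):
    labels = dtrain.get_label()
    return [('error', float(sum(labels != (preds > 0.0))) / len(labels))]

def evalerror_03(self, preds, dtrain):
    from sklearn.metrics import mean_squared_error
    labels = dtrain.get_label()
    return [('rmse', mean_squared_error(labels, preds)),
            ('error', float(sum(labels != (preds > 0.0))) / len(labels))]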
def test_binary_classification():
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits
    from sklearn.model_selection import KFold

    digits = load_digits(2)
    y = digits['target']
    X = digits['data']
    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
    for train_index, test_index in kf.split(X):
        xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index])
        preds = xgb_model.predict(X[test_index])
        labels = y[test_index]
        err = sum(1 for i in range(len(preds))
                  if int(preds[i] > 0.5) != labels[i]) / float(len(preds))
        assert err < 0.1
def test_coordinate(self):
    tm._skip_if_no_sklearn()
    variable_param = {'booster': ['gblinear'],
                      'updater': ['coord_descent'],
                      'eta': [0.5],
                      'top_k': [10],
                      'tolerance': [1e-5],
                      'nthread': [2],
                      'alpha': [.005, .1],
                      'lambda': [.005],
                      'feature_selector': ['cyclic', 'shuffle',
                                           'greedy', 'thrifty']}
    for param in parameter_combinations(variable_param):
        results = run_suite(param, 200, self.datasets, scale_features=True)
        assert_regression_result(results, 1e-2)
        assert_classification_result(results)
def test_kwargs_grid_search():
    tm._skip_if_no_sklearn()
    from sklearn.model_selection import GridSearchCV
    from sklearn import datasets

    params = {'tree_method': 'hist'}
    clf = xgb.XGBClassifier(n_estimators=1, learning_rate=1.0, **params)
    assert clf.get_params()['tree_method'] == 'hist'
    # 'max_leaves' is not a default argument of XGBClassifier
    # Check we can still do grid search over this parameter
    search_params = {'max_leaves': range(2, 5)}
    grid_cv = GridSearchCV(clf, search_params, cv=5)
    iris = datasets.load_iris()
    grid_cv.fit(iris.data, iris.target)

    # Expect unique results for each parameter value
    # This confirms sklearn is able to successfully update the parameter
    means = grid_cv.cv_results_['mean_test_score']
    assert len(means) == len(set(means))
def test_seed_deprecation():
    tm._skip_if_no_sklearn()
    warnings.simplefilter("always")

    with warnings.catch_warnings(record=True) as w:
        xgb.XGBClassifier(seed=1)
        assert w[0].category == DeprecationWarning
def test_kwargs_error():
    tm._skip_if_no_sklearn()
    import pytest

    params = {'updater': 'grow_gpu', 'subsample': .5, 'n_jobs': -1}
    # passing n_jobs both explicitly and via **params is a duplicate keyword
    # argument, so the constructor call must raise TypeError
    with pytest.raises(TypeError):
        clf = xgb.XGBClassifier(n_jobs=1000, **params)
        assert isinstance(clf, xgb.XGBClassifier)
def test_nthread_deprecation():
    tm._skip_if_no_sklearn()
    warnings.simplefilter("always")

    with warnings.catch_warnings(record=True) as w:
        xgb.XGBClassifier(nthread=1).get_xgb_params()
        assert w[0].category == DeprecationWarning
def test_grow_gpu(self):
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits
    try:
        from sklearn.model_selection import train_test_split
    except ImportError:
        from sklearn.cross_validation import train_test_split

    # ag_dtrain, ag_dtest and dpath are assumed to be module-level fixtures
    # built from the agaricus demo data
    ag_param = {'max_depth': 2,
                'tree_method': 'exact',
                'nthread': 1,
                'eta': 1,
                'silent': 1,
                'objective': 'binary:logistic',
                'eval_metric': 'auc'}
    ag_param2 = {'max_depth': 2,
                 'updater': 'grow_gpu',
                 'eta': 1,
                 'silent': 1,
                 'objective': 'binary:logistic',
                 'eval_metric': 'auc'}
    ag_res = {}
    ag_res2 = {}

    num_rounds = 10
    xgb.train(ag_param, ag_dtrain, num_rounds,
              [(ag_dtrain, 'train'), (ag_dtest, 'test')],
              evals_result=ag_res)
    xgb.train(ag_param2, ag_dtrain, num_rounds,
              [(ag_dtrain, 'train'), (ag_dtest, 'test')],
              evals_result=ag_res2)
    # the GPU updater must match the exact method on this data
    assert ag_res['train']['auc'] == ag_res2['train']['auc']
    assert ag_res['test']['auc'] == ag_res2['test']['auc']

    digits = load_digits(2)
    X = digits['data']
    y = digits['target']
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    dtrain = xgb.DMatrix(X_train, y_train)
    dtest = xgb.DMatrix(X_test, y_test)

    param = {'objective': 'binary:logistic',
             'updater': 'grow_gpu',
             'max_depth': 3,
             'eval_metric': 'auc'}
    res = {}
    xgb.train(param, dtrain, 10, [(dtrain, 'train'), (dtest, 'test')],
              evals_result=res)
    assert self.non_decreasing(res['train']['auc'])
    assert self.non_decreasing(res['test']['auc'])

    # fail-safe test for dense data
    from sklearn.datasets import load_svmlight_file
    X2, y2 = load_svmlight_file(dpath + 'agaricus.txt.train')
    X2 = X2.toarray()
    dtrain2 = xgb.DMatrix(X2, label=y2)

    param = {'objective': 'binary:logistic',
             'updater': 'grow_gpu',
             'max_depth': 2,
             'eval_metric': 'auc'}
    res = {}
    xgb.train(param, dtrain2, 10, [(dtrain2, 'train')], evals_result=res)
    assert self.non_decreasing(res['train']['auc'])
    assert res['train']['auc'][0] >= 0.85

    for j in range(X2.shape[1]):
        for i in rng.choice(X2.shape[0], size=10, replace=False):
            X2[i, j] = 2

    dtrain3 = xgb.DMatrix(X2, label=y2)
    res = {}
    xgb.train(param, dtrain3, num_rounds, [(dtrain3, 'train')],
              evals_result=res)
    assert self.non_decreasing(res['train']['auc'])
    assert res['train']['auc'][0] >= 0.85

    for j in range(X2.shape[1]):
        for i in np.random.choice(X2.shape[0], size=10, replace=False):
            X2[i, j] = 3

    dtrain4 = xgb.DMatrix(X2, label=y2)
    res = {}
    xgb.train(param, dtrain4, 10, [(dtrain4, 'train')], evals_result=res)
    assert self.non_decreasing(res['train']['auc'])
    assert res['train']['auc'][0] >= 0.85
def test_fast_hist(self):
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits
    from sklearn.model_selection import train_test_split

    digits = load_digits(2)
    X = digits['data']
    y = digits['target']
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    dtrain = xgb.DMatrix(X_train, y_train)
    dtest = xgb.DMatrix(X_test, y_test)

    param = {'objective': 'binary:logistic',
             'tree_method': 'hist',
             'grow_policy': 'depthwise',
             'max_depth': 3,
             'eval_metric': 'auc'}
    res = {}
    xgb.train(param, dtrain, 10, [(dtrain, 'train'), (dtest, 'test')],
              evals_result=res)
    assert self.non_decreasing(res['train']['auc'])
    assert self.non_decreasing(res['test']['auc'])

    param2 = {'objective': 'binary:logistic',
              'tree_method': 'hist',
              'grow_policy': 'lossguide',
              'max_depth': 0,
              'max_leaves': 8,
              'eval_metric': 'auc'}
    res = {}
    xgb.train(param2, dtrain, 10, [(dtrain, 'train'), (dtest, 'test')],
              evals_result=res)
    assert self.non_decreasing(res['train']['auc'])
    assert self.non_decreasing(res['test']['auc'])

    param3 = {'objective': 'binary:logistic',
              'tree_method': 'hist',
              'grow_policy': 'lossguide',
              'max_depth': 0,
              'max_leaves': 8,
              'max_bin': 16,
              'eval_metric': 'auc'}
    res = {}
    xgb.train(param3, dtrain, 10, [(dtrain, 'train'), (dtest, 'test')],
              evals_result=res)
    assert self.non_decreasing(res['train']['auc'])

    # fail-safe test for dense data
    from sklearn.datasets import load_svmlight_file
    dpath = 'demo/data/'
    X2, y2 = load_svmlight_file(dpath + 'agaricus.txt.train')
    X2 = X2.toarray()
    dtrain2 = xgb.DMatrix(X2, label=y2)

    param = {'objective': 'binary:logistic',
             'tree_method': 'hist',
             'grow_policy': 'depthwise',
             'max_depth': 2,
             'eval_metric': 'auc'}
    res = {}
    xgb.train(param, dtrain2, 10, [(dtrain2, 'train')], evals_result=res)
    assert self.non_decreasing(res['train']['auc'])
    assert res['train']['auc'][0] >= 0.85

    for j in range(X2.shape[1]):
        for i in np.random.choice(X2.shape[0], size=10, replace=False):
            X2[i, j] = 2

    dtrain3 = xgb.DMatrix(X2, label=y2)
    res = {}
    xgb.train(param, dtrain3, 10, [(dtrain3, 'train')], evals_result=res)
    assert self.non_decreasing(res['train']['auc'])
    assert res['train']['auc'][0] >= 0.85

    for j in range(X2.shape[1]):
        for i in np.random.choice(X2.shape[0], size=10, replace=False):
            X2[i, j] = 3

    dtrain4 = xgb.DMatrix(X2, label=y2)
    res = {}
    xgb.train(param, dtrain4, 10, [(dtrain4, 'train')], evals_result=res)
    assert self.non_decreasing(res['train']['auc'])
    assert res['train']['auc'][0] >= 0.85

    # fail-safe test for max_bin=2
    param = {'objective': 'binary:logistic',
             'tree_method': 'hist',
             'grow_policy': 'depthwise',
             'max_depth': 2,
             'eval_metric': 'auc',
             'max_bin': 2}
    res = {}
    xgb.train(param, dtrain2, 10, [(dtrain2, 'train')], evals_result=res)
    assert self.non_decreasing(res['train']['auc'])
    assert res['train']['auc'][0] >= 0.85
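# `self.non_decreasing` is called above but defined elsewhere in the test
# class. A minimal sketch, assuming a small floating-point tolerance for
# consecutive AUC values (the exact tolerance is an assumption):
def non_decreasing(self, L):
    return all((x - y) < 0.001 for x, y in zip(L, L[1:]))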