Example #1
import numpy as np
from unittest.mock import Mock

from sklearn.linear_model import LogisticRegression
# Private helpers; the module path may differ across scikit-learn versions
# (sklearn.metrics._scorer in recent releases, sklearn.metrics.scorer before).
from sklearn.metrics._scorer import _check_multimetric_scoring, _MultimetricScorer


# The arguments are supplied by a pytest.mark.parametrize decorator in the
# original test suite (see the sketch after this example).
def test_multimetric_scorer_calls_method_once(scorers, expected_predict_count,
                                              expected_predict_proba_count,
                                              expected_decision_func_count):
    X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0])

    mock_est = Mock()
    fit_func = Mock(return_value=mock_est)
    predict_func = Mock(return_value=y)

    pos_proba = np.random.rand(X.shape[0])
    proba = np.c_[1 - pos_proba, pos_proba]
    predict_proba_func = Mock(return_value=proba)
    decision_function_func = Mock(return_value=pos_proba)

    mock_est.fit = fit_func
    mock_est.predict = predict_func
    mock_est.predict_proba = predict_proba_func
    mock_est.decision_function = decision_function_func

    # build one scorer per requested metric and wrap them so that each of the
    # estimator's prediction methods is called at most once and the result shared
    scorer_dict, _ = _check_multimetric_scoring(LogisticRegression(), scorers)
    multi_scorer = _MultimetricScorer(**scorer_dict)
    results = multi_scorer(mock_est, X, y)

    assert set(scorers) == set(results)  # compare dict keys

    assert predict_func.call_count == expected_predict_count
    assert predict_proba_func.call_count == expected_predict_proba_count
    assert decision_function_func.call_count == expected_decision_func_count
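The test above receives its arguments from a pytest parametrization that the snippet omits. A minimal sketch of such a decorator, with illustrative cases (the expected counts follow from which estimator method each scorer uses; the concrete cases in the original test suite may differ):

import pytest

@pytest.mark.parametrize(
    "scorers, expected_predict_count,"
    " expected_predict_proba_count, expected_decision_func_count",
    [
        # roc_auc uses decision_function, accuracy uses predict
        (['roc_auc', 'accuracy'], 1, 0, 1),
        # neg_log_loss uses predict_proba, accuracy uses predict
        (['neg_log_loss', 'accuracy'], 1, 1, 0),
    ],
)
def test_multimetric_scorer_calls_method_once(scorers, expected_predict_count,
                                              expected_predict_proba_count,
                                              expected_decision_func_count):
    ...  # body as in Example #1 above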
Example #2
from numpy.testing import assert_allclose

from sklearn.datasets import make_classification
from sklearn.metrics import get_scorer
from sklearn.metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
from sklearn.tree import DecisionTreeClassifier


def test_multimetric_scorer_sanity_check():
    # the scoring dictionary returned matches calling each scorer separately
    scorers = {
        'a1': 'accuracy',
        'a2': 'accuracy',
        'll1': 'neg_log_loss',
        'll2': 'neg_log_loss',
        'ra1': 'roc_auc',
        'ra2': 'roc_auc'
    }

    X, y = make_classification(random_state=0)

    clf = DecisionTreeClassifier()
    clf.fit(X, y)

    scorer_dict, _ = _check_multimetric_scoring(clf, scorers)
    multi_scorer = _MultimetricScorer(**scorer_dict)

    result = multi_scorer(clf, X, y)

    separate_scores = {
        name: get_scorer(name)(clf, X, y)
        for name in ['accuracy', 'neg_log_loss', 'roc_auc']
    }

    for key, value in result.items():
        score_name = scorers[key]
        assert_allclose(value, separate_scores[score_name])
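For context, the same style of scoring dictionary can be passed to the public cross_validate API, which is where multimetric scoring is normally exercised. A small usage sketch (metric names here are illustrative):

from sklearn.datasets import make_classification
from sklearn.model_selection import cross_validate
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(random_state=0)
cv_results = cross_validate(
    DecisionTreeClassifier(random_state=0), X, y,
    scoring={'acc': 'accuracy', 'll': 'neg_log_loss', 'auc': 'roc_auc'},
    cv=5,
)
# keys: fit_time, score_time, test_acc, test_auc, test_ll
print(sorted(cv_results))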
Example #3
import numpy as np

from sklearn.metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
from sklearn.tree import DecisionTreeRegressor


def test_multimetric_scorer_calls_method_once_regressor_threshold():
    predict_called_cnt = 0

    class MockDecisionTreeRegressor(DecisionTreeRegressor):
        def predict(self, X):
            nonlocal predict_called_cnt
            predict_called_cnt += 1
            return super().predict(X)

    X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0])

    # a regressor has no decision_function: the threshold scorer (roc_auc) falls
    # back to predict, so both metrics share a single cached predict call
    clf = MockDecisionTreeRegressor()
    clf.fit(X, y)

    scorers = {'neg_mse': 'neg_mean_squared_error', 'r2': 'roc_auc'}
    scorer_dict, _ = _check_multimetric_scoring(clf, scorers)
    scorer = _MultimetricScorer(**scorer_dict)
    scorer(clf, X, y)

    assert predict_called_cnt == 1
Example #4
import numpy as np

from sklearn.metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
from sklearn.neighbors import KNeighborsClassifier


def test_multimetric_scorer_calls_method_once_classifier_no_decision():
    predict_proba_call_cnt = 0

    class MockKNeighborsClassifier(KNeighborsClassifier):
        def predict_proba(self, X):
            nonlocal predict_proba_call_cnt
            predict_proba_call_cnt += 1
            return super().predict_proba(X)

    X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0])

    # KNeighborsClassifier has no decision_function, so both roc_auc and
    # neg_log_loss go through predict_proba and share a single cached call
    clf = MockKNeighborsClassifier(n_neighbors=1)
    clf.fit(X, y)

    scorers = ['roc_auc', 'neg_log_loss']
    scorer_dict, _ = _check_multimetric_scoring(clf, scorers)
    scorer = _MultimetricScorer(**scorer_dict)
    scorer(clf, X, y)

    assert predict_proba_call_cnt == 1
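As a final sketch, _MultimetricScorer can also be built directly from scorer objects obtained with the public get_scorer helper. This assumes the same scikit-learn version as the tests above; _MultimetricScorer is private and its constructor has changed between releases:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import get_scorer
from sklearn.metrics._scorer import _MultimetricScorer

X, y = make_classification(random_state=0)
clf = LogisticRegression().fit(X, y)

# a single wrapped call evaluates both metrics while sharing cached predictions
multi = _MultimetricScorer(auc=get_scorer('roc_auc'),
                           ll=get_scorer('neg_log_loss'))
print(multi(clf, X, y))  # e.g. {'auc': ..., 'll': ...}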