Example #1
def test_class_weight(queue):
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
    y = np.array([1, 1, 1, 2, 2, 2])

    clf = SVC(class_weight={1: 0.1})
    clf.fit(X, y, queue=queue)
    assert_array_almost_equal(clf.predict(X, queue=queue), [2] * 6)
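
The snippets in this section are shown without their imports. A minimal preamble that would make them runnable is sketched below; the import paths are assumptions (the `queue=` keyword suggests a queue-aware SVC such as the oneDAL-backed one, while the examples that pass no queue also work with `sklearn.svm.SVC`).

# Assumed preamble -- a sketch, not taken from the original test files.
import numpy as np
import pytest
import scipy.sparse as sp
import sklearn.utils.estimator_checks
from numpy.testing import assert_array_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn.datasets import make_blobs, make_classification
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.model_selection import train_test_split
from sklearn.utils.estimator_checks import check_estimator

# Assumption: the queue-aware SVC comes from a oneDAL-backed package such as
# onedal.svm; the queue-less examples also run with sklearn.svm.SVC.
from onedal.svm import SVC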
Example #2
def test_sample_weight(queue):
    X = np.array([[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 2]])
    y = np.array([1, 1, 1, 2, 2, 2])

    clf = SVC(kernel='linear')
    clf.fit(X, y, sample_weight=[1] * 6, queue=queue)
    assert_array_almost_equal(clf.intercept_, [0.0])
Example #3
def test_decision_function_shape():
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # check the shape of decision_function with decision_function_shape='ovo'
    clf = SVC(kernel='linear',
              decision_function_shape='ovo').fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    assert dec.shape == (len(X_train), 10)
Example #4
def test_decision_function(queue):
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype=np.float32)
    Y = np.array([1, 1, 1, 2, 2, 2], dtype=np.float32)

    clf = SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
    clf.fit(X, Y, queue=queue)

    rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
    dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X, queue=queue))
Example #5
def test_decision_function():
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    Y = [1, 1, 1, 2, 2, 2]

    clf = SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
    clf.fit(X, Y)

    rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
    dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
Example #6
def _test_libsvm_parameters(queue, array_constr, dtype):
    X = array_constr([[-2, -1], [-1, -1], [-1, -2],
                      [1, 1], [1, 2], [2, 1]], dtype=dtype)
    y = array_constr([1, 1, 1, 2, 2, 2], dtype=dtype)

    clf = SVC(kernel='linear').fit(X, y, queue=queue)
    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.support_vectors_, (X[1], X[3]))
    assert_array_equal(clf.intercept_, [0.])
    assert_array_equal(clf.predict(X), y)
Example #7
def test_svc_sigmoid(queue, dtype):
    X_train = np.array([[-1, 2], [0, 0], [2, -1],
                        [+1, +1], [+1, +2], [+2, +1]], dtype=dtype)
    X_test = np.array([[0, 2], [0.5, 0.5],
                       [0.3, 0.1], [2, 0], [-1, -1]], dtype=dtype)
    y_train = np.array([1, 1, 1, 2, 2, 2], dtype=dtype)
    svc = SVC(kernel='sigmoid').fit(X_train, y_train, queue=queue)

    assert_array_equal(svc.dual_coef_, [[-1, -1, -1, 1, 1, 1]])
    assert_array_equal(svc.support_, [0, 1, 2, 3, 4, 5])
    assert_array_equal(svc.predict(X_test, queue=queue), [2, 2, 1, 2, 1])
Example #8
def test_pickle(queue):
    iris = datasets.load_iris()
    clf = SVC(kernel='linear').fit(iris.data, iris.target, queue=queue)
    expected = clf.decision_function(iris.data, queue=queue)

    import pickle
    dump = pickle.dumps(clf)
    clf2 = pickle.loads(dump)

    assert type(clf2) == clf.__class__
    result = clf2.decision_function(iris.data, queue=queue)
    assert_array_equal(expected, result)
Example #9
def test_decision_function_shape(queue):
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # check the shape of decision_function with decision_function_shape='ovo'
    clf = SVC(kernel='linear',
              decision_function_shape='ovo').fit(X_train, y_train, queue=queue)
    dec = clf.decision_function(X_train, queue=queue)
    assert dec.shape == (len(X_train), 10)

    with pytest.raises(ValueError, match="must be either 'ovr' or 'ovo'"):
        SVC(decision_function_shape='bad').fit(X_train, y_train, queue=queue)
Example #10
def _test_binary_dataset(queue, kernel):
    X, y = make_classification(n_samples=80, n_features=20, n_classes=2, random_state=0)
    sparse_X = sp.csr_matrix(X)

    dataset = sparse_X, y, sparse_X
    clf = SVC(kernel=kernel)
    check_svm_model_equal(queue, clf, *dataset)
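
`check_svm_model_equal` is not defined in these examples. A plausible sketch, assuming it fits copies of the estimator on sparse and dense views of the training data and compares the resulting models:

# Hypothetical helper -- a sketch of what check_svm_model_equal could do.
import copy

from numpy.testing import assert_array_almost_equal


def check_svm_model_equal(queue, clf, X_train, y_train, X_test, decimal=6):
    sparse_clf = copy.deepcopy(clf)
    dense_clf = copy.deepcopy(clf)

    # Fit one copy on the sparse data and one on its dense equivalent.
    sparse_clf.fit(X_train, y_train, queue=queue)
    dense_clf.fit(X_train.toarray(), y_train, queue=queue)

    # Assumption: a sparse fit stores sparse support vectors / dual coefficients.
    assert_array_almost_equal(dense_clf.support_vectors_,
                              sparse_clf.support_vectors_.toarray(),
                              decimal=decimal)
    assert_array_almost_equal(dense_clf.dual_coef_,
                              sparse_clf.dual_coef_.toarray(),
                              decimal=decimal)
    assert_array_almost_equal(dense_clf.predict(X_test.toarray(), queue=queue),
                              sparse_clf.predict(X_test, queue=queue))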
Example #11
def _test_simple_dataset(queue, kernel):
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
    sparse_X = sp.lil_matrix(X)
    Y = [1, 1, 1, 2, 2, 2]

    X2 = np.array([[-1, -1], [2, 2], [3, 2]])
    sparse_X2 = sp.dok_matrix(X2)

    dataset = sparse_X, Y, sparse_X2
    clf = SVC(kernel=kernel, gamma=1)
    check_svm_model_equal(queue, clf, *dataset)
Example #12
def _test_iris(queue, kernel):
    iris = datasets.load_iris()
    rng = np.random.RandomState(0)
    perm = rng.permutation(iris.target.size)
    iris.data = iris.data[perm]
    iris.target = iris.target[perm]
    sparse_iris_data = sp.csr_matrix(iris.data)

    dataset = sparse_iris_data, iris.target, sparse_iris_data

    clf = SVC(kernel=kernel)
    check_svm_model_equal(queue, clf, *dataset, decimal=2)
Example #13
def test_estimator():
    def dummy(*args, **kwargs):
        pass

    md = sklearn.utils.estimator_checks
    saved = _replace_and_save(md, [
        'check_sample_weights_invariance',  # Max absolute difference: 0.0008
        'check_estimators_fit_returns_self',  # ValueError: empty metadata
        'check_classifiers_train',  # assert y_pred.shape == (n_samples,)
        'check_estimators_unfitted',  # Call 'fit' with appropriate arguments
    ], dummy)
    check_estimator(SVC())
    _restore_from_saved(md, saved)
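
`_replace_and_save` and `_restore_from_saved` are local test helpers, not part of scikit-learn. A minimal sketch, assuming they temporarily replace the named checks in `sklearn.utils.estimator_checks` with a no-op and later restore the originals:

# Hypothetical helpers -- a sketch of the patching utilities used above.
def _replace_and_save(md, fns, replacing_fn):
    """Replace each named function in module `md` and return the originals."""
    saved = {}
    for check_name in fns:
        saved[check_name] = getattr(md, check_name)  # remember the original
        setattr(md, check_name, replacing_fn)        # install the no-op
    return saved


def _restore_from_saved(md, saved):
    """Undo _replace_and_save by putting the saved functions back."""
    for check_name, original_fn in saved.items():
        setattr(md, check_name, original_fn)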
Example #14
def test_sparse_realdata(queue):
    data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
    indices = np.array([6, 5, 35, 31])
    indptr = np.array(
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
         2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
         2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
    X = sp.csr_matrix((data, indices, indptr))
    y = np.array(
        [1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
         0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
         0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
         3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
         0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
         3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
         1., 3.])

    clf = SVC(kernel='linear').fit(X.toarray(), y, queue=queue)
    sp_clf = SVC(kernel='linear').fit(X, y, queue=queue)

    assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
    assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
Example #15
def test_estimator():
    def dummy(*args, **kwargs):
        pass

    md = sklearn.utils.estimator_checks
    saved = _replace_and_save(
        md,
        [
            'check_sample_weights_invariance',  # Max absolute difference: 0.0008
            'check_estimators_fit_returns_self',  # ValueError: empty metadata
            'check_estimators_pickle',  # NotImplementedError
            'check_classifiers_predictions',  # Cannot cast ufunc 'multiply'
            'check_classifiers_train',  # assert y_pred.shape == (n_samples,)
            'check_classifiers_regression_target',  # Did not raise ValueError
            'check_supervised_y_2d',  # expected 1 DataConversionWarning
            'check_estimators_unfitted',  # Call 'fit' with appropriate arguments
        ],
        dummy)
    check_estimator(SVC())
    _restore_from_saved(md, saved)
Example #16
def test_iris(queue):
    iris = datasets.load_iris()
    clf = SVC(kernel='linear').fit(iris.data, iris.target, queue=queue)
    assert clf.score(iris.data, iris.target, queue=queue) > 0.9
    assert_array_equal(clf.classes_, np.sort(clf.classes_))