def test_precomputed():
    """
    SVC with a precomputed kernel.

    We test it with a toy dataset and with iris.
    """
    clf = svm.SVC(kernel='precomputed')
    # just a linear kernel
    K = np.dot(X, np.array(X).T)
    clf.fit(K, Y)
    # Gram matrix between the test and training points
    KT = np.dot(T, np.array(X).T)
    pred = clf.predict(KT)

    assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [[2], [4]])
    assert_array_equal(pred, true_result)


    # same as before, but using a kernel function instead of the kernel
    # matrix; the kernel is just a linear kernel
    kfunc = lambda x, y: np.dot(x, y.T)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(X, Y)
    pred = clf.predict(T)

    assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [[2], [4]])
    assert_array_equal(pred, true_result)
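A note on the precomputed-kernel contract illustrated above: at fit time the classifier expects the square Gram matrix K(train, train), and at predict time the rectangular matrix K(test, train) between the test points and the original training points. A minimal sketch under those assumptions (X_train, X_test, y_train are hypothetical names):

K_train = np.dot(X_train, np.array(X_train).T)   # n_train x n_train
clf = svm.SVC(kernel='precomputed').fit(K_train, y_train)
K_test = np.dot(X_test, np.array(X_train).T)     # n_test x n_train
y_pred = clf.predict(K_test)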
Example #2
def train_svm(DATA, LABELS):
    """ train_svm(DATA, LABELS)
        Function that applies the scikits.learn LIBSVM Python bindings to train an RBF-kernel SVM classifier on labeled data.

        inputs:    DATA -- A NumPy matrix where each row is a feature vector.
                   LABELS -- A NumPy array with one value (+1 or -1) per row of DATA, labeling the corresponding feature vector.
                   
        outputs:   clf -- A scikits.learn native object / data structure containing the parameters for the trained SVM. Use pickling to
                          save this for persistence across different Python sessions.
    """

    # Run the scikits.learn setup and training commands; return the result.
    parameters = {'gamma': np.arange(0.1, 2.0, 0.1), 'C': np.arange(1, 20, 1)}
    clf = gs.GridSearchCV(svm.SVC(kernel='rbf', probability=True), parameters)
    clf.fit(np.asarray(DATA), np.asarray(LABELS))

    best_parameters, score = max(clf.grid_scores_, key=lambda x: x[1])
    new_gamma = best_parameters['gamma']
    new_C = best_parameters['C']

    print "Optimal parameters found: gamma %f and C %f" % (new_gamma, new_C)
    clf1 = svm.SVC(kernel='rbf', gamma=new_gamma, C=new_C, probability=True)
    clf1.fit(DATA, LABELS, class_weight='auto')

    return clf1
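Since the docstring recommends pickling the trained classifier for persistence, a minimal sketch of that round trip (the file name is hypothetical):

import pickle

clf = train_svm(DATA, LABELS)
with open('svm_model.pkl', 'wb') as f:
    pickle.dump(clf, f)
with open('svm_model.pkl', 'rb') as f:
    clf = pickle.load(f)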
Example #3
def test_auto_weight():
    """Test class weights for imbalanced data"""
    # compute reference metrics on the iris dataset, which is quite
    # balanced by default
    X, y = iris.data, iris.target
    clf = svm.SVC(kernel="linear").fit(X, y)
    assert_almost_equal(metrics.f1_score(y, clf.predict(X)), 0.99, 2)

    # make the same prediction using automated class_weight
    clf_auto = svm.SVC(kernel="linear").fit(X, y, class_weight="auto")
    assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X)), 0.99, 2)

    # Make sure that in the balanced case it does not change anything
    # to use "auto"
    assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)

    # build a very imbalanced dataset out of the iris data
    X_0 = X[y == 0, :]
    y_0 = y[y == 0]

    X_imbalanced = np.vstack([X] + [X_0] * 10)
    y_imbalanced = np.concatenate([y] + [y_0] * 10)

    # fit a model on the imbalanced data without class weight info
    y_pred = svm.SVC().fit(X_imbalanced, y_imbalanced).predict(X)
    assert_almost_equal(metrics.f1_score(y, y_pred), 0.88, 2)

    # fit a model with auto class_weight enabled
    clf = svm.SVC().fit(X_imbalanced, y_imbalanced, class_weight="auto")
    y_pred = clf.predict(X)
    assert_almost_equal(metrics.f1_score(y, y_pred), 0.92, 2)
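The "auto" heuristic weights each class inversely to its frequency. An explicit dict with the same flavor can be built by hand (a sketch requiring a recent NumPy for return_counts; the library's exact normalization may differ):

classes, counts = np.unique(y_imbalanced, return_counts=True)
weights = dict(zip(classes, float(len(y_imbalanced)) / (len(classes) * counts)))
# then pass it explicitly, e.g.
# svm.SVC().fit(X_imbalanced, y_imbalanced, class_weight=weights)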
Example #4
def test_bad_input():
    """
    Test that it gives proper exception on deficient input
    """
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)

    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, clf.fit, X, Y)

    Y2 = Y[:-1]  # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X, Y2)

    # Test with arrays that are non-contiguous.
    Xf = np.asfortranarray(X)
    clf = svm.SVC()
    clf.fit(Xf, Y)
    assert_array_equal(clf.predict(T), true_result)

    # error for precomputed kernels
    clf = svm.SVC(kernel='precomputed')
    assert_raises(ValueError, clf.fit, X, Y)

    Xt = np.array(X).T

    clf = svm.SVC(kernel='precomputed')
    clf.fit(np.dot(X, Xt), Y)
    assert_raises(ValueError, clf.predict, X)

    clf = svm.SVC()
    clf.fit(X, Y)
    assert_raises(ValueError, clf.predict, Xt)
Example #5
def test_sanity_checks_predict():
    Xt = np.array(X).T

    clf = svm.SVC(kernel='precomputed')
    clf.fit(np.dot(X, Xt), Y)
    assert_raises(ValueError, clf.predict, X)

    clf = svm.SVC()
    clf.fit(X, Y)
    assert_raises(ValueError, clf.predict, Xt)
Example #6
def test_probability():
    """
    Predict probabilities using SVC

    This uses cross validation, so we use a slightly bigger testing
    set.
    """
    from scikits.learn import datasets
    iris = datasets.load_iris()

    clf = svm.SVC(probability=True)
    clf.fit(iris.data, iris.target)

    # predict on a simple dataset
    T = [[0, 0, 0, 0],
         [2, 2, 2, 2]]
    assert_array_almost_equal(clf.predict_proba(T),
                              [[0.993, 0.003, 0.002],
                               [0.740, 0.223, 0.035]],
                              decimal=2)

    # make sure probabilities sum to one
    pprob = clf.predict_proba(X)
    assert_array_almost_equal(pprob.sum(axis=1), np.ones(len(X)))
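One caveat worth remembering: predict_proba is backed by Platt scaling fit with an internal cross validation, so the argmax of the probabilities can occasionally disagree with predict. A quick consistency check (a sketch; iris labels are 0..2, so the argmax index is directly comparable to the predicted label):

proba = clf.predict_proba(iris.data)
agreement = np.mean(proba.argmax(axis=1) == clf.predict(iris.data))
# typically close to 1.0, but not guaranteed to be exactly 1.0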
Example #7
def test_decision_function():
    """
    Test decision_function

    Sanity check: test that the decision_function implemented in Python
    returns the same values as the one in libsvm

    TODO: probably could be simplified
    """
    clf = svm.SVC(kernel='linear').fit(iris.data, iris.target)

    data = iris.data[0]

    sv_start = np.r_[0, np.cumsum(clf.n_support_)]
    n_class = 3

    kvalue = np.dot(data, clf.support_vectors_.T)
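    # dual_coef_ has shape [n_class - 1, n_SV]: column k holds the coefficients
    # of support vector k in each of the one-vs-one problems it takes part in,
    # while sv_start[i]:sv_start[i + 1] indexes the support vectors of class i.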

    dec = np.empty(n_class * (n_class - 1) / 2)
    p = 0
    for i in range(n_class):
        for j in range(i + 1, n_class):
            coef1 = clf.dual_coef_[j - 1]
            coef2 = clf.dual_coef_[i]
            idx1 = slice(sv_start[i], sv_start[i + 1])
            idx2 = slice(sv_start[j], sv_start[j + 1])
            s = np.dot(coef1[idx1],  kvalue[idx1]) + \
                np.dot(coef2[idx2], kvalue[idx2]) + \
                clf.intercept_[p]
            dec[p] = s
            p += 1

    assert_array_almost_equal(-dec, np.ravel(clf.decision_function(data)))
Example #8
def test_weight():
    """
    Test class weights
    """
    clf = svm.SVC()
    # we give a small weight to class 1
    clf.fit(X, Y, {1: 0.1})
    # so all predicted values belong to class 2
    assert_array_almost_equal(clf.predict(X), [2] * 6)

    X_, y_ = test_dataset_classif(n_samples=200, n_features=100, param=[5, 1],
                                  seed=0)
    for clf in (linear_model.LogisticRegression(), svm.LinearSVC(), svm.SVC()):
        clf.fit(X_[:180], y_[:180], class_weight={0: 5})
        y_pred = clf.predict(X_[180:])
        assert np.sum(y_pred == y_[180:]) >= 11
Example #9
def test_probability():
    """
    Predict probabilities using SVC

    This uses cross validation, so we use a slightly bigger testing
    set.

    FIXME: is it harmless that we obtain slightly different results on
    different operating systems? (that is why we only check to 1
    decimal of precision)
    TODO: test also on an example with intercept != 0
    """
    from scikits.learn import datasets
    iris = datasets.load_iris()

    clf = svm.SVC(probability=True)
    clf.fit(iris.data, iris.target)

    # predict on a simple dataset
    T = [[0, 0, 0, 0], [2, 2, 2, 2]]
    assert_array_almost_equal(clf.predict_proba(T),
                              [[0.993, 0.003, 0.002], [0.740, 0.223, 0.035]],
                              decimal=2)

    # make sure probabilities sum to one
    pprob = clf.predict_proba(X)
    assert_array_almost_equal(pprob.sum(axis=1), np.ones(len(X)))
Example #10
def test_libsvm_iris():
    """
    Check consistency on dataset iris.
    """

    # shuffle the dataset so that labels are not ordered
    for k in ('linear', 'rbf'):
        clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
        assert np.mean(clf.predict(iris.data) == iris.target) > 0.9

    assert_array_equal(clf.label_, np.sort(clf.label_))

    # check also the low-level API
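    # (svm.libsvm.fit returns the raw model arrays -- support indices, support
    # vectors, dual coefficients, intercept, ... -- as a tuple that is splatted
    # back into svm.libsvm.predict below)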
    model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
    pred = svm.libsvm.predict(iris.data, *model)
    assert np.mean(pred == iris.target) > .95

    model = svm.libsvm.fit(iris.data,
                           iris.target.astype(np.float64),
                           kernel='linear')
    pred = svm.libsvm.predict(iris.data, *model, **{'kernel': 'linear'})
    assert np.mean(pred == iris.target) > .95

    pred = svm.libsvm.cross_validation(iris.data,
                                       iris.target.astype(np.float64),
                                       5,
                                       kernel='linear')
    assert np.mean(pred == iris.target) > .95
Example #11
def test_sparse_realdata():
    """
    Test on a subset from the 20newsgroups dataset.

    This catches some bugs if input is not correctly converted into
    sparse format or weights are not correctly initialized.
    """

    data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
    indices = np.array([6, 5, 35, 31])
    indptr = np.array([
        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 4, 4, 4
    ])
    X = scipy.sparse.csr_matrix((data, indices, indptr))
    y = np.array([
        1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2., 0., 2., 0., 3., 0.,
        3., 0., 1., 1., 3., 2., 3., 2., 0., 3., 1., 0., 2., 1., 2., 0., 1., 0.,
        2., 3., 1., 3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2., 0., 3.,
        2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2., 3., 0., 0., 2., 2., 1., 3.,
        1., 1., 0., 1., 2., 1., 1., 3.
    ])

    clf = svm.SVC(kernel='linear').fit(X.todense(), y)
    sp_clf = svm.sparse.SVC(kernel='linear').fit(X, y)

    assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.todense())
    assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
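For readers unfamiliar with the (data, indices, indptr) construction used above: row i of a CSR matrix stores the values data[indptr[i]:indptr[i+1]] at the column positions indices[indptr[i]:indptr[i+1]]. A tiny self-contained sketch:

import numpy as np
import scipy.sparse

# 3 x 4 matrix: row 0 has 1.0 at column 2, row 1 is empty,
# row 2 has 2.0 at column 0 and 3.0 at column 3
m = scipy.sparse.csr_matrix((np.array([1., 2., 3.]),
                             np.array([2, 0, 3]),
                             np.array([0, 1, 1, 3])), shape=(3, 4))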
Example #12
def test_SVC():
    """Check that sparse SVC gives the same result as SVC"""

    clf = svm.SVC(kernel='linear').fit(X, Y)
    sp_clf = svm.sparse.SVC(kernel='linear').fit(X, Y)

    assert_array_equal(sp_clf.predict(T), true_result)

    assert scipy.sparse.issparse(sp_clf.support_vectors_)
    assert_array_almost_equal(clf.support_vectors_,
                              sp_clf.support_vectors_.todense())

    assert scipy.sparse.issparse(sp_clf.dual_coef_)
    assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())

    assert scipy.sparse.issparse(sp_clf.coef_)
    assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
    assert_array_almost_equal(clf.predict(T), sp_clf.predict(T))

    # refit with a different dataset
    clf.fit(X2, Y2)
    sp_clf.fit(X2, Y2)
    assert_array_almost_equal(clf.support_vectors_,
                              sp_clf.support_vectors_.todense())
    assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
    assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
    assert_array_almost_equal(clf.predict(T2), sp_clf.predict(T2))
Example #13
    def fit(self):
        print "fit the model"
        train = np.array(self.model.data)
        X = train[:, :2]
        y = train[:, 2]

        C = float(self.complexity.get())
        gamma = float(self.gamma.get())
        coef0 = float(self.coef0.get())
        degree = int(self.degree.get())
        kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
        if len(np.unique(y)) == 1:
            clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
                                  gamma=gamma,
                                  coef0=coef0,
                                  degree=degree)
            clf.fit(X)
        else:
            clf = svm.SVC(kernel=kernel_map[self.kernel.get()],
                          C=C,
                          gamma=gamma,
                          coef0=coef0,
                          degree=degree)
            clf.fit(X, y)
        if hasattr(clf, 'score'):
            print "Accuracy:", clf.score(X, y) * 100
        X1, X2, Z = self.decision_surface(clf)
        self.model.clf = clf
        self.model.set_surface((X1, X2, Z))
        self.model.surface_type = self.surface_type.get()
        self.fitted = True
        self.model.changed("surface")
Example #14
def test_CSVC():
    """
    C_SVC algorithm and linear kernel.

    We test this on two datasets, the first one with two classes and
    the second one with three classes. We check for predicted values
    and estimated parameters.

    TODO: check with different parameters of C, nonlinear kernel
    """

    clf = svm.SVC(kernel='linear')
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
    assert_array_equal(clf.support_, [[-1, -1], [1, 1]])
    assert_array_equal(clf.intercept_, [0.])
    assert_array_equal(pred, true_result)

    # the same with other dataset
    clf.fit(X2, Y2)
    pred = clf.predict(T2)
    assert_array_almost_equal(clf.dual_coef_,
                              [[.99, -.006, -.49, -.49, -.07],
                               [.072, .16, 0, 0, -.16]], decimal=2)
    # TODO: why do we get the whole dataset as support vectors?
    assert_array_equal(clf.support_,
                       [[0., 0., 0.],
                        [1., 1., 1.],
                        [2., 0., 0.],
                        [0., 0., 2.],
                        [3., 3., 3.]])
    assert_array_equal(pred, true_result2)
Example #15
def train_liblinear_classifier_core(trainXy,
                                    classifier_type="liblinear",
                                    trace_normalize=False,
                                    **kwargs):
    """ Classifier training using SVMs

    Input:
    train_features = training features (both positive and negative)
    train_labels = corresponding label vector
    svm_eps = eps of svm
    svm_C = C parameter of svm
    classifier_type = liblinear or libsvm"""

    # do normalization
    (train_features, train_labels), train_mean, train_std, trace = normalize(
        [trainXy], trace_normalize=trace_normalize)
    if classifier_type == 'liblinear':
        clf = sklearn_svm.LinearSVC(**kwargs)
    elif classifier_type == 'libsvm':
        clf = sklearn_svm.SVC(**kwargs)
    elif classifier_type == 'LRL':
        clf = LogisticRegression(**kwargs)
    elif classifier_type == 'MCC':
        clf = CorrelationClassifier(**kwargs)
    elif classifier_type.startswith('svm.'):
        ct = classifier_type.split('.')[-1]
        clf = getattr(sklearn_svm, ct)(**kwargs)
    elif classifier_type.startswith('linear_model.'):
        ct = classifier_type.split('.')[-1]
        clf = getattr(sklearn_linear_model, ct)(**kwargs)
    else:
        raise ValueError('unknown classifier_type: %r' % classifier_type)

    clf.fit(train_features, train_labels)

    return clf, train_mean, train_std, trace
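A hypothetical call through the dotted dispatch branch above (trainXy and the normalize helper are assumed to be defined elsewhere in the module):

clf, train_mean, train_std, trace = train_liblinear_classifier_core(
    trainXy, classifier_type='svm.LinearSVC', C=1.0)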
Example #16
def train_scikit_svm():
    # NOTE FROM SCIKIT-LEARN: all classifiers in scikit-learn do multiclass
    # classification out-of-the-box; you don't need the sklearn.multiclass
    # module unless you want to experiment with different multiclass strategies.
    clf = svm.SVC(decision_function_shape='ovo')
    # the defaults behind this call: C=1.0, cache_size=200, class_weight=None,
    # coef0=0.0, degree=3, gamma='auto', kernel='rbf', max_iter=-1,
    # probability=False, random_state=None, shrinking=True, tol=0.001,
    # verbose=False
    return clf
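With decision_function_shape='ovo', decision_function returns one column per pair of classes, i.e. n_classes * (n_classes - 1) / 2 of them. A quick shape check (a sketch using the iris dataset):

from sklearn import datasets, svm

iris = datasets.load_iris()
clf = svm.SVC(decision_function_shape='ovo').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert dec.shape == (150, 3)   # 3 classes -> 3 pairwise columns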
Example #17
def get_svn_params(X, y):
    clf = svm.SVC(kernel='linear')
    clf.fit(X, y)
    if len(clf.intercept_) != 1:
        print "ERROR in get_svn_params(X, y): more than one intercept??"
        return

    return clf.coef_[0], clf.intercept_[0], clf
Example #18
def test_error():
    """
    Test that it gives proper exception on deficient input
    """
    # impossible value of nu
    clf = svm.SVC(impl='nu_svc', kernel='linear', nu=0.0)
    assert_raises(ValueError, clf.fit, X, Y)

    Y2 = Y[:-1] # wrong dimensions for labels
    assert_raises(ValueError, svm.SVC, X, Y2)

    # Test with arrays that are non-contiguous.
    Xt = np.array(X).transpose()
    Yt = [1, 2]
    clf = svm.SVC()
    clf.fit(Xt, Yt)
    assert_array_equal(clf.predict(T), [1, 2, 2])
Example #19
def svm_classification(table):
    '''Fit an SVC using the first column of *table* as labels and the
    remaining columns as features.'''
    from scikits.learn import svm
    X = table[:, 1:]
    Y = table[:, 0]
    clf = svm.SVC()
    clf.fit(X, Y)
    print clf.support_
Example #20
def fitting(D, cp, ca, kwargs={'kernel': 'linear', 'C': 1.0}):
    #clfP = svm.SVC(kernel='rbf', gamma=0.7, C=1.0)
    #clfA = svm.SVC(kernel='rbf', gamma=0.7, C=1.0)

    clfP = svm.SVC(**kwargs)
    clfA = svm.SVC(**kwargs)

    log('fitting plastic')
    t = time.time()
    clfP.fit(D, cp)
    ttime = time.time() - t
    log('in %f sec' % (time.time() - t))
    log('fitting animals')
    t = time.time()
    clfA.fit(D, ca)
    ttime += (time.time() - t)
    log('in %f sec' % (time.time() - t))
    return clfP, clfA, ttime
Example #21
def test_weight():
    """
    Test class weights
    """
    clf = svm.SVC()
    # we give a small weight to class 1
    clf.fit(X, Y, {1: 0.1})
    # so all predicted values belong to class 2
    assert_array_almost_equal(clf.predict(X), [2] * 6)
Example #22
def train(*filenames):
    """Returns a classifier that """
    data = None
    answers = None
    all_images = []
    for filename in filenames:
        print filename
        if not (os.path.exists(filename) and os.path.exists(filename + '.code')):
            return False
        keys = getTrainingKey(filename + '.code')
        images = getImDictFromImage(filename)
        this_data = images.reshape(images.shape[0], -1)
        this_answers = numpy.array(keys)
        all_images.extend(images)
        if data is None:
            answers = this_answers
            data = this_data
        else:
            data = numpy.concatenate([data, this_data], 0)
            answers = numpy.concatenate([answers, this_answers], 0)

    print 'image shape', images.shape
    print 'data shape', data.shape
    print 'answers shape', answers.shape

    from scikits.learn import svm
    from scikits.learn.metrics import classification_report
    from scikits.learn.metrics import confusion_matrix
    classifier = svm.SVC()

    divider = 400

    classifier.fit(data[:divider], answers[:divider])

    expected = answers[divider:]
    predicted = classifier.predict(data[divider:])

    print "check:"
    print classifier
    print 'predicted', predicted
    print
    print classification_report(expected, predicted)

    print confusion_matrix(expected, predicted)
    print 'len of all_images:', len(all_images)

    for index, (image, prediction) in enumerate(
            zip(all_images[divider:], predicted)[:25]):
        #for index, (image, prediction) in enumerate(zip(all_images, answers)[50:75]):
        print index, prediction

        pylab.subplot(5, 5, index + 1)
        pylab.imshow(image, cmap=pylab.cm.gray_r)
        pylab.title('Prediction: ' + numToTile(prediction))

    pylab.show()
Example #23
def test_margin():
    """
    Test predict_margin
    TODO: more tests
    """
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_array_almost_equal(clf.predict_margin(T),
                              [[0.976], [-0.939], [-0.619]],
                              decimal=3)
Example #24
def test_coef_and_intercept_SVC_vs_LinearSVC():
    """
    Test that SVC and LinearSVC return the same coef_ and intercept_
    """
    svc = svm.SVC(kernel='linear', C=1).fit(X, Y)
    linsvc = svm.LinearSVC(C=1, penalty='l2', loss='l1', dual=True).fit(X, Y)

    assert_array_equal(linsvc.coef_.shape, svc.coef_.shape)
    assert_array_almost_equal(linsvc.coef_, svc.coef_, decimal=5)
    assert_array_almost_equal(linsvc.intercept_, svc.intercept_, decimal=5)
Example #25
def test_error():
    """
    Test that it gives proper exception on deficient input
    """
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)

    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, clf.fit, X, Y)

    Y2 = Y[:-1]  # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X, Y2)
    assert_raises(ValueError, svm.SVC, X, Y2)

    # Test with arrays that are non-contiguous.
    Xf = np.asfortranarray(X)
    clf = svm.SVC()
    clf.fit(Xf, Y)
    assert_array_equal(clf.predict(T), true_result)
Example #26
def test_sample_weights():
    """
    Test weights on individual samples
    """
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_array_equal(clf.predict(X[2]), [1.])

    sample_weight = [.1] * 3 + [10] * 3
    clf.fit(X, Y, sample_weight=sample_weight)
    assert_array_equal(clf.predict(X[2]), [2.])
Example #27
def test_libsvm_parameters():
    """
    Test parameters on classes that make use of libsvm.
    """

    clf = svm.SVC(kernel='linear').fit(X, Y)
    assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.support_vectors_, (X[1], X[3]))
    assert_array_equal(clf.intercept_, [0.])
    assert_array_equal(clf.predict(X), Y)
Example #28
def demo(num=10, kwargs={'kernel': 'rbf', 'gamma': 0.7, 'C': 1.0}):
    D, cp, ca = loadData()
    Dr = D[0:num, [689, 3659]]  #,1444]]
    cpr, car = cp[0:num], ca[0:num]

    clfP = svm.SVC(**kwargs)
    log('fitting plastic')
    t = time.time()
    clfP.fit(Dr, cpr)
    log('in %f sec' % (time.time() - t))
    plotClf(clfP, Dr, cpr)
    return clfP, Dr, cpr
Example #29
def test_libsvm_iris():
    """
    Check consistency on dataset iris.
    """

    # shuffle the dataset so that labels are not ordered

    for k in ('linear', 'rbf'):
        clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
        assert np.mean(clf.predict(iris.data) == iris.target) > 0.9

    assert_array_equal(clf.label_, np.sort(clf.label_))
Example #30
def test_sample_weights():
    """
    Test weights on individual samples
    """
    # TODO: check on NuSVR, OneClass, etc.
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_array_equal(clf.predict(X[2]), [1.])

    sample_weight = [.1] * 3 + [10] * 3
    clf.fit(X, Y, sample_weight=sample_weight)
    assert_array_equal(clf.predict(X[2]), [2.])
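The weighting here acts (up to solver details) like per-sample replication: a weight of 10 behaves roughly like repeating that sample 10 times. A sketch of that equivalence, assuming integer weights:

w = np.array([1, 1, 1, 10, 10, 10])
clf_rep = svm.SVC().fit(np.repeat(np.array(X), w, axis=0), np.repeat(Y, w))
# clf_rep.predict(X[2]) should agree with the sample_weight fit above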