def benchmark(clf):
    """Fit *clf* on the training split and score it on the test split.

    Returns a tuple ``(err, train_time, test_time)`` where *err* is the
    zero-one error rate on the test set and the two times are wall-clock
    seconds spent in ``fit`` and ``predict`` respectively.
    """
    start = time()
    clf.fit(X_train, y_train)
    train_time = time() - start

    start = time()
    pred = clf.predict(X_test)
    test_time = time() - start

    # Normalize the zero-one loss by the number of test samples.
    err = metrics.zero_one(y_test, pred) / float(pred.shape[0])
    return err, train_time, test_time
def benchmark(clf):
    """Train and evaluate *clf*.

    Returns ``(error_rate, fit_seconds, predict_seconds)`` computed on the
    module-level train/test splits.

    NOTE(review): this re-defines the ``benchmark`` from the snippet above
    (scraped duplicate); both behave identically.
    """
    fit_start = time()
    clf.fit(X_train, y_train)
    fit_seconds = time() - fit_start

    predict_start = time()
    pred = clf.predict(X_test)
    predict_seconds = time() - predict_start

    error_rate = metrics.zero_one(y_test, pred) / float(pred.shape[0])
    return error_rate, fit_seconds, predict_seconds
Example #3
0
def test_losses():
    """Check the regression/classification loss helpers on the held-out half."""
    # Second half of the targets is the ground truth for these checks.
    y_true = y[half:]
    assert_equal(zero_one(y_true, y_), 13)
    assert_almost_equal(mean_square_error(y_true, y_), 12.999, 2)
    assert_almost_equal(explained_variance(y_true, y_), -0.04, 2)
Example #4
0
## }
## print("Training LinearSVC on training set")
## clf = LinearSVC(**parameters)

# Fit an SGD classifier on the training split and time the fit.
# NOTE: the original mixed Python 2 `print` statements with the
# `print(...)` call used above; everything is normalized to the
# function-call form so the script parses under a single interpreter.
print("Training SGD with alpha=0.001 and n_iter=2")
clf = SGD(alpha=0.001, n_iter=2)
t0 = time()
clf.fit(X_train, y_train)
print("done in %fs" % (time() - t0))

# Time the prediction pass over the held-out test split.
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))

print("Classification performance:")
print()
print(metrics.classification_report(
    y_test,
    pred,
    labels=[-1, 1],
    class_names=['any other types', 'cover type 1']))
print("")

# Zero-one error normalized by the number of test samples.
err = metrics.zero_one(y_test, pred) / float(pred.shape[0])
print("Error rate: %.4f" % err)
print("")

cm = metrics.confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)