# imports needed by the helpers below (make_handcrafted_dataset and plot_2d_separator
# are assumed to be the mglearn helper functions of the same names)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.datasets import make_blobs
from mglearn.tools import make_handcrafted_dataset
from mglearn.plot_2d_separator import plot_2d_separator


def plot_rbf_svm_parameters():
    X, y = make_handcrafted_dataset()

    fig, axes = plt.subplots(1, 4, figsize=(15, 3))
    for ax, C in zip(axes, [1e0, 5, 10, 100]):
        ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])

        svm = SVC(kernel='rbf', C=C).fit(X, y)
        plot_2d_separator(svm, X, ax=ax, eps=.5)
        ax.set_title("C = %f" % C)

    fig, axes = plt.subplots(1, 4, figsize=(15, 3))
    for ax, gamma in zip(axes, [0.1, .5, 1, 10]):
        ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
        svm = SVC(gamma=gamma, kernel='rbf', C=1).fit(X, y)
        plot_2d_separator(svm, X, ax=ax, eps=.5)
        ax.set_title("gamma = %f" % gamma)
def plot_linear_svc_regularization():
    X, y = make_blobs(centers=2, random_state=4, n_samples=30)
    # a carefully hand-designed dataset: two points are relabelled as class 0 below
    y[7] = 0
    y[27] = 0

    fig, axes = plt.subplots(1, 3, figsize=(12, 4))

    for ax, C in zip(axes, [1e-2, 1, 1e2]):
        ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])

        svm = SVC(kernel='linear', C=C).fit(X, y)
        plot_2d_separator(svm, X, ax=ax, eps=.5)
        ax.set_title("C = %f" % C)
def plot_svm(log_C, log_gamma):
    X, y = make_blobs(centers=2, random_state=4, n_samples=30)
    # a carefully hand-designed dataset: two points are relabelled as class 0 below
    y[7] = 0
    y[27] = 0
    C = 10. ** log_C
    gamma = 10. ** log_gamma
    svm = SVC(kernel='rbf', C=C, gamma=gamma).fit(X, y)
    ax = plt.gca()
    plot_2d_separator(svm, X, ax=ax, eps=.5)
    # plot data
    ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
    # plot support vectors
    sv = svm.support_vectors_
    ax.scatter(sv[:, 0], sv[:, 1], s=230, facecolors='none', edgecolors='k', zorder=10, linewidth=3)
    ax.set_title("C = %.4f gamma = %.4f" % (C, gamma))
def plot_svm(log_C, log_gamma):
    X, y = make_handcrafted_dataset()
    C = 10.**log_C
    gamma = 10.**log_gamma
    svm = SVC(kernel='rbf', C=C, gamma=gamma).fit(X, y)
    ax = plt.gca()
    plot_2d_separator(svm, X, ax=ax, eps=.5)
    # plot data
    ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
    # plot support vectors
    sv = svm.support_vectors_
    ax.scatter(sv[:, 0], sv[:, 1], s=230, facecolors='none', edgecolors='k', zorder=10, linewidth=3)
    ax.set_title("C = %.4f gamma = %.4f" % (C, gamma))
Example #6
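This example assumes X_train, X_test, y_train, y_test already exist. A minimal setup sketch, purely an assumption so the snippet can run end to end:

# assumed data setup (not part of the original example): a simple two-class 2-D dataset
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split

X, y = make_blobs(centers=2, n_samples=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)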
"""
#LogisticRegression and Prediction
#logistic regression is called linear model it will separate two inputs with a straight line
from sklearn.linear_model import LogisticRegression
classifier=LogisticRegression()
"""

# KNeighbors classifier
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier

classifier = KNeighborsClassifier(n_neighbors=10)
classifier.fit(X_train, y_train)
y_predict = classifier.predict(X_test)


import plot_2d_separator      # local helper module providing plot_2d_separator()
plot_2d_separator.plot_2d_separator(classifier, X_train)

#Accuracy
print(" accuracy = {} ".format(np.sum(y_predict==y_test)/np.size(y_test)))
# or np.mean(y_predict==y_test)
# or classifier.score(X_test,y_test)
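
The three expressions compute the same quantity, the fraction of correct test predictions; a quick sanity check using the variables above:

acc_sum = np.sum(y_predict == y_test) / np.size(y_test)
acc_mean = np.mean(y_predict == y_test)
acc_score = classifier.score(X_test, y_test)   # score() returns mean accuracy for classifiers
assert np.isclose(acc_sum, acc_mean) and np.isclose(acc_mean, acc_score)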

plt.figure()
#visualize test results

correct = y_predict == y_test
plt.scatter(X_test[correct][:, 0], X_test[correct][:, 1], c=y_test[correct], marker='o')
plt.scatter(X_test[~correct][:, 0], X_test[~correct][:, 1], c='black', marker='x')
plt.title("Testing Data")
plt.xlabel('feature_1')
plt.ylabel('feature_2')
plt.show()
Example #7
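X and y are not defined in this excerpt; because the pipeline below expands the inputs to degree-7 polynomial features, a two-feature dataset with a non-linear class boundary shows the effect of C most clearly. One assumed setup:

# assumed data setup (not part of the original example)
from sklearn.datasets import make_moons

X, y = make_moons(n_samples=200, noise=0.25, random_state=0)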
# imports used in this example (plot_2d_separator is assumed to be the same local helper as in the previous example)
import matplotlib.pyplot as plt
from plot_2d_separator import plot_2d_separator
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures

# plot data
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.5, edgecolors='k', cmap=plt.cm.coolwarm)
plt.show()

degree = 7
coeffs = [1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8]
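# note: in scikit-learn's LogisticRegression, C is the inverse regularization strength,
# so larger values of C mean weaker regularization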

plt.figure()

for i, C in enumerate(coeffs):
    # build a pipeline: polynomial feature expansion followed by logistic regression
    plt.subplot(2, 4, i + 1)
    plt.tight_layout()
    poly_features = PolynomialFeatures(degree=degree, include_bias=False)
    log_reg = LogisticRegression(C=C)
    model = Pipeline([("poly_features", poly_features), ("logistic_regression", log_reg)])

    # train classifier
    model.fit(X, y)
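    # accuracy on the training data (this example has no separate test set)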
    accuracy = model.score(X, y)

    # plot classification results
    title = "C = {:.2e} ({:.2f}%)"
    plot_2d_separator(model, X, fill=True)
    plt.scatter(X[:, 0], X[:, 1], s=15, c=y, alpha=0.5, edgecolors='k', cmap=plt.cm.coolwarm)
    plt.title(title.format(C, accuracy * 100), fontsize=10)

plt.show()