Code Example #1
from main import mglearn, train_test_split, plt, np

from sklearn.svm import SVC

X, y = mglearn.tools.make_handcrafted_dataset()
svm = SVC(kernel='rbf', C=10, gamma=0.1).fit(X, y)
# Decision boundary of the RBF-kernel SVM on the handcrafted dataset
mglearn.plots.plot_2d_separator(svm, X, eps=.5)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)

# Highlight the support vectors; the sign of each dual coefficient tells
# which class the support vector belongs to
sv = svm.support_vectors_
sv_labels = svm.dual_coef_.ravel() > 0
mglearn.discrete_scatter(sv[:, 0],
                         sv[:, 1],
                         sv_labels,
                         s=15,
                         markeredgewidth=3)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")

# Effect of C and gamma (on a log scale) on the RBF SVM, as a 3x3 grid.
# Creating this figure before the support-vector scatter, as the original
# did, sent that scatter onto the wrong figure.
fig, axes = plt.subplots(3, 3, figsize=(15, 10))
for ax, C in zip(axes, [-1, 0, 3]):
    for a, gamma in zip(ax, range(-1, 2)):
        mglearn.plots.plot_svm(log_C=C, log_gamma=gamma, ax=a)
axes[0, 0].legend(["Class 0", "Class 1", "sv class 0", "sv class 1"],
                  ncol=4,
                  loc=(.9, 1.2))
plt.show()
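# Hedged follow-up sketch (not part of the original example): `n_support_`
# and `score` are standard scikit-learn SVC APIs. This reports how many
# support vectors each class keeps and the training accuracy of the fit above.
print("Support vectors per class: {}".format(svm.n_support_))
print("Training accuracy: {:.2f}".format(svm.score(X, y)))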
Code Example #2
from main import mglearn, np, plt, train_test_split

from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_moons

X, y = make_moons(n_samples=100, noise=0.25, random_state=3)
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    stratify=y,
                                                    random_state=0)
forest = RandomForestClassifier(n_estimators=5, random_state=2)
forest.fit(X_train, y_train)

# Visualize each of the five trees' partitions, then the forest's
# aggregated decision boundary in the last panel
fig, axes = plt.subplots(2, 3, figsize=(20, 10))
for i, (ax, tree) in enumerate(zip(axes.ravel(), forest.estimators_)):
    ax.set_title("Tree {}".format(i))
    mglearn.plots.plot_tree_partition(X_train, y_train, tree, ax=ax)
mglearn.plots.plot_2d_separator(forest,
                                X_train,
                                fill=True,
                                ax=axes[-1, -1],
                                alpha=.4)
axes[-1, -1].set_title("Random Forest")
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.show()
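# Hedged follow-up sketch: the split above never uses X_test, so a natural
# sanity check is to score the forest on both splits with the standard
# `score` method.
print("Train accuracy: {:.3f}".format(forest.score(X_train, y_train)))
print("Test accuracy: {:.3f}".format(forest.score(X_test, y_test)))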
Code Example #3
from main import mglearn, train_test_split, plt, np

from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans

# mglearn.plots.plot_kmeans_algorithm()
# mglearn.plots.plot_kmeans_boundaries()

X, y = make_blobs(random_state=1)
kmeans = KMeans(n_clusters=3)
kmeans.fit(X)
print("{}".format(kmeans.labels_))

# Plot the cluster assignments and mark the three cluster centers
mglearn.discrete_scatter(X[:, 0], X[:, 1], kmeans.labels_, markers='o')
mglearn.discrete_scatter(kmeans.cluster_centers_[:, 0],
                         kmeans.cluster_centers_[:, 1], [0, 1, 2],
                         markers='^', markeredgewidth=2)

plt.show()
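# Hedged follow-up sketch: `predict` assigns new points to the nearest
# cluster center, which is exactly how the k-means partition is defined.
# The random_state=7 sample here is only for illustration.
X_new, _ = make_blobs(n_samples=5, random_state=7)
print("Predicted clusters: {}".format(kmeans.predict(X_new)))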
Code Example #4
from main import mglearn, train_test_split, plt, np

from sklearn.datasets import make_blobs
from sklearn.svm import LinearSVC

X, y = make_blobs(random_state=42)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)

linear_svm = LinearSVC().fit(X, y)

# Slope (one coefficient vector per class)
print("Coefficient shape:", linear_svm.coef_.shape)
print("Coefficients:\n", linear_svm.coef_)
# Intercept (one per class)
print("Intercept shape:", linear_svm.intercept_.shape)
print("Intercepts:", linear_svm.intercept_)

# Draw each class's one-vs-rest decision line: coef[0]*x0 + coef[1]*x1 + intercept = 0
line = np.linspace(-15, 15)
for coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_,
                                  ['b', 'r', 'g']):
    plt.plot(line, -(line * coef[0] + intercept) / coef[1], c=color)
plt.ylim(-10, 15)
plt.xlim(-10, 8)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
plt.legend([
    'Class 0', 'Class 1', 'Class 2', 'Line class 0', 'Line class 1',
    'Line class 2'
],
           loc=(1.01, 0.3))
plt.show()
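# Hedged follow-up sketch: in the one-vs-rest scheme, the predicted class is
# the one whose binary classifier yields the largest decision value. The test
# point here is arbitrary, chosen near the middle of the plot.
point = np.array([[0.0, 0.0]])
print("Decision values: {}".format(linear_svm.decision_function(point)))
print("Predicted class: {}".format(linear_svm.predict(point)))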
Code Example #5
from main import mglearn, train_test_split, plt

from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC

X, y = mglearn.datasets.make_forge()

fig, axes = plt.subplots(1, 2, figsize=(10, 3))

# Fit both linear classifiers with default parameters and compare boundaries
for model, ax in zip([LinearSVC(), LogisticRegression()], axes):
    clf = model.fit(X, y)
    mglearn.plots.plot_2d_separator(clf,
                                    X,
                                    fill=False,
                                    eps=0.5,
                                    ax=ax,
                                    alpha=.7)
    mglearn.discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)
    ax.set_title("{}".format(clf.__class__.__name__))
    ax.set_xlabel("Feature 0")
    ax.set_ylabel("Feature 1")
axes[0].legend()
plt.show()
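# Hedged follow-up sketch: both models expose the regularization strength C
# (higher C means weaker regularization). A quick sweep on the same data,
# using only the standard scikit-learn fit/score API:
for C in [0.01, 1, 100]:
    clf = LogisticRegression(C=C).fit(X, y)
    print("C={:<6} training accuracy: {:.2f}".format(C, clf.score(X, y)))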
Code Example #6
from main import mglearn, plt, np

from sklearn.datasets import load_breast_cancer

cancer = load_breast_cancer()

# Per-feature histograms comparing the two classes (left commented out)
# fig, axes = plt.subplots(15, 2, figsize=(10, 20))
# malignant = cancer['data'][cancer['target'] == 0]
# benign = cancer['data'][cancer['target'] == 1]
# ax = axes.ravel()
# for i in range(30):
#     _, bins = np.histogram(cancer['data'][:, i], bins=50)
#     ax[i].hist(malignant[:, i], bins=bins, color=mglearn.cm3(0), alpha=.5)
#     ax[i].hist(benign[:, i], bins=bins, color=mglearn.cm3(2), alpha=.5)
#     ax[i].set_title(cancer['feature_names'][i])
#     ax[i].set_yticks(())
# ax[0].set_xlabel("Feature magnitude")
# ax[0].set_ylabel("Frequency")

# ax[0].legend(['malignant', 'benign'], loc='best')
# fig.tight_layout()

from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(cancer['data'])
X_scaled = scaler.transform(cancer['data'])

from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(X_scaled)
X_pca = pca.transform(X_scaled)
print("{}".format(str(X_scaled.shape)))
print("{}".format(str(X_pca.shape)))

plt.figure(figsize=(8, 8))
mglearn.discrete_scatter(X_pca[:, 0], X_pca[:, 1], cancer['target'])
plt.legend(cancer['target_names'], loc='best')
plt.gca().set_aspect('equal')
plt.xlabel("First")
plt.ylabel("Seccond")

plt.show()
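# Hedged follow-up sketch: `components_` holds the principal axes (one row
# per component) and `explained_variance_ratio_` the fraction of variance
# each captures; both are standard scikit-learn PCA attributes.
print("PCA component shape: {}".format(pca.components_.shape))
print("Explained variance ratio: {}".format(pca.explained_variance_ratio_))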