Example #1
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# plot_regions: user-defined helper that draws 2-D decision regions
# X_train, X_test, y_train, y_test come from an earlier train/test split

# combine train and test set again
X_combined = np.append(X_train, X_test, axis=0)
y_combined = np.append(y_train, y_test)

# plot coefficients over varying C parameter
weights, params, scores = [], [], []
for c in np.arange(-5, 5):
    model = SVC(kernel='linear', C=10.**c, random_state=1)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    weights.append(model.coef_[1])  # weights of one pairwise sub-classifier (SVC is one-vs-one)
    params.append(10.**c)
    scores.append(accuracy_score(y_test, y_pred))
    plt.figure(figsize=(8, 6))
    plot_regions(X_combined,
                 y_combined,
                 model,
                 test_idx=range(X_train.shape[0], X_combined.shape[0]))
    plt.xlabel('petal length')
    plt.ylabel('petal width')
    plt.legend(loc='upper left')
weights = np.array(weights)

plt.figure(figsize=(8, 6))
plt.plot(params, weights[:, 0], label='petal length [standardized]')
plt.plot(params,
         weights[:, 1],
         label='petal width [standardized]',
         linestyle='--')
plt.xscale('log')  # C spans ten orders of magnitude, so use a log axis
plt.xlabel('C')
plt.ylabel('coefficients')
plt.legend(loc='upper left')
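
In scikit-learn's SVC the parameter C is the inverse of the regularization strength, which is what the plot above shows: a small C weights the slack penalty lightly relative to the margin term, so regularization dominates and the coefficients shrink toward zero. A minimal sketch of the same effect, assuming the standardized training split from above is still in scope:

# the L2 norm of a sub-classifier's weight vector grows with C
for c in (0.01, 1.0, 100.0):
    m = SVC(kernel='linear', C=c, random_state=1).fit(X_train, y_train)
    print(c, np.linalg.norm(m.coef_[1]))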
Example #2
# (continues a scatter plot of the XOR data created earlier in the script;
# np, plt, SVC, train_test_split, accuracy_score and the plot_regions helpers
# are assumed imported/defined above)
plt.ylim([-3, 3])
plt.xlabel('first element')
plt.ylabel('second element')
plt.legend(loc='best')
plt.tight_layout()

X_train, X_test, y_train, y_test = train_test_split(X_xor, y_xor, test_size=0.33, random_state=42)
X_combined = np.append(X_train, X_test, axis=0)
y_combined = np.append(y_train, y_test)
kernelSVM = SVC(kernel='rbf', random_state=1, gamma=0.1, C=10.0)
kernelSVM.fit(X_train, y_train)
y_pred_kernel = kernelSVM.predict(X_test)
score_kernel = accuracy_score(y_test, y_pred_kernel)
print("accuracy_kernel:", score_kernel)
plt.figure('kernel SVM', figsize=(8,6))
plot_regions(X_combined, y_combined, kernelSVM, test_idx=range(X_train.shape[0], X_xor.shape[0]))
plt.legend(loc='best')
plt.tight_layout()
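
For reference, the RBF kernel behind this model scores similarity as K(x, x') = exp(-gamma * ||x - x'||^2), so gamma = 0.1 sets how quickly a training point's influence decays with distance. A quick sanity check against scikit-learn's own pairwise helper:

from sklearn.metrics.pairwise import rbf_kernel
a, b = X_train[:1], X_train[1:2]
manual = np.exp(-0.1 * np.sum((a - b) ** 2))
print(np.isclose(manual, rbf_kernel(a, b, gamma=0.1)[0, 0]))  # expected: True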

# lift the XOR data into 3-D by appending the product feature x1*x2
X_xor = np.append(X_xor, (X_xor[:, 0] * X_xor[:, 1]).reshape(-1, 1), axis=1)

X_train, X_test, y_train, y_test = train_test_split(X_xor, y_xor, test_size=0.33, random_state=42)
X_combined = np.append(X_train, X_test, axis=0)
y_combined = np.append(y_train, y_test)
linearSVM = SVC(kernel='linear', C=10, random_state=1)
linearSVM.fit(X_train, y_train)
y_pred_linear = linearSVM.predict(X_test)
score_linear = accuracy_score(y_test, y_pred_linear)
print("accuracy_linear:", score_linear)

plot_regions3D(X_combined, y_combined, linearSVM, test_idx=range(X_train.shape[0], X_xor.shape[0]))
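
The appended x1*x2 column is exactly what lets the linear kernel succeed on XOR data: the class label follows the sign of the product of the two coordinates, so in the lifted 3-D space a plane through z = 0 separates the classes. A tiny illustration on hypothetical XOR corner points (not the generated X_xor):

corners = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1]])  # XOR corners
labels = np.array([1, 1, -1, -1])                         # class = sign(x1*x2)
z = np.sign(corners[:, 0] * corners[:, 1])                # lifted coordinate
print(np.array_equal(z, labels))                          # True: split by z = 0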
Example #3

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.linear_model import LogisticRegression

df = pd.read_csv('wine_data.csv')
X, y = df.iloc[:, 1:].values, df.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, stratify=y)

scaler = StandardScaler()
X_train_std = scaler.fit_transform(X_train)
X_test_std = scaler.transform(X_test)

# LDA from sklearn
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train_std, y_train)
X_test_lda = lda.transform(X_test_std)

# logistic regression
lr = LogisticRegression(multi_class='ovr', random_state=1, solver='lbfgs')
lr.fit(X_train_lda, y_train)
y_pred = lr.predict(X_test_lda)
score = accuracy_score(y_test, y_pred)
print("accuracy:", score)

# plot decision boundaries
X_combined = np.append(X_train_lda, X_test_lda, axis=0)
y_combined = np.append(y_train, y_test)

plot_regions(X_combined, y_combined, classifier=lr, test_idx=range(X_train.shape[0], X.shape[0]))
plt.xlabel('LD1')
plt.ylabel('LD2')
plt.legend(loc='lower left')
plt.tight_layout()
plt.show()
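
With three wine classes, LDA can produce at most two discriminants, so n_components=2 loses nothing here; the fitted model's explained_variance_ratio_ attribute reports how the between-class variance splits across LD1 and LD2:

# share of class-discriminatory variance captured by each discriminant
print(lda.explained_variance_ratio_)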
Example #4
# standardize both features
X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

# LogisticGD: user-defined logistic regression trained with batch gradient descent
model = LogisticGD(eta=0.01, n_iter=30)
model.fit(X, y)
predict = model.predict(X)

# plot cost function at each training batch
plt.figure('training curve', figsize=(8, 6))
plt.plot(range(1, model.n_iter + 1), model.cost_, marker='o')
plt.xlabel('epochs')
plt.ylabel('negative log-likelihood')
plt.tight_layout()
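
The curve plotted above is the negative log-likelihood that gradient descent minimizes. Assuming LogisticGD follows the usual formulation and stores the bias in w_[0] (consistent with the disabled boundary code below), each epoch's cost could be recomputed as:

# J(w) = -sum(y*log(phi) + (1 - y)*log(1 - phi)), phi = sigmoid(w.x + b)
phi = 1.0 / (1.0 + np.exp(-(X.dot(model.w_[1:]) + model.w_[0])))
cost = -y.dot(np.log(phi)) - (1 - y).dot(np.log(1 - phi))
print(cost)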

# plot samples and decision boundary
plt.figure('setosa & versicolor', figsize=(8, 6))
plot_regions(X, y, model)
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.legend(loc='upper left')
plt.show()
"""
bound = -(model.w_[1]/model.w_[2])*np.linspace(x_min, x_max, 50) - model.w_[0]/model.w_[2]
plt.figure('setosa & versicolor', figsize=(8, 6))
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Set1, edgecolor='k')
plt.plot(np.linspace(x_min, x_max, 50), bound)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
"""

Example #5

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score

# X, y: feature matrix and class labels loaded earlier in the source script
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)
print("Label counts in y_train:", np.bincount(y_train))
print("Label counts in y_test:", np.bincount(y_test))

sc = StandardScaler()
sc.fit(X_train)
X_train = sc.transform(X_train)
X_test = sc.transform(X_test)

model = Perceptron()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Accuracy: %.3f" % accuracy_score(y_test, y_pred))

'''
# (learning-curve plot written for a custom perceptron exposing errors_;
#  sklearn's Perceptron does not record per-epoch errors)
plt.figure('learning curve', figsize=(8, 6))
plt.plot(range(1, model.n_iter+1), model.errors_, marker='o')
plt.xlabel('epochs')
plt.ylabel('cost')
plt.tight_layout()
'''

X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plt.figure('setosa & versicolor', figsize=(8, 6))
plot_regions(X=X_combined, y=y_combined, classifier=model, test_idx=range(len(y_train), len(y)))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
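
For context, the Perceptron fitted above applies the classic error-driven rule: weights change only on misclassified samples, by eta0 * y * x for labels in {-1, +1}. A self-contained sketch of a single update step (a hypothetical helper, not scikit-learn's internals):

def perceptron_step(w, b, x, y, eta0=1.0):
    # predict with the current weights; update only on a misclassification
    y_hat = 1 if w.dot(x) + b >= 0.0 else -1
    if y_hat != y:
        w = w + eta0 * y * x
        b = b + eta0 * y
    return w, b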