def train_rbf_svm(X, y):
    svc = SVC(kernel='rbf', gamma=1.0, C=1.0, random_state=1)
    svc.fit(X, y)
    plot_decision_regions(X=X, y=y, classifier=svc, test_idx=None,
                          xlabel='x', ylabel='y', title='SVC')
    return svc
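# Usage sketch (assumption, not from the original source): train_rbf_svm takes
# a 2-D feature matrix and integer labels, and an RBF kernel is most useful on
# data that is not linearly separable, e.g. an XOR-style toy set. The helper
# name demo_rbf_svm and the X_xor/y_xor construction below are illustrative.
def demo_rbf_svm():
    rng = np.random.RandomState(1)
    X_xor = rng.randn(200, 2)
    y_xor = np.where(np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0), 1, 0)
    return train_rbf_svm(X_xor, y_xor)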
def train_svc(X_train_std, y_train, X_test_std, y_test):
    X_combined_std = np.vstack((X_train_std, X_test_std))
    y_combined = np.hstack((y_train, y_test))
    svc = SVC(kernel='linear', C=100.0, random_state=1)
    svc.fit(X_train_std, y_train)
    y_pred = svc.predict(X_test_std)
    print("Accuracy: %.2f" % accuracy_score(y_test, y_pred))
    plot_decision_regions(X=X_combined_std, y=y_combined, classifier=svc,
                          test_idx=range(105, 150),
                          xlabel='petal length [standardized]',
                          ylabel='petal width [standardized]',
                          title='SVC')
    return svc
def train_logistic(X_train_std, y_train, X_test_std, y_test):
    X_combined_std = np.vstack((X_train_std, X_test_std))
    y_combined = np.hstack((y_train, y_test))
    lgr = LogisticRegression(C=100.0, random_state=1)
    lgr.fit(X_train_std, y_train)
    y_pred = lgr.predict(X_test_std)
    print("Accuracy: %.2f" % accuracy_score(y_test, y_pred))
    plot_decision_regions(X=X_combined_std, y=y_combined, classifier=lgr,
                          test_idx=range(105, 150),
                          xlabel='petal length [standardized]',
                          ylabel='petal width [standardized]',
                          title='Logistic')
    return lgr
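# Usage sketch (assumption, not from the original source): the train_* helpers
# above expect a standardized Iris train/test split whose last 45 rows are the
# test samples (hence test_idx=range(105, 150)). The scikit-learn split below
# is one way to produce such inputs; all names here are illustrative only.
def demo_train_test_split():
    from sklearn import datasets
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import StandardScaler

    iris_data = datasets.load_iris()
    X = iris_data.data[:, [2, 3]]   # petal length, petal width
    y = iris_data.target
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=1, stratify=y)
    sc = StandardScaler().fit(X_train)
    X_train_std = sc.transform(X_train)
    X_test_std = sc.transform(X_test)

    train_svc(X_train_std, y_train, X_test_std, y_test)
    train_logistic(X_train_std, y_train, X_test_std, y_test)
    return X_train_std, y_train, X_test_std, y_test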
def train_logisticBGD(X, y):
    lrgd = LogisticBGD(eta=0.05, n_iter=20, random_state=1)
    lrgd.fit(X, y)
    plot_decision_regions(X=X, y=y, classifier=lrgd, test_idx=None,
                          xlabel='petal length [standardized]',
                          ylabel='petal width [standardized]',
                          title='LogisticBGD')
    # plot the cost recorded at each training epoch
    plt.plot(range(1, len(lrgd.costs) + 1), lrgd.costs, marker='o')
    plt.xlabel('epoch')
    plt.ylabel('cost')
    plt.title('LogisticBGD-costs')
    plt.show()
    return lrgd
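# Usage sketch (assumption, not from the original source): LogisticBGD is a
# binary classifier, so train_logisticBGD would typically be run on a two-class
# subset (e.g. Iris classes 0 and 1) of a standardized training set such as the
# one sketched in demo_train_test_split above. Names are illustrative only.
def demo_logisticBGD(X_train_std, y_train):
    subset = (y_train == 0) | (y_train == 1)
    return train_logisticBGD(X_train_std[subset], y_train[subset])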
def train_perceptron(X_train_std, y_train, X_test_std, y_test):
    ppn = Perceptron(max_iter=50, tol=None, eta0=0.1, shuffle=True,
                     random_state=1, verbose=1)
    ppn.fit(X_train_std, y_train)
    y_pred = ppn.predict(X_test_std)
    print("Actual iter: %d" % ppn.n_iter_)
    print("Misclassified samples: %d of %d"
          % ((y_test != y_pred).sum(), y_test.shape[0]))
    print("Accuracy: %.2f" % accuracy_score(y_test, y_pred))
    X_combined_std = np.vstack((X_train_std, X_test_std))
    # the label arrays are 1-D, so hstack (not vstack) is used to combine them
    y_combined = np.hstack((y_train, y_test))
    plot_decision_regions(X=X_combined_std, y=y_combined, classifier=ppn,
                          test_idx=range(105, 150),
                          xlabel='petal length [standardized]',
                          ylabel='petal width [standardized]',
                          title='Perceptron')
    return ppn
# select the class labels (column 4) of the first 100 samples and convert them
# to integers (0/1) for logistic classification
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', 0, 1)
print(y)

# select sepal length (column 0) and petal length (column 2) as features
x = df.iloc[0:100, [0, 2]].values

# standardize a copy of the features
x_std = np.copy(x)
x_std[:, 0] = (x_std[:, 0] - x_std[:, 0].mean()) / x_std[:, 0].std()
x_std[:, 1] = (x_std[:, 1] - x_std[:, 1].mean()) / x_std[:, 1].std()

# training
clf = LogisticRegressionGD(n_iter=100, eta=0.01, random_state=1).fit(x_std, y)

# decision regions of the trained classifier
iris.plot_decision_regions(x_std, y, classifier=clf)
plt.title('LogisticRegressionGD')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()

# cost per epoch
plt.plot(range(1, len(clf.cost_) + 1), clf.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Cost')
plt.tight_layout()
plt.show()

"""
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
            self.errors.append(error)
        return self

    def net_input(self, X):
        return np.dot(X, self.weights[1:]) + self.weights[0]

    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, -1)


def train_perceptron(X, y, verbose=True):
    ppn = Perceptron(eta=0.1, n_iter=20)
    ppn.fit(X, y)
    if verbose:
        plt.plot(range(1, len(ppn.errors) + 1), ppn.errors, marker='o')
        plt.xlabel('epoch')
        plt.ylabel('error')
        plt.show()
    return ppn


if __name__ == "__main__":
    X, y = load_iris_data()
    ppn = train_perceptron(X, y, verbose=False)
    plot_decision_regions(X, y, ppn,
                          xlabel="sepal length [cm]",
                          ylabel="petal length [cm]",
                          title="perceptron")
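# Assumed helper (not defined in this section): load_iris_data is called in the
# __main__ block above but its body is not shown here. The sketch below, given
# a different name so it does not shadow the real helper, builds inputs
# consistent with the rest of this code: first 100 rows of the UCI Iris CSV,
# sepal length and petal length as features, and -1/1 labels to match
# Perceptron.predict. The URL and column choices are assumptions.
def load_iris_data_sketch():
    import pandas as pd
    df = pd.read_csv('https://archive.ics.uci.edu/ml/'
                     'machine-learning-databases/iris/iris.data', header=None)
    y = np.where(df.iloc[0:100, 4].values == 'Iris-setosa', -1, 1)
    X = df.iloc[0:100, [0, 2]].values
    return X, y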