# Random forest on the iris petal features; plots decision regions over
# the combined train/test set (test samples are indices 105-149).
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import matplotlib.pyplot as plt
from Util import plot_decision_regions
from IrisData import getIrisData

# Raw (unstandardized) features: tree ensembles do not require scaling.
X_train, X_test, X_combined, y_train, y_test, y_combined = getIrisData(
    standardized=False)

# Ten entropy-split trees, fit on the training partition only,
# trained with two parallel jobs.
forest = RandomForestClassifier(criterion='entropy',
                                n_estimators=10,
                                random_state=1,
                                n_jobs=2)
forest.fit(X_train, y_train)

plot_decision_regions(X_combined, y_combined,
                      classifier=forest,
                      test_idx=range(105, 150))
plt.xlabel('petal length [cm]')
plt.ylabel('petal width [cm]')
plt.show()
# XOR toy data classified with an RBF-kernel SVM.
import matplotlib.pyplot as plt
import numpy as np
from Util import plot_decision_regions
from sklearn.svm import SVC
from sklearn import datasets
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

np.random.seed(0)

# 200 2-D standard-normal points; the label is the XOR of the two
# coordinate signs, mapped to {+1, -1} -- not linearly separable.
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)

gamma = 1
svm = SVC(kernel='rbf', C=10.0, random_state=0, gamma=gamma)
svm.fit(X_xor, y_xor)

plot_decision_regions(X_xor, y_xor, classifier=svm)
plt.legend(loc='upper left')
# FIX: raw string -- '\g' in a plain string literal is an invalid escape
# sequence (a DeprecationWarning, and a SyntaxError in future Python).
plt.title(r"RBF Kernel SVM ($\gamma $ = %f)" % gamma)
plt.show()
# NOTE(review): this fragment assumes X, y, np, plt, Perceptron,
# train_test_split, StandardScaler and plot_decision_regions are
# defined/imported earlier in the file -- confirm against full source.

# Stratified 70/30 split, then standardize using training statistics only
# (the scaler is fit on X_train and merely applied to X_test).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=1, stratify=y)
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)

# FIX: Perceptron's 'n_iter' parameter was deprecated in scikit-learn 0.19
# and removed in 0.21; the epoch count is now 'max_iter'.
ppn = Perceptron(max_iter=40, eta0=0.1, random_state=1)
ppn.fit(X_train_std, y_train)
y_pred = ppn.predict(X_test_std)
# print('Misclassified samples: %d' % (y_test != y_pred).sum())
# print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))

X_combined_std = np.vstack((X_train_std, X_test_std))
# Renamed from y_combined_std: labels are concatenated, not standardized.
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined_std, y=y_combined,
                      classifier=ppn, test_idx=range(105, 150))
# FIX: typo in axis label -- 'ptal length' -> 'petal length'.
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.show()
# LDA projection of the wine data onto 2 discriminants, then a logistic
# regression classifier plotted in the projected space.
from Wine import getWineData
from Util import plot_decision_regions
# FIX: the sklearn.lda module was deprecated in 0.17 and removed in 0.19;
# LDA now lives in sklearn.discriminant_analysis.
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.linear_model import LogisticRegression
import numpy as np
import matplotlib.pyplot as plt

X_train_std, X_test_std, y_train, y_test = getWineData()

# Supervised projection onto the two most class-discriminative axes.
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train_std, y_train)

lr = LogisticRegression()
lr.fit(X_train_lda, y_train)

plot_decision_regions(X_train_lda, y_train, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.show()
# Logistic regression on the iris petal features; plots decision regions
# over the combined train/test set (test samples are indices 105-149).
import matplotlib.pyplot as plt
import numpy as np
from Util import plot_decision_regions
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]  # petal length, petal width
y = iris.target

# 70/30 split, then standardize using training statistics only.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0)
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)

# Large C => weak regularization.
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)

X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
# Pass classifier by keyword for consistency with the sibling scripts.
plot_decision_regions(X_combined_std, y_combined, classifier=lr,
                      test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.show()