Example #1
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from LoadData import load_iris_and_fit_data
from PlotData import plot


# Logistic sigmoid: squashes a raw score z into the range (0, 1)
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


X_train_std, X_test_std, y_train, y_test = load_iris_and_fit_data()

# Train a logistic regression classifier.
# C is the inverse regularization strength (C = 1/lambda); the large value
# 1000.0 applies only weak regularization against overfitting.
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)

# Plot result
plot(X_train_std, X_test_std, y_train, y_test, lr, range(105, 150))

# Predict
y_pred = lr.predict(X_test_std)
print('Accuracy Score = %.2f' % accuracy_score(y_test, y_pred))

# Sanity check: reproduce predict_proba from the learned weights
print('Coef Matrix = \n%s' % lr.coef_.T)
print('Intercept Matrix = \n%s' % lr.intercept_)
y_pred_proba = lr.predict_proba(X_test_std)
# Per-class sigmoid scores, row-normalized so each row sums to one
# (this mirrors predict_proba under the one-vs-rest scheme)
sigmoid_result = sigmoid(np.dot(X_test_std, lr.coef_.T) + lr.intercept_)
sum_row_result = sigmoid_result.sum(axis=1, keepdims=True)
product_matrix_result = np.true_divide(sigmoid_result, sum_row_result)
print('Auto Predict')
print(y_pred_proba[0:5, :].round(3))
print('Predict by product matrix')
print(product_matrix_result[0:5, :].round(3))
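# Quick verification (a sketch, not part of the original example): under
# scikit-learn's one-vs-rest scheme, which was the LogisticRegression default
# in older releases, predict_proba is exactly this row-normalized sigmoid
# matrix, so the two should agree.
print('Probabilities match: %s' % np.allclose(y_pred_proba, product_matrix_result))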
Example #2
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from LoadData import load_iris_and_fit_data
# plot_decision_regions is assumed to live in PlotData alongside plot
from PlotData import plot, plot_decision_regions

# XOR toy data (assumed setup): random 2-D points labelled by the XOR of
# their coordinate signs, so the classes are not linearly separable
np.random.seed(0)
X_xor = np.random.randn(200, 2)
y_xor = np.where(np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0), 1, -1)

# Moderate gamma: a reasonably smooth RBF decision boundary
svm3 = SVC(kernel='rbf', C=1.0, random_state=0, gamma=0.2)
svm3.fit(X_xor, y_xor)

plt.figure(3)
plot_decision_regions(X_xor, y_xor, svm3)
plt.legend(loc=2)
plt.title('C=1.0, gamma=0.2')

svm4 = SVC(kernel='rbf', C=1.0, random_state=0, gamma=1000)
svm4.fit(X_xor, y_xor)

plt.figure(4)
plot_decision_regions(X_xor, y_xor, svm4)
plt.legend(loc=2)
plt.title('C=1.0, gamma=1000')

plt.show()

X_train_std, X_test_std, y_train, y_test = load_iris_and_fit_data()

# Low gamma on the iris data (gamma = 0.01 from the printout below;
# C = 1.0 assumed to match the XOR models above)
svm = SVC(kernel='rbf', C=1.0, random_state=0, gamma=0.01)
svm.fit(X_train_std, y_train)
plot(X_train_std, X_test_std, y_train, y_test, svm)

# The high-gamma model from above, refit on the iris data; the very large
# gamma produces a boundary tightly wrapped around the training points
svm4.fit(X_train_std, y_train)
plot(X_train_std, X_test_std, y_train, y_test, svm4)

y_pred = svm.predict(X_test_std)
print('Accuracy (gamma = 0.01) = %.2f' % accuracy_score(y_test, y_pred))

y_pred4 = svm4.predict(X_test_std)
print('Accuracy (gamma = 1000) = %.2f' % accuracy_score(y_test, y_pred4))
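# Sketch: instead of hand-picking C and gamma, one could tune them with a
# cross-validated grid search (the grid values below are illustrative
# assumptions, not taken from the examples above).
from sklearn.model_selection import GridSearchCV

param_grid = {'C': [0.1, 1.0, 10.0, 100.0],
              'gamma': [0.01, 0.1, 1.0, 10.0]}
gs = GridSearchCV(SVC(kernel='rbf', random_state=0), param_grid, cv=5)
gs.fit(X_train_std, y_train)
print('Best params: %s' % gs.best_params_)
print('Best CV accuracy = %.2f' % gs.best_score_)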
Example #3
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
from LoadData import load_iris_and_fit_data
from PlotData import plot

X_train_std, X_test_std, y_train, y_test = load_iris_and_fit_data()

perceptron = Perceptron(max_iter=40, eta0=0.01, random_state=0)
perceptron.fit(X_train_std, y_train)

plot(X_train_std, X_test_std, y_train, y_test, perceptron, range(105, 150))

y_pred = perceptron.predict(X_test_std)
print('Misclassified samples: %d' % (y_pred != y_test).sum())
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))
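# Sketch: a perceptron's prediction is just an argmax over per-class linear
# scores w.x + b (assuming scikit-learn's one-vs-rest handling of the three
# iris classes); this reproduces predict() from the fitted weights.
import numpy as np

scores = np.dot(X_test_std, perceptron.coef_.T) + perceptron.intercept_
manual_pred = perceptron.classes_[scores.argmax(axis=1)]
print('Manual predictions match: %s' % (manual_pred == y_pred).all())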
Example #4
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from LoadData import load_iris_only
from PlotData import plot
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

X_train, X_test, y_train, y_test = load_iris_only()
tree = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0)
tree.fit(X_train, y_train)
plot(X_train, X_test, y_train, y_test, tree)
y_pred = tree.predict(X_test)
print('Accuracy (Tree) = %.2f' % accuracy_score(y_test, y_pred))

# Export decision tree
# export_graphviz(tree, out_file='tree.dot', feature_names=['petal length', 'petal width'])
# Install GraphViz and run 'dot -Tpng tree.dot -o tree.png' in a terminal to convert the .dot file to a .png image.

forest = RandomForestClassifier(criterion='entropy',
                                n_estimators=10,
                                random_state=1,
                                n_jobs=2)
forest.fit(X_train, y_train)
plot(X_train, X_test, y_train, y_test, forest)
y_pred = forest.predict(X_test)
print('Accuracy (Forest) = %.2f' % accuracy_score(y_test, y_pred))
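# Sketch: a fitted random forest also exposes impurity-based feature
# importances (feature names assumed to match the export_graphviz call above).
for name, importance in zip(['petal length', 'petal width'],
                            forest.feature_importances_):
    print('Importance of %s = %.3f' % (name, importance))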
Example #5
from sklearn.neighbors import KNeighborsClassifier
from LoadData import load_iris_and_fit_data
from PlotData import plot
from sklearn.metrics import accuracy_score

X_train_std, X_test_std, y_train, y_test = load_iris_and_fit_data()

# p=2 with the Minkowski metric is equivalent to Euclidean distance
knn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
knn.fit(X_train_std, y_train)

plot(X_train_std, X_test_std, y_train, y_test, knn)

y_pred = knn.predict(X_test_std)
print('Accuracy (KNN) = %.2f' % accuracy_score(y_test, y_pred))
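# Sketch: k=5 is a reasonable default, but n_neighbors can also be chosen by
# cross-validation (the candidate values below are illustrative assumptions).
from sklearn.model_selection import cross_val_score

for k in [1, 3, 5, 7, 9]:
    cv_scores = cross_val_score(
        KNeighborsClassifier(n_neighbors=k, p=2, metric='minkowski'),
        X_train_std, y_train, cv=5)
    print('k = %d: mean CV accuracy = %.2f' % (k, cv_scores.mean()))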