Example #1
# Assumed imports: these helper names match ML-From-Scratch-style projects;
# the 'mlfromscratch' module paths below are a best guess, not confirmed by the source.
import numpy as np
from sklearn import datasets
from mlfromscratch.utils import train_test_split, normalize, to_categorical, accuracy_score, Plot
from mlfromscratch.deep_learning.activation_functions import Sigmoid
from mlfromscratch.deep_learning.loss_functions import CrossEntropy
from mlfromscratch.supervised_learning import Perceptron

def main():
    data = datasets.load_digits()
    x = normalize(data.data)
    y = data.target
    # one-hot encode the targets
    y = to_categorical(y)
    x_train, x_test, y_train, y_test = train_test_split(x, y, seed=1)

    clf = Perceptron(n_iterations=5000,
                     learning_rate=0.001,
                     loss=CrossEntropy,
                     activation_function=Sigmoid)
    clf.fit(x_train, y_train)

    y_pred = np.argmax(clf.predict(x_test), axis=1)
    y_test = np.argmax(y_test, axis=1)

    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    Plot().plot_2d(x_test,
                   y_pred,
                   title="Perceptron",
                   accuracy=accuracy,
                   legend_label=np.unique(y))
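For intuition, here is a minimal sketch of the training loop such a one-layer model runs, assuming the sigmoid activation and cross-entropy loss passed above. All names here are illustrative, not the library's API.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def fit_one_layer(x, y, n_iterations=5000, learning_rate=0.001):
    # x: (n_samples, n_features), y: one-hot (n_samples, n_outputs)
    n_features, n_outputs = x.shape[1], y.shape[1]
    rng = np.random.default_rng(1)
    limit = 1.0 / np.sqrt(n_features)
    w = rng.uniform(-limit, limit, (n_features, n_outputs))
    b = np.zeros((1, n_outputs))
    for _ in range(n_iterations):
        y_pred = sigmoid(x @ w + b)
        # for sigmoid + cross-entropy the gradient w.r.t. the pre-activation
        # simplifies to (y_pred - y)
        grad = y_pred - y
        w -= learning_rate * (x.T @ grad)
        b -= learning_rate * grad.sum(axis=0, keepdims=True)
    return w, b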
Example #2
# Assumed imports, as in Example #1
from sklearn import datasets
from mlfromscratch.utils import train_test_split, normalize, accuracy_score, Plot
from mlfromscratch.supervised_learning import KNN

def main():
    data = datasets.load_iris()
    X = normalize(data.data)
    y = data.target
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.30)

    knn = KNN(k=5)
    y_pred = knn.predict(x_test, x_train, y_train)
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    Plot().plot_2d(x_test,
                   y_pred,
                   title="K Nearest Neighbors",
                   accuracy=accuracy,
                   legend_label=data.target_names)
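KNN has no real training step; prediction is a majority vote over the k nearest training points. A minimal sketch matching the predict(x_test, x_train, y_train) call above (knn_predict is an illustrative name):

import numpy as np

def knn_predict(x_test, x_train, y_train, k=5):
    y_pred = np.empty(len(x_test), dtype=y_train.dtype)
    for i, sample in enumerate(x_test):
        # Euclidean distance to every training point
        dists = np.linalg.norm(x_train - sample, axis=1)
        nearest = y_train[np.argsort(dists)[:k]]
        # majority vote among the k nearest labels
        values, counts = np.unique(nearest, return_counts=True)
        y_pred[i] = values[np.argmax(counts)]
    return y_pred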
Example #3
# Assumed imports, as in Example #1
from sklearn import datasets
from mlfromscratch.utils import train_test_split, normalize, accuracy_score, Plot
from mlfromscratch.supervised_learning import LogisticRegression

def main():
    data = datasets.load_iris()
    # keep only two classes and relabel them {0, 1}
    x = normalize(data.data[data.target != 0])
    y = data.target[data.target != 0]
    y[y == 1] = 0
    y[y == 2] = 1
    x_train, x_test, y_train, y_test = train_test_split(x, y, seed=1)
    model = LogisticRegression()
    model.fit(x_train, y_train)
    y_pred = model.predict(x_test)
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    # plot the results
    Plot().plot_2d(x_test, y_pred, title="Logistic Regression", accuracy=accuracy)
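The model itself is small: batch gradient descent on the log-loss. A sketch of what a from-scratch LogisticRegression might do internally (function and parameter names are assumptions; bias term omitted for brevity):

import numpy as np

def logistic_fit(x, y, n_iterations=4000, learning_rate=0.1):
    w = np.zeros(x.shape[1])
    for _ in range(n_iterations):
        p = 1.0 / (1.0 + np.exp(-(x @ w)))             # predicted probabilities
        w -= learning_rate * (x.T @ (p - y)) / len(y)  # log-loss gradient step
    return w

def logistic_predict(x, w):
    return (1.0 / (1.0 + np.exp(-(x @ w))) >= 0.5).astype(int)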
Example #4
# Assumed imports, as in Example #1
from sklearn import datasets
from mlfromscratch.utils import train_test_split, normalize, accuracy_score, Plot
from mlfromscratch.supervised_learning import NaiveBayes

def main():
    data = datasets.load_digits()
    x = normalize(data.data)
    y = data.target
    x_train, x_test, y_train, y_test = train_test_split(x, y)

    clf = NaiveBayes()
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)
    Plot().plot_2d(x_test,
                   y_pred,
                   title="Naive Bayes",
                   accuracy=accuracy,
                   legend_label=data.target_names)
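Gaussian naive Bayes reduces to per-class feature means, variances, and priors; prediction picks the class with the highest log-posterior. A compact sketch (function names and the small variance floor are assumptions):

import numpy as np

def gaussian_nb_fit(x, y):
    params = {}
    for c in np.unique(y):
        xc = x[y == c]
        # per-class mean, variance (floored for stability), and prior
        params[c] = (xc.mean(axis=0), xc.var(axis=0) + 1e-9, len(xc) / len(x))
    return params

def gaussian_nb_predict(x, params):
    classes = np.array(list(params.keys()))
    log_posts = np.stack([
        np.log(prior) - 0.5 * np.sum(np.log(2 * np.pi * var) + (x - mu) ** 2 / var, axis=1)
        for mu, var, prior in params.values()
    ])
    return classes[np.argmax(log_posts, axis=0)]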
Example #5
# Assumed imports, as in Example #1
from sklearn import datasets
from mlfromscratch.utils import train_test_split, accuracy_score, Plot
from mlfromscratch.supervised_learning import GradientBoostingClassifier

def main():
    print("Gradient Boosting Classification")
    data = datasets.load_iris()
    x = data.data
    y = data.target
    x_train, x_test, y_train, y_test = train_test_split(x, y)

    clf = GradientBoostingClassifier()
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    Plot().plot_2d(x_test,
                   y_pred,
                   title="Gradient Boosting",
                   accuracy=accuracy,
                   legend_label=data.target_names)
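GradientBoostingClassifier above boosts trees against the gradient of a classification loss. The core idea shows most clearly with squared loss in one dimension, where the negative gradient is just the residual; a toy sketch (all names illustrative, inputs assumed to have at least two distinct values):

import numpy as np

def fit_stump(x, r):
    # best single threshold on 1-D inputs x minimizing squared error against r
    best = None
    for t in np.unique(x)[:-1]:
        left, right = r[x <= t], r[x > t]
        sse = ((left - left.mean()) ** 2).sum() + ((right - right.mean()) ** 2).sum()
        if best is None or sse < best[0]:
            best = (sse, t, left.mean(), right.mean())
    return best[1:]

def gradient_boost_1d(x, y, n_rounds=50, lr=0.1):
    f = np.full(len(y), y.mean())
    stumps = []
    for _ in range(n_rounds):
        # each round fits a stump to the residuals (the negative gradient of
        # squared loss) and adds it with shrinkage lr
        t, lv, rv = fit_stump(x, y - f)
        f += lr * np.where(x <= t, lv, rv)
        stumps.append((t, lv, rv))
    return stumps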
Example #6
# Assumed imports, as in Example #1
from sklearn import datasets
from mlfromscratch.utils import train_test_split, accuracy_score, Plot
from mlfromscratch.supervised_learning import XGBoost

def main():
    print("XGBoost")
    data = datasets.load_iris()
    x = data.data
    y = data.target
    x_train, x_test, y_train, y_test = train_test_split(x, y, seed=1)

    clf = XGBoost()
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    Plot().plot_2d(x_test,
                   y_pred,
                   title="XGBoost",
                   accuracy=accuracy,
                   legend_label=data.target_names)
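What separates XGBoost from plain gradient boosting is the second-order (Newton) update: each leaf weight is set from the summed gradients and Hessians of the samples in the leaf. The closed form, as a one-line sketch (the library class above computes this inside its tree builder):

def xgb_leaf_weight(grads, hessians, reg_lambda=1.0):
    # optimal leaf weight w* = -G / (H + lambda)
    return -sum(grads) / (sum(hessians) + reg_lambda)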
Example #7
# Assumed imports, as in Example #1
from sklearn import datasets
from mlfromscratch.utils import train_test_split, accuracy_score, Plot
from mlfromscratch.supervised_learning import LDA

def main():
    data = datasets.load_iris()
    x = data.data
    y = data.target
    # Three -> two classes
    x = x[y != 2]
    y = y[y != 2]
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33)

    lda = LDA()
    lda.fit(x_train, y_train)
    y_pred = lda.predict(x_test)

    accuracy = accuracy_score(y_test, y_pred)

    print("Accuracy:", accuracy)

    Plot().plot_2d(x_test, y_pred, title="LDA", accuracy=accuracy)
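Two-class Fisher LDA projects onto w = Sw^-1 (mu1 - mu0) and thresholds at the midpoint of the projected class means. A minimal sketch of what the LDA class might compute (the threshold rule in particular is an assumption):

import numpy as np

def lda_fit(x, y):
    x0, x1 = x[y == 0], x[y == 1]
    # within-class scatter (sum of per-class covariances) and projection direction
    sw = np.cov(x0, rowvar=False) + np.cov(x1, rowvar=False)
    w = np.linalg.pinv(sw) @ (x1.mean(axis=0) - x0.mean(axis=0))
    threshold = 0.5 * ((x0 @ w).mean() + (x1 @ w).mean())
    return w, threshold

def lda_predict(x, w, threshold):
    return (x @ w > threshold).astype(int)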
Example #8
# Assumed imports, as in Example #1
from sklearn import datasets
from mlfromscratch.utils import train_test_split, accuracy_score, Plot
from mlfromscratch.supervised_learning import RandomForest

def main():
    print("Random Forest")
    data = datasets.load_digits()
    x = data.data
    y = data.target
    x_train, x_test, y_train, y_test = train_test_split(x, y, seed=1)

    clf = RandomForest(n_estimators=100)
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    Plot().plot_2d(x_test,
                   y_pred,
                   title="Random Forest",
                   accuracy=accuracy,
                   legend_label=data.target_names)
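A random forest trains each tree on a bootstrap sample and combines the trees by majority vote; the aggregation step looks roughly like this (an illustrative stand-in for RandomForest.predict, not the library's code):

import numpy as np

def majority_vote(predictions):
    # predictions: (n_trees, n_samples) array of per-tree class labels
    out = np.empty(predictions.shape[1], dtype=predictions.dtype)
    for j in range(predictions.shape[1]):
        values, counts = np.unique(predictions[:, j], return_counts=True)
        out[j] = values[np.argmax(counts)]
    return out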
Example #9
# Assumed imports, as in Example #1
import numpy as np
from sklearn import datasets
from mlfromscratch.utils import train_test_split, accuracy_score, Plot
from mlfromscratch.supervised_learning import Adaboost

def main():
    data = datasets.load_digits()
    y = data.target
    digit1 = 1
    digit2 = 5
    idx = np.append(np.where(y == digit1)[0], np.where(y == digit2)[0])
    y = data.target[idx]
    # Change labels to {-1, 1}
    y[y == digit1] = -1
    y[y == digit2] = 1
    x = data.data[idx]
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.5)

    clf = Adaboost(n_clf=5)
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    Plot().plot_2d(x_test, y_pred, title="Adaboost", accuracy=accuracy)
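The {-1, +1} relabeling above is exactly what AdaBoost's update needs: each round weighs a weak classifier by its accuracy and re-weights the samples it got wrong. The core of one round, as a sketch (names illustrative):

import numpy as np

def adaboost_round(w, y, y_pred):
    # w: current sample weights; y, y_pred: labels in {-1, +1}
    err = np.sum(w[y != y_pred])                 # weighted error of this classifier
    alpha = 0.5 * np.log((1.0 - err) / (err + 1e-10))
    w = w * np.exp(-alpha * y * y_pred)          # up-weight misclassified samples
    return alpha, w / np.sum(w)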
Example #10
def accuracy(self, y, y_pred):
    # y and y_pred are one-hot / probability arrays: compare argmax class
    # indices (assumes numpy as np and an accuracy_score helper are in scope)
    return accuracy_score(np.argmax(y, axis=1), np.argmax(y_pred, axis=1))
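A quick check of what this does: np.argmax recovers the class index from each one-hot (or probability) row before comparing.

import numpy as np

y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])            # one-hot labels
y_prob = np.array([[.8, .1, .1], [.2, .5, .3], [.6, .3, .1]])   # predicted probs
acc = np.mean(np.argmax(y_true, axis=1) == np.argmax(y_prob, axis=1))
print(acc)  # rows 1 and 2 agree, row 3 does not -> 0.666...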