Example #1
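These snippets omit their imports. From the calls, they rely on NumPy, Matplotlib, scikit-learn's iris loader, and a hand-rolled ML library for the models and the Plot helper. A plausible preamble is sketched below; the commented-out module paths are guesses, not something the examples confirm.

# Assumed preamble for the examples below; the custom helpers' module
# paths are hypothetical.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris  # returns a Bunch; data['data'] works

# Hypothetical locations of the from-scratch helpers and models:
# from myml.utils import train_test_split, normalize, Plot, load_regression
# from myml.models import LogisticRegression, MLP, KNN, LDA, LinearRegression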
def main():
    # Load dataset
    data = load_iris()

    X = data['data']
    y = data['target']

    # Reduce to two classes (drop class 0) and relabel {1, 2} as {0, 1}
    X = X[y != 0]
    y = y[y != 0]
    y -= 1

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

    clf = LogisticRegression()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)

    accuracy = np.mean(y_pred == y_test)
    print("Accuracy:", accuracy)

    Plot().plot_in_2d(X,
                      y,
                      title="Logistic Regression",
                      accuracy=accuracy,
                      legend_labels=data['target_names'])
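The LogisticRegression above is evidently a from-scratch class rather than scikit-learn's (Example #3 has to round its raw predict() output). A minimal sketch of such a classifier, assuming batch gradient descent on the log-loss; the class name and hyperparameters are illustrative only:

# Sketch of a from-scratch logistic regression (an assumption, not the
# implementation these examples actually call).
import numpy as np

class LogisticRegressionSketch:
    def __init__(self, lr=0.1, n_iters=1000):
        self.lr = lr
        self.n_iters = n_iters

    def fit(self, X, y):
        self.w = np.zeros(X.shape[1])
        self.b = 0.0
        for _ in range(self.n_iters):
            p = 1.0 / (1.0 + np.exp(-(X @ self.w + self.b)))  # sigmoid
            # Gradient of the mean log-loss w.r.t. weights and bias
            self.w -= self.lr * (X.T @ (p - y)) / len(y)
            self.b -= self.lr * np.mean(p - y)

    def predict(self, X):
        p = 1.0 / (1.0 + np.exp(-(X @ self.w + self.b)))
        return (p >= 0.5).astype(int)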
Example #2
def main():
    data = load_iris()

    X = normalize(data['data'])
    y = data['target']

    # One-hot encode the integer labels
    y = np.zeros((data['target'].shape[0], 3))
    y[np.arange(data['target'].shape[0]).astype('int'), data['target'].astype('int')] = 1
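    # (pairing row indices np.arange(n) with the integer targets as column
    #  indices puts a single 1 in each row, at that sample's class column)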

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

    clf = MLP()
    clf.fit(X_train, y_train, n_epochs=1000, lr=0.01, n_units=16)

    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)
    accuracy = np.mean(y_pred == y_test)

    y = np.argmax(y, axis=1)

    print("Accuracy:", accuracy)

    Plot().plot_in_2d(X, y,
        title="Multilayer Perceptron",
        accuracy=accuracy,
        legend_labels=data['target_names'])
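The MLP class is likewise presumably hand-rolled; the fit(X, y, n_epochs, lr, n_units) call suggests a single hidden layer trained by full-batch backpropagation. A sketch under those assumptions (sigmoid hidden layer, softmax output; names illustrative):

# One-hidden-layer MLP sketch matching the fit() signature above
# (assumed, not the source's code).
import numpy as np

class MLPSketch:
    def fit(self, X, y, n_epochs=1000, lr=0.01, n_units=16):
        rng = np.random.default_rng(0)
        n_in, n_out = X.shape[1], y.shape[1]
        self.W1 = rng.normal(0.0, 0.1, (n_in, n_units))
        self.b1 = np.zeros(n_units)
        self.W2 = rng.normal(0.0, 0.1, (n_units, n_out))
        self.b2 = np.zeros(n_out)
        for _ in range(n_epochs):
            h = 1.0 / (1.0 + np.exp(-(X @ self.W1 + self.b1)))  # hidden layer
            z = h @ self.W2 + self.b2
            e = np.exp(z - z.max(axis=1, keepdims=True))
            p = e / e.sum(axis=1, keepdims=True)                 # softmax
            # Backprop of the mean cross-entropy loss
            d2 = (p - y) / len(X)
            d1 = (d2 @ self.W2.T) * h * (1.0 - h)
            self.W2 -= lr * (h.T @ d2)
            self.b2 -= lr * d2.sum(axis=0)
            self.W1 -= lr * (X.T @ d1)
            self.b1 -= lr * d1.sum(axis=0)

    def predict(self, X):
        h = 1.0 / (1.0 + np.exp(-(X @ self.W1 + self.b1)))
        z = h @ self.W2 + self.b2
        e = np.exp(z - z.max(axis=1, keepdims=True))
        return e / e.sum(axis=1, keepdims=True)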
Example #3
def main():
    # Load dataset
    data = load_iris()

    X = data['data']
    y = data['target']

    # Reduce to two classes (drop class 0) and relabel {1, 2} as {0, 1}
    X = X[y != 0]
    y = y[y != 0]
    y -= 1

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

    clf = LogisticRegression()
    clf.fit(X_train, y_train)
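    # predict() here evidently returns probabilities, hence the rounding
    # to 0/1 labels below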
    y_pred = np.rint(clf.predict(X_test))

    accuracy = np.mean(y_pred == y_test)
    print("Accuracy:", accuracy)

    Plot().plot_in_2d(X, y,
        title="Logistic Regression",
        accuracy=accuracy,
        legend_labels=data['target_names'])
Example #4
def main():
    data = load_iris()

    X = normalize(data['data'])
    y = data['target']

    # One-hot encode the integer labels
    y = np.zeros((data['target'].shape[0], 3))
    y[np.arange(data['target'].shape[0]).astype('int'),
      data['target'].astype('int')] = 1

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

    clf = MLP()
    clf.fit(X_train, y_train, n_epochs=1000, lr=0.01, n_units=16)

    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)
    accuracy = np.mean(y_pred == y_test)

    y = np.argmax(y, axis=1)

    print("Accuracy:", accuracy)

    Plot().plot_in_2d(X,
                      y,
                      title="Multilayer Perceptron",
                      accuracy=accuracy,
                      legend_labels=data['target_names'])
Example #5
def main():

    data = load_regression()

    X = data['data'][:-150]
    y = data['target'][:-150]

    # Insert constant ones for bias
    X = np.insert(X, 0, 1, axis=1)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    n_samples, n_features = np.shape(X)

    model = LinearRegression()

    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    mse = np.mean((y_test - y_pred) ** 2)
    # Print MSE
    print('Mean Squared Error: %.4f' % mse)

    y_pred_line = model.predict(X)

    X = X[:, 1]
    X_train = X_train[:, 1]
    X_test = X_test[:, 1]

    # Color map
    cmap = plt.get_cmap('viridis')

    # Plot the results
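    # (the 366 factor below presumably rescales a day-of-year fraction in X
    #  back to days -- an assumption about how the data was normalized)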
    m1 = plt.scatter(366 * X_train, y_train, color=cmap(0.9), s=10)
    m2 = plt.scatter(366 * X_test, y_test, color=cmap(0.5), s=10)
    plt.plot(366 * X,
             y_pred_line,
             color='black',
             linewidth=2,
             label="Prediction")
    plt.suptitle("Linear Regression")
    plt.title("MSE: %.2f" % mse, fontsize=10)
    plt.legend((m1, m2), ("Training data", "Test data"), loc='lower right')
    plt.show()
Example #6
def main():
    data = load_iris()
    X = data['data']
    y = data['target'].astype('int')

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

    clf = KNN()
    y_pred = clf.predict(5, X_test, X_train, y_train)

    accuracy = np.mean(y_pred == y_test)

    print("Accuracy:", accuracy)

    Plot().plot_in_2d(X,
                      y,
                      title="K-Nearest Neighbors",
                      accuracy=accuracy,
                      legend_labels=data['target_names'])
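The call above hands k and the training set straight to predict(), i.e. KNN is implemented as a lazy learner with no fit step. A sketch consistent with that signature, assuming Euclidean distance and a majority vote:

# From-scratch KNN sketch matching predict(k, X_test, X_train, y_train)
# (assumed, not the source's code).
import numpy as np

class KNNSketch:
    def predict(self, k, X_test, X_train, y_train):
        y_pred = np.empty(len(X_test), dtype=y_train.dtype)
        for i, x in enumerate(X_test):
            dists = np.linalg.norm(X_train - x, axis=1)  # Euclidean distances
            nearest = y_train[np.argsort(dists)[:k]]     # k closest labels
            y_pred[i] = np.bincount(nearest).argmax()    # majority vote
        return y_pred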
Example #7
def main():

    data = load_regression()

    X = data['data'][:-150]
    y = data['target'][:-150]

    # Insert constant ones for bias
    X = np.insert(X, 0, 1, axis=1)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    n_samples, n_features = np.shape(X)

    model = LinearRegression()

    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    mse = np.mean((y_test - y_pred) ** 2)
    # Print MSE
    print('Mean Squared Error: %.4f' % mse)

    y_pred_line = model.predict(X)

    X = X[:, 1]
    X_train = X_train[:, 1]
    X_test = X_test[:, 1]

    # Color map
    cmap = plt.get_cmap('viridis')

    # Plot the results
    m1 = plt.scatter(366 * X_train, y_train, color=cmap(0.9), s=10)
    m2 = plt.scatter(366 * X_test, y_test, color=cmap(0.5), s=10)
    plt.plot(366 * X, y_pred_line, color='black', linewidth=2, label="Prediction")
    plt.suptitle("Linear Regression")
    plt.title("MSE: %.2f" % mse, fontsize=10)
    plt.legend((m1, m2), ("Training data", "Test data"), loc='lower right')
    plt.show()
Example #8
def main():
    data = load_iris()
    X = data['data']
    y = data['target']

    # Reduce to two classes (drop class 2)
    X = X[y != 2]
    y = y[y != 2]

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

    clf = LDA()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)

    accuracy = np.mean(y_pred == y_test)

    print("Accuracy:", accuracy)

    Plot().plot_in_2d(X, y,
        title="Linear Discriminant Analysis",
        accuracy=accuracy,
        legend_labels=data['target_names'])
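For the two remaining classes, a minimal two-class LDA consistent with the fit/predict calls above might look like the following; the pooled-covariance estimate and the midpoint threshold are assumptions, not the source's choices.

# Two-class LDA sketch (assumed implementation).
import numpy as np

class LDASketch:
    def fit(self, X, y):
        X0, X1 = X[y == 0], X[y == 1]
        # Pooled within-class covariance and the two class means
        cov = np.cov(X0, rowvar=False) + np.cov(X1, rowvar=False)
        self.w = np.linalg.inv(cov) @ (X1.mean(axis=0) - X0.mean(axis=0))
        # Decision threshold: midpoint of the projected class means
        self.t = (X0.mean(axis=0) + X1.mean(axis=0)) @ self.w / 2.0

    def predict(self, X):
        return (X @ self.w > self.t).astype(int)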