Example #1
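# A sketch of the imports this example assumes: numpy and the Gaussian-quantiles
# generator from scikit-learn. AdaBoostClassifier and calculate_accuracy_score
# are presumably this repository's own implementations, so their import paths
# are not guessed here.
import numpy as np
from sklearn.datasets import make_gaussian_quantiles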
def main():
    # Example 1
    def load_simple_data():
        features = [[1.0, 2.1], [2.0, 1.1], [1.3, 1.0], [1.0, 1.0],
                    [2.0, 1.0]]
        labels = [1.0, 1.0, -1.0, -1.0, 1.0]
        return np.array(features), np.array(labels)

    X, y = load_simple_data()
    model = AdaBoostClassifier(n_estimators=5)
    model.fit(X, y)

    y_pred = model.predict(X)
    print(y_pred)
    accuracy = calculate_accuracy_score(y, y_pred)
    print("Accuracy Score: {:.2%}".format(accuracy))

    # Example 2
    X, y = make_gaussian_quantiles(n_samples=1300, n_features=10, n_classes=2)

    n_split = 300  # first 300 samples for training, the remaining 1000 for testing
    X_train, X_test = X[:n_split], X[n_split:]
    y_train, y_test = y[:n_split], y[n_split:]

    model = AdaBoostClassifier(n_estimators=100)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    accuracy = calculate_accuracy_score(y_test, y_pred)
    print("Accuracy Score: {:.2%}".format(accuracy))
Example #2
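# A sketch of the imports this example assumes: numpy, matplotlib, and the iris
# loader / train-test split from scikit-learn. KNeighborsClassifier and
# calculate_accuracy_score are presumably this repository's own implementations.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split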
def main():
    iris = load_iris()
    # Use only the first two features so the decision regions can be
    # plotted in two dimensions.
    X = iris.data[:, :2]
    y = iris.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    model = KNeighborsClassifier(n_neighbors=5)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    accuracy = calculate_accuracy_score(y_test, y_pred)
    print("Accuracy Score: {:.2%}".format(accuracy))

    h = 0.05  # step size in the mesh
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # Classify every point of the mesh and draw the decision regions.
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light, shading='auto')

    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold, edgecolor='k', s=20)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.savefig('./examples/example_KNeighborsClassifier.png')
Example #3
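# A sketch of the imports this example assumes. XGBoost and
# calculate_accuracy_score are presumably this repository's own implementations
# (the xgboost package itself exposes XGBClassifier, not a class named XGBoost).
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split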
def main():
    iris = load_iris()
    X = iris.data
    y = iris.target

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    model = XGBoost(n_estimators=300, learning_rate=0.001)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)

    accuracy = calculate_accuracy_score(y_test, y_pred)
    print("Accuracy Score: {:.2%}".format(accuracy))
Example #4
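# A sketch of the imports this example assumes. GradientBoostingClassifier and
# calculate_accuracy_score are presumably this repository's own implementations;
# scikit-learn ships an equivalent classifier in sklearn.ensemble.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split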
def main():
    iris = load_iris()
    X = iris.data
    y = iris.target

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    clf = GradientBoostingClassifier(n_estimators=200, learning_rate=0.5)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)

    accuracy = calculate_accuracy_score(y_test, y_pred)
    print("Accuracy Score: {:.2%}".format(accuracy))
Example #5
def main():
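    # Random forest on the full iris dataset. RandomForestClassifier and
    # calculate_accuracy_score are presumably this repository's own
    # implementations; load_iris and train_test_split are assumed to come
    # from scikit-learn.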
    iris = load_iris()
    X = iris.data
    y = iris.target

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    model = RandomForestClassifier(n_estimators=200, max_depth=10)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)

    accuracy = calculate_accuracy_score(y_test, y_pred)
    print("Accuracy Score: {:.2%}".format(accuracy))
Example #6
def main():
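    # Decision tree on the banknote-authentication dataset. ClassificationTree,
    # load_banknote and calculate_accuracy_score are presumably this
    # repository's own implementations; print_tree() prints the learned splits
    # before prediction.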
    data = load_banknote()
    X = data.data
    y = data.target

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.4)

    model = ClassificationTree(max_depth=5)
    model.fit(X_train, y_train)
    model.print_tree()

    y_pred = model.predict(X_test)
    accuracy = calculate_accuracy_score(y_test, y_pred)
    print("Accuracy Score: {:.2%}".format(accuracy))
Example #7
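# A sketch of the imports this example assumes: numpy plus the digits dataset
# and normalize from scikit-learn. Perceptron, l2_loss, to_categorical and
# calculate_accuracy_score are presumably this repository's own implementations
# (scikit-learn's Perceptron takes neither a learning_rate nor a callable penalty).
import numpy as np
from sklearn import datasets
from sklearn.preprocessing import normalize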
def main():
    data = datasets.load_digits()
    X = normalize(data.data)
    y = data.target

    y = to_categorical(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    model = Perceptron(max_iter=5000, learning_rate=0.001, penalty=l2_loss)
    model.fit(X_train, y_train)

    # Convert one-hot predictions and labels back to class indices.
    y_pred = np.argmax(model.predict(X_test), axis=1)
    y_true = np.argmax(y_test, axis=1)

    accuracy = calculate_accuracy_score(y_true, y_pred)
    print("Accuracy Score: {:.2%}".format(accuracy))
Example #8
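# A sketch of the imports this example assumes. GaussianNB may be this
# repository's own implementation or scikit-learn's sklearn.naive_bayes.GaussianNB;
# normalize, load_iris and train_test_split are assumed to come from scikit-learn,
# and calculate_accuracy_score from this repository.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize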
def main():
    # Example 1
    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    Y = np.array([1, 1, 1, 2, 2, 2])

    model = GaussianNB()
    model.fit(X, Y)
    print(model.predict([[-0.8, -1]]))

    # Example 2
    iris = load_iris()
    X = normalize(iris.data)
    y = iris.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    model = GaussianNB()
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    accuracy = calculate_accuracy_score(y_test, y_pred)
    print("Accuracy Score: {:.2%}".format(accuracy))
Example #9
def main():
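    # Logistic regression on a single feature (petal width) of the iris data.
    # LogisticRegression here takes learning_rate and fit_intercept arguments,
    # so it is presumably this repository's own implementation rather than
    # scikit-learn's; calculate_accuracy_score likewise. numpy (np),
    # matplotlib.pyplot (plt) and load_iris are assumed to be imported.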
    iris = load_iris()
    X = iris["data"][:, 3:]  # petal width
    y = (iris["target"] == 2).astype(np.int)  # 1 if Iris-Virginica, else 0

    model = LogisticRegression(max_iter=1000,
                               learning_rate=0.001,
                               fit_intercept=True)
    model.fit(X, y)
    y_pred = model.predict(X)

    accuracy = calculate_accuracy_score(y, y_pred)
    print("Accuracy Score: {:.2%}".format(accuracy))

    X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
    y_proba = model.predict_proba(X_new)

    plt.plot(X_new, y_proba[:, 0], "m--", label="Not Iris-Virginica")
    plt.plot(X_new, y_proba[:, 1], "c-", label="Iris-Virginica")

    plt.plot(X[y == 0], y[y == 0], "m.")
    plt.plot(X[y == 1], y[y == 1], "c.")
    plt.legend()
    plt.savefig("./examples/example_LogisticRegression.png")