Code Example #1
def main():
    data = pd.read_csv('data/iris.data', header=None)
    """
        columns in iris.data are as following
        sepal length | sepal width | petal length | petal width | species

        first we will show how our data looks like
    """
    # print('test how long it is:{}'.format(data.iloc[:, :].values.shape))
    # labels (the species column); take only the first 100 rows so we work with just 2 classes
    y = data.iloc[0:100, 4].values
    # encode Iris-setosa as -1 and every other species as 1
    y = np.where(y == 'Iris-setosa', -1, 1)

    X = data.iloc[0:100, [0, 2]].values  # extracted sepal and petal length

    plt.scatter(X[:50, 0],
                X[:50, 1],
                color='red',
                marker='o',
                label='iris-setosa')
    plt.scatter(X[50:, 0],
                X[50:, 1],
                color='green',
                marker='x',
                label='iris-versicolor')
    plt.title('iris-setosa and iris-versicolor')
    plt.xlabel('sepal length[cm]')
    plt.ylabel('petal length[cm]')
    plt.legend(loc='upper left')
    plt.tight_layout()
    plt.draw()
    plt.waitforbuttonpress()

    my_prcpt = Perceptron(eta=0.1, n_iter=10)

    my_prcpt.fit(X,
                 y)  # fitting with sepal and petal length (X) and classes (y)

    # now show how many errors were made during each pass (epoch) over the training data
    plt.figure()
    plt.plot(range(1, len(my_prcpt.errors_) + 1), my_prcpt.errors_, marker='.')
    plt.xticks(np.arange(1, len(my_prcpt.errors_) + 1, 1.0))
    plt.title('Errors made in following epochs')
    plt.xlabel('epochs')
    plt.ylabel('errors in prediction')
    plt.tight_layout()
    plt.draw()
    plt.waitforbuttonpress()

    plt.figure()
    plot_decision_regions(X, y, classifier=my_prcpt)
    plt.title('Decision regions')
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.legend(loc='upper left')
    plt.tight_layout()
    plt.draw()
    plt.waitforbuttonpress()
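
Many of the examples here assume a Rosenblatt-style Perceptron class exposing eta (learning rate), n_iter (number of epochs), the learned weights w_, and the per-epoch misclassification counts errors_. The exact implementation differs per project; the following is a minimal sketch of the interface these snippets rely on (an assumption modeled on the well-known Raschka implementation, not any one project's exact code):

import numpy as np

class Perceptron:
    def __init__(self, eta=0.01, n_iter=10, random_state=1):
        self.eta = eta                    # learning rate (0.0 .. 1.0)
        self.n_iter = n_iter              # passes over the training set
        self.random_state = random_state  # seed for weight initialization

    def fit(self, X, y):
        rgen = np.random.RandomState(self.random_state)
        # w_[0] is the bias unit; w_[1:] are the feature weights
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])
        self.errors_ = []
        for _ in range(self.n_iter):
            errors = 0
            for xi, target in zip(X, y):
                # classic perceptron rule: update only on a misclassification
                update = self.eta * (target - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, -1)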
Code Example #2
def run_average_face_perceptron(rate, epochs, standardize=False):

    # extract just the face images from both the training and validation datasets
    faces = feature_extraction.get_face_images(train_and_valid_images, train_and_valid_labels)

    # find the average of all the face images and subtract it from every image;
    # this also centers all of the images
    new_training_images = feature_extraction.find_average_face(faces, train_images)

    # Find the average image in the testing set and subtract it from every image
    # this allows us to compare the testing images more accurately
    new_test_images = feature_extraction.find_average_face(test_images, test_labels)

    if standardize:
        # fit the scaler on the training images only, then apply that same
        # scaling to the test images (fitting a second scaler on the test set
        # would give the two sets inconsistent scalings)
        scaler = StandardScaler().fit(new_training_images)
        standardized_train_images = scaler.transform(new_training_images)
        standardized_test_images = scaler.transform(new_test_images)
        average_face_perceptron = Perceptron(rate, epochs, "Perceptron Average Face (Standardized)")
        average_face_perceptron.fit(standardized_train_images, train_labels)
        average_results = average_face_perceptron.predict(standardized_test_images)
        average_face_perceptron.calculate_results(average_results, test_labels, 150)
        average_face_perceptron.graph_perceptron()

    else:
        average_face_perceptron = Perceptron(rate, epochs, "Perceptron Average Face")
        average_face_perceptron.fit(new_training_images, train_labels)
        average_results = average_face_perceptron.predict(new_test_images)
        average_face_perceptron.calculate_results(average_results, test_labels, 150)
        average_face_perceptron.graph_perceptron()
Code Example #3
def main():
    # load the data
    # the .csv file contains 3 groups of 2D points
    # the first column of the file is x1, the second is x2,
    # and the third is the group the point belongs to
    filepath = "./data/test.csv"
    data, labels = load_dataset(filepath)

    # keep point groups 1 and 2
    data = data[(labels == 0) | (labels == 1)]
    labels = labels[(labels == 0) | (labels == 1)]
    labels = np.where(labels == 1, 1, -1)

    # keep point groups 1 and 3
    # data = data[(labels==0) | (labels==2)]
    # labels = labels[(labels==0) | (labels==2)]
    # labels = np.where(labels == 0, 1, -1)

    # instantiate the Perceptron class
    p = Perceptron(2,
                   learning_rate=0.2,
                   lr_decay=False,
                   early_stopping=True,
                   display=True)

    # training
    p.fit(data, labels)

    # Score
    #score = p.score(data, labels)
    #print("precision : {:.2f}".format(score))

    input("Press any key to exit...")
Code Example #4
File: diabetes.py  Project: marc-harary/SLP
def main():
    # load the data and shuffle the rows
    df = pd.read_csv("diabetes.csv")
    df = np.array(df)
    np.random.shuffle(df)

    # split data into labels and design matrix
    design = df[:, :-1].T
    labels = df[:, -1]
    labels = labels.reshape((labels.shape[0],1)).T # keep dimensionality to 2
    _, m_tot = design.shape

    # split into training (first 60% of columns) and test data
    frac_train = .6
    split_idx = int(frac_train*m_tot)
    train_design = design[:, :split_idx]
    train_labels = labels[:, :split_idx]
    test_design = design[:, split_idx:]
    test_labels = labels[:, split_idx:]

    # fit perceptron
    perc = Perceptron()
    perc.fit(X=train_design, Y=train_labels, alpha=1e-4,
        lambd=0, epochs=100_000)
    
    # get model accuracies
    test_acc = perc.acc(X=test_design, Y=test_labels)
    train_acc = perc.acc(X=train_design, Y=train_labels)
    print("Test set accuracy: %.5f" % test_acc)
    print("Training set accuracy: %.5f" % train_acc)
Code Example #5
    def classification(self):
        ppt = Perceptron(eta=0.1, n_iter=10)

        ppt.fit(self.x, self.y)

        plt.plot(range(1, len(ppt.errors_) + 1), ppt.errors_, marker="o")
        plt.savefig("miss_classification.png")
Code Example #6
def main():

    #load dataset
    data = pd.read_csv('../data/iris.data', sep=',', header=0)
    
    # keep only two classes for binary classification
    dataset = data[data['CLASS'] != 'Iris-virginica']

    # use the two length features for separation
    # (the column names keep the dataset's 'LENGHT' spelling)
    X = dataset[['SEPAL_LENGHT','PETAL_LENGHT']]
    
    #converting classes to -1 and 1    
    y = dataset['CLASS'].apply(convert)
    
    #train Perceptron
    model = Perceptron(X.shape[1])
    model.fit(X,y)
    
    #plotting
    w = model.get_w()

    #decision boundary: w[0] + w[1]*x + w[2]*y = 0
    x1 = np.linspace(0, 10, 100)            
    x2 = -w[0]/w[2] - (w[1]/w[2])*x1
    plt.plot(x1,x2,'k')
    plt.scatter(X['SEPAL_LENGHT'],X['PETAL_LENGHT'])    
    plt.show()
Code Example #7
def simple_example():
    df = pd.read_csv(
        'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
        header=None)
    dummy_data = df[48:52]

    y = dummy_data.iloc[:, 4].values
    y = np.where(y == 'Iris-setosa', -1, 1)
    X = dummy_data.iloc[:, [0, 2]].values
    X[0, 1] = 3
    plt.scatter(X[:2, 0], X[:2, 1], color='red', marker='o', label='setosa')
    plt.scatter(X[2:, 0],
                X[2:, 1],
                color='blue',
                marker='x',
                label='versicolor')
    plt.xlabel('sepal length')
    plt.ylabel('petal length')
    plt.legend(loc='upper left')
    plt.show()

    ppn = Perceptron(eta=0.01, n_iter=40)
    ppn.fit(X, y)
    plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
    plt.xlabel('Epochs')
    plt.ylabel('Number of misclassifications')
    plt.show()

    plot_decision_regions(X, y, classifier=ppn)
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.legend(loc='upper left')
    plt.show()
Code Example #8
def perceptron_model(learning_rate=0.01, n_iters=1000, split_test_ratio=0.2):

    X, y = datasets.make_blobs(n_samples=150,
                               n_features=2,
                               centers=2,
                               cluster_std=1.05,
                               random_state=2)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=split_test_ratio, random_state=123)

    p = Perceptron(learning_rate=learning_rate, n_iters=n_iters)  # use the passed-in hyperparameters
    p.fit(X_train, y_train)
    predictions = p.predict(X_test)
    acc = accuracy(y_test, predictions)
    print("Perceptron classification accuracy", acc)

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    plt.scatter(X_train[:, 0], X_train[:, 1], marker='o', c=y_train)

    x0_1 = np.amin(X_train[:, 0])
    x0_2 = np.amax(X_train[:, 0])

    x1_1 = (-p.weights[0] * x0_1 - p.bias) / p.weights[1]
    x1_2 = (-p.weights[0] * x0_2 - p.bias) / p.weights[1]

    ax.plot([x0_1, x0_2], [x1_1, x1_2], 'k')

    ymin = np.amin(X_train[:, 1])
    ymax = np.amax(X_train[:, 1])
    ax.set_ylim([ymin - 3, ymax + 3])

    plt.show()

    return predictions, acc, plt
Code Example #9
    def test_score(self):
        expected_score = 1.0
        design_matrix, target_values = self.get_design_matrix_and_target_values('OR')
        perceptron = Perceptron(max_iter=100, learning_rate=0.2, activation_function='heaviside', seed=0)
        perceptron.fit(design_matrix, target_values)
        score = perceptron.score(design_matrix, target_values)
        self.assertEqual(expected_score, score)
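
The get_design_matrix_and_target_values helper is not shown here; judging from Code Example #20 (the same gbroques/perceptron project), the 'OR' case presumably returns the logical-OR truth table. A hypothetical reconstruction, for context only:

import numpy as np

def get_design_matrix_and_target_values(gate):
    # hypothetical helper: the 'OR' table below matches the training
    # data shown in Code Example #20
    truth_tables = {
        'OR': np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]),
    }
    table = truth_tables[gate]
    return table[:, :2], table[:, -1]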
Code Example #10
def run_train_validation_perceptron(rate, epochs):

    perceptron4 = Perceptron(rate, epochs, "Perceptron 4 Train & Validation")
    perceptron4.fit(train_and_valid_images, train_and_valid_labels)
    final_results = perceptron4.predict(test_images)
    perceptron4.calculate_results(final_results, test_labels, 150)
    perceptron4.graph_perceptron()
Code Example #11
def Q4():
    for m in m_num:
        class_check = True
        X = None
        y = None
        # resample until both classes are present
        # (|sum(y)| == m means every label has the same sign)
        while class_check:
            X = np.random.multivariate_normal(mean, cov, m)
            y = np.sign(np.array([0.3, -0.5]) @ X.T + 0.1)
            class_check = np.abs(np.sum(y)) == m
        plt.scatter(X.T[0], X.T[1], c=y)
        pts = np.linspace(-3.5, 3.5, 1000)
        plt.plot(pts, 0.6 * pts + 0.2, label='True')
        perceptron = Perceptron()
        perceptron.fit(X, y)
        w_perceptron = perceptron.w
        plt.plot(pts,
                 -(w_perceptron[0] * pts + w_perceptron[2]) / w_perceptron[1],
                 label='Perceptron')
        clf = SVC(C=1e10, kernel='linear')
        clf.fit(X, y)
        w_clf = clf.coef_[0]
        plt.plot(pts,
                 -(w_clf[0] * pts + clf.intercept_[0]) / w_clf[1],
                 label='SVM')
        plt.legend()
        plt.title('Hyperplane Classification for %i Samples' % m)
        plt.show()
Code Example #12
def ch2():
    # df = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data", header=None)
    df = pd.read_csv(
        "http://mlr.cs.umass.edu/ml/machine-learning-databases/iris/iris.data",
        header=None)
    y = df.iloc[0:100, 4].values
    y = np.where(y == 'Iris-setosa', -1, 1)
    X = df.iloc[0:100, [0, 2]].values
    plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
    plt.scatter(X[50:100, 0],
                X[50:100, 1],
                color='blue',
                marker='x',
                label='versicolor')

    ppn = Perceptron(eta=0.1, n_iter=10)
    ppn.fit(X, y)
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.legend(loc='upper left')
    plot_decision_regions(X, y, classifier=ppn)
    plt.savefig(PIC_LOC + "iris_ch2.png")
    plt.close()

    plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
    plt.xlabel('Epochs')
    plt.ylabel('Number of misclassifications')
    plt.savefig(PIC_LOC + "iris2_ch2.png")
    plt.close()
Code Example #13
def main():

    X, Y = loadIris()

    p = Perceptron(learnRate=0.1, learnAccuracy=1.0)
    p.fit(X, Y)
    plotDecisionBoundary(p, X, Y)
    print(p.predict([5.0, 2.1]))
Code Example #14
def train_perceptron(train_X, train_y, saved_model_path):
    train_size = train_X.shape[0]
    feat_dim = train_X.shape[1]
    epoch_num = 15
    lr = 0.0001
    clf = Perceptron(input_dim=feat_dim, lr=lr, epoch_num=epoch_num)
    clf.fit(train_X, train_y)
    clf.save_model(saved_model_path)
Code Example #15
def iris(plotting_results, live_plotting):
    # import and ready input file
    input_file = "iris.csv"
    df = pd.read_csv(input_file, header=None)
    df.head()

    # X: values, y: targets
    # extract features
    X = df.iloc[:, 0:4].values
    # extract the label column
    y = df.iloc[:, 4].values

    # Setosa:
    y_setosa = np.where(y == 'Setosa', 1, 0)

    # Versicolor:
    y_versicolor = np.where(y == 'Versicolor', 1, 0)

    # Virginica:
    y_virginica = np.where(y == 'Virginica', 1, 0)

    sets = [y_setosa, y_versicolor, y_virginica]
    predictions = []
    y_test = []
    i = 0

    for target_set in sets:

        # split data into train and test sets
        X_train, X_test, y_train, y_test_tmp = train_test_split(
            X, target_set, test_size=0.2, random_state=123)

        y_test.append(y_test_tmp)

        # create and train model
        p = Perceptron(learning_rate=0.01, n_iters=300)
        p.fit(X_train, y_train, plot=live_plotting)
        predictions.append(p.predict(X_test))

        # predictions: model output, y_test: targets
        print("Perceptron classification accuracy",
              accuracy(y_test[i], predictions[i]) * 100, "%")
        i += 1

    if (plotting_results):
        fig, (ax) = plt.subplots(1, 3, sharex=True, sharey=True)
        fig.suptitle("Results")
        for i in range(3):
            ax[i].scatter(
                range(len(y_test[i])), y_test[i], marker='o',
                color='b')  # blue circles: actual targets (y_test)
            ax[i].scatter(range(len(predictions[i])),
                          predictions[i],
                          marker='.',
                          color='r')  # red dots: model output (predictions)
            ax[i].set_xlabel("sample")
            ax[i].set_ylabel("output (r) / target (b)")
Code Example #16
    def __test_perceptron(self, perceptron: Perceptron):
        samples = numpy.array([[3, 3], [4, 3], [1, 1]])
        labels = numpy.array([1, 1, -1])

        perceptron.fit(samples, labels)

        for i in range(samples.shape[0]):
            self.assertEqual(perceptron.predict(samples[i]), labels[i])
            logging.debug("i = {} success".format(i))
Code Example #17
def step2_learning():
    ppn = Perceptron(eta=0.1)
    X, y = step1_get_data()
    ppn.fit(X, y)
    print(ppn.errors_)
    print(ppn.w_)
    with open('perceptron.dat', 'wb') as fp:
        pickle.dump(ppn, fp)
    print('Training complete')
Code Example #18
def accuracy(y_true, y_pred):
    accuracy = np.sum(y_true == y_pred) / len(y_true)
    return accuracy

# example usage:
X, y = datasets.make_blobs(n_samples=150, n_features=2, centers=2,
                           cluster_std=1.05, random_state=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=123)

p = Perceptron(learning_rate=0.01, n_iters=1000)
p.fit(X_train, y_train)
predictions = p.predict(X_test)
Code Example #19
def q2(X, y):
    print('Solving q2')
    model = Perceptron()
    model.fit(X,
              y,
              alpha=0.001,
              weight_init='random',
              epochs=200,
              verbose=False,
              do_plot=True)
Code Example #20
File: main.py  Project: gbroques/perceptron
def main():
    # Training data for logical OR function
    training_data = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    design_matrix = training_data[:, :2]
    target_values = training_data[:, -1]

    perceptron = Perceptron(max_iter=100, learning_rate=0.2)
    perceptron.fit(design_matrix, target_values)
    predictions = perceptron.predict(design_matrix)
    print(predictions)
Code Example #21
def main():
    plotting = int(input("0. Live Plot\n1. Plot Results\n2. Both\n") or 2)
    live_plotting = plotting == 0 or plotting == 2
    plotting_results = plotting == 1 or plotting == 2
    live_plotting_3d, plotting_results_3d = False, False

    file = input(
        "Enter input file (a, b, c, d, ii_a, ii_b, iris, bitmap): ") or 'a'
    if (file == "iris"):
        iris(plotting_results, live_plotting)
    elif (file == "bitmap"):
        bitmap(plotting_results, live_plotting)
    else:
        input_file = 'data_package_%s.csv' % file
        if (file.__contains__("ii_")):
            live_plotting_3d, plotting_results_3d = live_plotting, plotting_results
            live_plotting, plotting_results = False, False
        df = pd.read_csv(input_file, header=0)
        df = df._get_numeric_data()
        # targets
        targets_file = 'data_package_values_%s.csv' % file
        targets_df = pd.read_csv(targets_file, header=0)
        targets_df = targets_df._get_numeric_data()

        # x: values, y: targets
        X = df.values
        y = targets_df.values

        # split data into train and test sets
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=0.2,
                                                            random_state=123)

        # create and train model
        p = Perceptron(learning_rate=0.01, n_iters=100)
        p.fit(X_train, y_train, plot=live_plotting, plot_3d=live_plotting_3d)
        predictions = p.predict(X_test)

        if (plotting_results):
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            # blue circles: actual targets (y_test)
            plt.scatter(range(len(y_test)), y_test, marker='o', color='b')
            plt.scatter(range(len(predictions)),
                        predictions,
                        marker='.',
                        color='r')  # red dots: model output (predictions)
            plt.xlabel("sample")
            plt.ylabel("output (r) / target (b)")
        if (plotting_results_3d):
            plot_results_3d(p, X_test, y_test, predictions)
        print("Perceptron classification accuracy",
              accuracy(y_test, predictions), "%")
    plt.show()
Code Example #22
File: main.py  Project: KapitoshkaThe1st/MAI
def main():
    x = [(-2.8, 1.4), (-0.2, -3.5), (2.8, -4), (-2.1, -2.7), (0.3, -4.1),
         (-1, -4)]
    y = [0, 1, 1, 0, 1, 0]

    class_a_x = []
    class_a_y = []
    class_b_x = []
    class_b_y = []

    for i, p in enumerate(x):
        if y[i] == 0:
            class_a_x.append(p[0])
            class_a_y.append(p[1])
        else:
            class_b_x.append(p[0])
            class_b_y.append(p[1])

    print(class_a_x)
    print(class_a_y)

    colors_a = (1, 0, 0)
    colors_b = (0, 0, 1)

    area = m.pi * 3

    plt.scatter(class_a_x, class_a_y, s=area, color=colors_a, alpha=0.5)
    plt.scatter(class_b_x, class_b_y, s=area, color=colors_b, alpha=0.5)

    plt.title('Scatter plot')
    plt.xlabel('x')
    plt.ylabel('y')

    classifier = Perceptron()
    classifier.fit(np.array(x), np.array(y), n_epochs=200)

    for p, ref in zip(x, y):
        if (classifier.predict(p) != ref):
            print(f'loser! WRONG CLASSIFICATION: {p}')
            break

    weights = classifier.get_weights()
    k = -weights[0] / weights[1]
    b = -weights[2] / weights[1]

    print(f'{k=}')
    print(f'{b=}')

    f = lambda x_: k * x_ + b

    x_graph = np.linspace(-5, 5, 2)
    y_graph = np.array(list(map(f, x_graph)))
    plt.plot(x_graph, y_graph, 'g-', linewidth=2, markersize=12)
    plt.show()
Code Example #23
def main():
    # load data into shuffle matrix
    df = pd.read_csv("diabetes.csv")
    data = np.array(df)
    np.random.shuffle(data)

    # split data into labels and design matrix
    design = data[:, :-1].T
    labels = data[:, -1]
    labels = labels.reshape((labels.shape[0],1)).T # keep dims to 2
    n, m_tot = design.shape

    # normalize dataset
    maxs = np.amax(design, axis=1)
    mins = np.amin(design, axis=1)
    ranges = maxs - mins
    design -= mins.reshape(len(mins),1)
    design /= ranges.reshape(len(ranges),1)

    # split into training (first 80% of columns) and test data
    frac_train = .8
    split_idx = int(frac_train*m_tot)
    train_design = design[:, :split_idx]
    train_labels = labels[:, :split_idx]
    test_design = design[:, split_idx:]
    test_labels = labels[:, split_idx:]

    # fit neural network
    nn = NN(ns=[n,5,1], acts=["ReLU","ReLU","sigmoid"])
    nn.fit(train_design, train_labels, alpha=1e-2, epochs = 20_000)
    test_acc = nn.evaluate(X=test_design, Y=test_labels)
    train_acc = nn.evaluate(X=train_design, Y=train_labels)
    print("Network test set accuracy: %.5f" % test_acc)
    print("Network training set accuracy: %.5f" % train_acc)
    print()

    # fit perceptron
    perc = Perceptron()
    perc.fit(X=train_design, Y=train_labels, alpha=1e-4,
        lambd=1e-2, epochs=100_000)
    test_acc = perc.acc(X=test_design, Y=test_labels)
    train_acc = perc.acc(X=train_design, Y=train_labels)
    print("Own perceptron test set accuracy: %.5f" % test_acc)
    print("Own perceptron training set accuracy: %.5f" % train_acc)
    print()

    # fit standard template perceptron from sklearn
    clf = standardPerceptron(tol=1e-3, random_state=0)
    clf.fit(train_design.T, train_labels.squeeze())
    train_acc = clf.score(train_design.T, train_labels.squeeze())
    test_acc = clf.score(test_design.T, test_labels.squeeze())
    print("Sklearn perceptron test set accuracy: %.5f" % test_acc)
    print("Sklearn perceptron training set accuracy: %.5f"%train_acc)
    print()
Code Example #24
File: main.py  Project: ArtyomKaltovich/ib_ml
def one_vs_five():
    X, y = get_digits()
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.8,
                                                        shuffle=False)
    c = Perceptron(iterations=100)
    c.fit(X_train, y_train)
    pred = c.predict(X_train).reshape(-1)
    visualize(X_train, y_train, pred, c.w.reshape(-1), DIGITS_PLOT)
    print("Accuracy:", np.mean(pred == y_train))  # compare against the labels of the set actually predicted
Code Example #25
File: unit_test.py  Project: LeiDoubi/Lihang
    def test_e22(self):
        logger.info("test case e22")
        # data e2.1
        data_raw = np.loadtxt("Input/data_2-1.txt")
        X = data_raw[:, :2]
        y = data_raw[:, -1]
        clf = Perceptron(verbose=False)
        clf.fit(X, y)
        y_pred = clf.predict(X)
        logger.info(clf.w)
        logger.info(str(y_pred))
        self.assertListEqual(y.tolist(), y_pred.tolist())
Code Example #26
def train(X, y):
    classifier = Perceptron(learning_rate=0.1, n_epochs=10)
    classifier.fit(X, y)
    plt.figure(2)
    plt.title('Prediction error decreasing over time')
    time_steps = range(1, len(classifier.get_errors()) + 1)
    plt.plot(time_steps, classifier.get_errors(), marker='o')
    plt.xlabel('Epochs')
    # the weights are only updated when a prediction is wrong
    plt.ylabel('Number of weight updates')
    print('Number of errors per epoch (should be decreasing)')
    plot_decision_regions(X, y, classifier=classifier)
    plt.show()
Code Example #27
def run_pca50_perceptron():
    # Try PCA with 50 features
    pca2 = PCA(n_components=50)
    pca2.fit(train_images)
    pca_train_images2 = pca2.transform(train_images)
    pca_test_images2 = pca2.transform(test_images)

    # try perceptron with 50 dimensions
    perceptron3 = Perceptron(0.01, 15, "Perceptron 3 PCA - 50")
    perceptron3.fit(pca_train_images2, train_labels)
    perceptron3.graph_perceptron()
    results_with_pca2 = perceptron3.predict(pca_test_images2)
    perceptron3.calculate_results(results_with_pca2, test_labels, 150)
Code Example #28
def my_model(X_train, y_train, X_test, y_test):
    from perceptron import Perceptron
    constant = np.ones((len(X_train), 1))
    X_train = np.hstack((constant, X_train))
    const = np.ones((len(X_test), 1))
    X_test = np.hstack((const, X_test))
    clf = Perceptron(0.0004, 1000)
    clf.fit(X_train, y_train)
    y_pred_test = clf.predict(X_test)
    acc_test = accuracy_score(y_test, y_pred_test)
    prec_test = precision_score(y_test, y_pred_test, average='micro')
    recall_test = recall_score(y_test, y_pred_test, average='micro')
    return acc_test, prec_test, recall_test
Code Example #29
def step2_learning():
    ppn = Perceptron(eta=0.1)
    data = step1_get_data()
    X = data[0]
    y = data[1]
    # train the model
    ppn.fit(X, y)
    print(ppn.errors_)
    print(ppn.w_)
    # save the trained object to a file
    with open('./3.IrisPerceptron/perceptron.dat', 'wb') as fp:
        pickle.dump(ppn, fp)
    print("학습 완료")
Code Example #30
def main():
    data = DataManager.load_data("data/data_banknote_authentication.txt")
    # strip newlines and split each line on commas
    data = np.array([item.strip("\n") for item in data])
    data = np.array([item.split(',') for item in data])
    data = data.astype(float)  # np.float was removed in NumPy 1.24

    # 0 for authentic and 1 for inauthentic
    df = pd.DataFrame(data)
    df[4] = df[4].astype(int)
    authentic = df[df[4] == 0]
    inauthentic = df[df[4] == 1]
    X = df.iloc[np.r_[0:200, 1100:1300], [0, 3]].values
    y = np.array([0 if i < 200 else 1 for i in range(400)])
    plt.scatter(X[:200, 0],
                X[:200, 1],
                color='red',
                marker='o',
                label='authentic')
    plt.scatter(X[200:400, 0],
                X[200:400, 1],
                color='blue',
                marker='x',
                label='inauthentic')
    plt.xlabel('variance of Wavelet Transformed image')
    plt.ylabel('entropy of image')
    plt.legend(loc='upper left')
    plt.show()

    ppn = Perceptron(eta=0.1, n_iter=10)
    ppn.fit(X, y)

    plot_decision_regions(X, y, classifier=ppn)
    plt.xlabel('variance of Wavelet Transformed image')
    plt.ylabel('entropy of image')
    plt.legend(loc='upper left')
    plt.show()

    X_std = np.copy(X)
    X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
    X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

    ada = AdalineGD(n_iter=10, eta=0.01)
    ada.fit(X_std, y)
    plot_decision_regions(X_std, y, classifier=ada)
    plt.title('Adaline - gradient descent')
    plt.xlabel('variance of Wavelet Transformed image')
    plt.ylabel('entropy of image')
    plt.legend(loc='upper left')
    plt.show()
Code Example #31
File: driver.py  Project: nikhilRP/ml-scripts
def perceptron_model():
    """ Perceptron classifier on Iris flower dataset
    """

    df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)

    # setosa and versicolor
    y = df.iloc[0:100, 4].values
    y = np.where(y == 'Iris-setosa', -1, 1)

    # sepal length and petal length
    X = df.iloc[0:100, [0, 2]].values
    ppn = Perceptron(epochs=10, eta=0.1)
    ppn.fit(X, y)
    print('Weights: %s' % ppn.w_)
Code Example #32
File: main.py  Project: KIMJINMINININN/Python
def step2_learning():
    ppn = Perceptron(eta=0.1)
    # print(ppn)
    data = step1_get_data()
    X = data[0]  # petal length and width
    y = data[1]  # species
    # train the model
    ppn.fit(X, y)
    print(ppn.errors_)
    print(ppn.w_)
    # save the trained object to a file
    with open('./perceptron.dat', 'wb') as fp:
        pickle.dump(ppn, fp)
    print('Training complete')
Code Example #33
def train_perceptron(X, y):
	"""Training the perceptron model"""
	ppn = Perceptron(eta=0.1, n_iter=10)

	ppn.fit(X, y)

	plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
	plt.xlabel('Epochs')
	plt.ylabel('Number of updates')

	plt.tight_layout()
	# plt.savefig('./perceptron_1.png', dpi=300)
	plt.show()

	plot_decision_regions(X, y, classifier=ppn)
	plt.xlabel('sepal length [cm]')
	plt.ylabel('petal length [cm]')
	plt.legend(loc='upper left')

	plt.tight_layout()
	# plt.savefig('./perceptron_2.png', dpi=300)
	plt.show()
Code Example #34
File: demo.py  Project: EsraaRagaa/ML-From-Scratch
support_vector_machine = SupportVectorMachine(C=1, kernel=rbf_kernel)

# ........
#  TRAIN
# ........
print "Training:"
print "\tAdaboost"
adaboost.fit(X_train, rescaled_y_train)
print "\tNaive Bayes"
naive_bayes.fit(X_train, y_train)
print "\tLogistic Regression"
logistic_regression.fit(X_train, y_train)
print "\tMultilayer Perceptron"
mlp.fit(X_train, y_train, n_iterations=20000, learning_rate=0.1)
print "\tPerceptron"
perceptron.fit(X_train, y_train)
print "\tDecision Tree"
decision_tree.fit(X_train, y_train)
print "\tRandom Forest"
random_forest.fit(X_train, y_train)
print "\tSupport Vector Machine"
support_vector_machine.fit(X_train, rescaled_y_train)

# .........
#  PREDICT
# .........
y_pred = {}
y_pred["Adaboost"] = adaboost.predict(X_test)
y_pred["Naive Bayes"] = naive_bayes.predict(X_test)
y_pred["K Nearest Neighbors"] = knn.predict(X_test, X_train, y_train)
y_pred["Logistic Regression"] = logistic_regression.predict(X_test)
Code Example #35
#             color='red', marker='o', label='setosa')
# # plot the versicolor samples
# plt.scatter(X[50:100, 0], X[50:100, 1],
#             color='blue', marker='x', label='versicolor')
# # axis labels
# plt.xlabel('sepal length [cm]')
# plt.ylabel('petal length [cm]')
# # legend
# plt.legend(loc='upper left')
# # show the figure
# plt.show()

# create the perceptron object
ppn = Perceptron(eta=0.1, n_iter=10)
# fit the model to the training data
ppn.fit(X, y)
# line plot of the misclassification errors per epoch
# plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
# # axis labels
# plt.xlabel('Epochs')
# plt.ylabel('Number of misclassifications')
# # show the figure
# plt.show()

from matplotlib.colors import ListedColormap

def plot_decision_regions(X, y, classifier, resolution=0.02):

    # prepare the markers and the colormap
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
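    # NOTE: the snippet is cut off here. What follows is a sketch of the usual
    # completion of this Raschka-style helper (an assumed reconstruction, not
    # necessarily this project's exact code).
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot the decision surface over a mesh spanning the feature ranges
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # plot the samples of each class
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=colors[idx], marker=markers[idx],
                    label=cl, edgecolor='black')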
Code Example #36
ax = fig1.add_subplot(1, 1, 1)

# setosa iris
ax.scatter(X["Sepal length"].iloc[:50], X["Petal length"].iloc[:50],
           color="red", marker="o", label="Iris setosa")

# versicolor iris
ax.scatter(X["Sepal length"].iloc[50:], X["Petal length"].iloc[50:],
           color="blue", marker="o", label="Iris versicolor")

ax.set(xlabel="Sepal lenght", ylabel="Petal length")
ax.legend(loc="upper left")

# train the perceptron classifier
iris_ppn = Perceptron()
iris_ppn.fit(X.values, y.values)

# plot the numbers of errors for each perceptron iteration
fig2 = plt.figure()
ax = fig2.add_subplot(1, 1, 1)

ax.plot(range(1, iris_ppn.n_iterations + 1), iris_ppn.errors_, marker="o")

ax.set(xlabel="Iterations", ylabel="Number of missclassifications")

# plot the decision boundaries for the 2D dataset
fig3, ax = plot_decision_regions(X.values, y.values, iris_ppn)

ax.set(xlabel="sepal length [cm]", ylabel= "petal length [cm]")
ax.legend(loc="upper left")
Code Example #37
File: __main__.py  Project: ArnaudParan/TP-ML
#!/usr/bin/python
# -*- coding: utf-8 -*-

from perceptron import Perceptron
from gen_data import gen_arti, plot_frontiere, plot_data
import matplotlib.pyplot as plt

### Generate and plot the data
DATAX, DATAY = gen_arti(data_type=0, nbex=1000, eps=0.1)
PERCEP = Perceptron(eps=1e-1, max_iter=1000)
PERCEP.fit(DATAX, DATAY)
print(PERCEP.score(DATAX, DATAY))
plot_frontiere(DATAX, PERCEP.predict, 50)
plot_data(DATAX, DATAY)
plt.show()
Code Example #38
# plot data
plt.scatter(training_features[:50, 0], training_features[:50, 1],
            color='red', marker='o', label='setosa')
plt.scatter(training_features[50:100, 0], training_features[50:100, 1],
            color='blue', marker='x', label='versicolor')

plt.xlabel('petal length [cm]')
plt.ylabel('sepal length [cm]')
plt.legend(loc='upper left')

plt.tight_layout()
# plt.savefig('./iris_1.png', dpi=300)
plt.show()

ppn = Perceptron(eta=0.1, n_iter=10)

ppn.fit(training_features, targets)

plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of misclassifications')

plt.tight_layout()
# plt.savefig('./perceptron_1.png', dpi=300)
plt.show()

plot_decision_regions(training_features, targets, classifier=ppn)
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')

plt.tight_layout()