def __new_perceptrons(self):
        self.hidden_pers = []
        self.output_pers = []
        self.history_weight, self.history_idx = [], 0

        # weight history is kept only in the 2-D case (presumably so the
        # decision boundary can be plotted later)
        if self.dim == 2:
            self.history_weight.append([])
        # hidden layer
        for i in range(self.dim):
            p = perceptron.Perceptron(self.dim)
            p.set_learning_rate(self.learning_rate)
            if self.dim == 2:
                self.history_weight[self.history_idx].append(p.weight)
            self.hidden_pers.append(p)

        # output layer: a single perceptron suffices for binary
        # classification; otherwise use one perceptron per class
        if self.classes == 2:
            p = perceptron.Perceptron(self.dim)
            p.set_learning_rate(self.learning_rate)
            if self.dim == 2:
                self.history_weight[self.history_idx].append(p.weight)
            self.output_pers.append(p)
        else:
            for i in range(self.classes):
                p = perceptron.Perceptron(self.dim)
                p.set_learning_rate(self.learning_rate)
                if self.dim == 2:
                    self.history_weight[self.history_idx].append(p.weight)
                self.output_pers.append(p)
    def __init__(self, inputlength):
        self.hiddenlayer = [
            p.Perceptron(inputlength),
            p.Perceptron(inputlength),
            p.Perceptron(inputlength),
            p.Perceptron(inputlength)
        ]
        self.output = [p.Perceptron(4)]
Example 3
def runPerceptron(numTrainValues, numTestValues, pixels, tune, useTrainedWeights, info):
    """
    runPerceptron() runs the perceptron learning algorithm on the MNIST dataset.
    It also prints associated analytics, including the accuracy and time taken
    to run.

    Keyword arguments:
    numTrainValues -- number of training values to train the perceptron
    numTestValues -- number of test values to test the trained perceptron
    pixels -- number of pixels to chop from the margins of the image
    tune -- a boolean for whether to tune to find the optimal number of iterations
    useTrainedWeights -- boolean to use pretrained weights
    info -- boolean to get information about common classification mistakes
    """
    t = time.perf_counter()  # time.clock() was removed in Python 3.8
    perceptronClassifier = perceptron.Perceptron(range(10), 3)

    if useTrainedWeights:
        perceptronClassifier.useTrainedWeights()
    else:
        print("Loading Training Data....\n")
        trainingData, trainingLabels, validationData, validationLabels, features = loadFeatures.loadTrainingData(numTrainValues, pixels, tune)

        print("Training Perceptron....\n")
        perceptronClassifier.train(trainingData, trainingLabels, validationData, validationLabels, tune)

    print("Loading Testing Data....\n")
    testingData, testingLabels = loadFeatures.loadTestingData(numTestValues, pixels)

    print("Testing Perceptron....\n")
    classifiedData = perceptronClassifier.classify(testingData)
    test(classifiedData, testingLabels, info)

    print("Total Time {0}".format(time.perf_counter() - t))
    def __init__(self, num_nodes, num_inputs_per_node):
        self.num_nodes = num_nodes

        # create nodes
        self.nodes = []
        for i in range(num_nodes):
            self.nodes.append(perceptron.Perceptron(num_inputs_per_node))
Example 5
def init_perceptrons(selected_training_algorithm="RPLA"):
    for _ in range(10):
        perceptrons.append(pr.Perceptron(5 * 5))

    global training_inputs
    training_inputs = [np.ravel(n) for n in numbers]

    # dispatch to the chosen training routine; the three branches were
    # identical apart from the method called
    trainers = {"RPLA": "train_rpla", "SPLA": "train_spla", "PLA": "train_pla"}
    if selected_training_algorithm in trainers:
        train = trainers[selected_training_algorithm]
        for i in range(20):
            # two training samples per digit, so labels has two hot entries
            labels = np.zeros(20)
            labels[i % 10] = 1
            labels[i % 10 + 10] = 1
            print(f"Perceptron weights for digit: {i % 10}")
            getattr(perceptrons[i % 10], train)(training_inputs, labels)
def training():
    brain = perceptron.Perceptron()
    i = 0
    iteration = 1
    while i < iteration:
        for p in points:
            inputs = [p.x, p.y, p.bias]  # bias = 1
            predict = brain.guess(inputs)

            # get the line currently predicted by the neural network
            yLow, yHigh = brain.guessLine()
            animXLine.append([xLowLimit, xHighLimit])
            animYLine.append([yLow, yHigh])

            # train the neural network
            error = p.target - predict
            brain.train(inputs, error)
        i += 1

    # training done
    for p in points:
        inputs = [p.x, p.y, p.bias]  # bias = 1
        predict = brain.guess(inputs)
        if p.target == 1:
            ax.plot(p.x, p.y, 'go', label='correct', markersize=10, markeredgecolor='k')
        else:
            ax.plot(p.x, p.y, 'ro', label='wrong', markersize=10, markeredgecolor='k')
Example 7
    def __init__(self, name, x_pos, y_pos, x_speed, y_speed, world,
                 num_directions, learning_rate, epochs, color):

        pygame.sprite.Sprite.__init__(self)

        self.x_pos = x_pos
        self.y_pos = y_pos
        self.old_x_pos = x_pos
        self.old_y_pos = y_pos
        self.x_speed = x_speed
        self.y_speed = y_speed
        self.score = 0
        self.world = world
        self.num_directions = num_directions
        self.percy = perceptron.Perceptron(
            (num_directions * len(self.world.pop_names)), num_directions,
            learning_rate, epochs)
        self.color = color

        self.dir_vector = []

        if self.color == 0:
            self.image = pygame.image.load("Images/30_30_Red_Square.png")
        elif self.color == 1:
            self.image = pygame.image.load("Images/30_30_Blue_Square.png")
        else:
            print("Unknown colour number: ", self.color)

        x_size, y_size = self.image.get_rect().size
        self.rect = pygame.Rect(x_pos, y_pos, x_size, y_size)
        self.mask = pygame.mask.from_surface(self.image)
        self.name = name
        self.dir = (x_pos, y_pos)
Example 8
    def test_perceptron_or(self, mock):
        # OR truth table as [inputs, expected_output] pairs
        orTest = [
            [[0, 0], 0],
            [[0, 1], 1],
            [[1, 0], 1],
            [[1, 1], 1],
        ]

        perceptron1 = perceptron.Perceptron(.1, .2)
        perceptron1.train(orTest)

        self.assertIsNotNone(mock.call_args)
        self.assertIsNotNone(
            re.match(r'succeeded after \d+ iterations', mock.call_args[0][0]))
Example 9
    def test_full(self):
        """ FULL INTEGRATION TEST
        This test runs all three trainings required for HW #1

        ASSUMPTION:
        The MNIST train and test files are in the current directory
        with the exact filenames below.
        """
        train_file = 'mnist_train.csv'
        test_file = 'mnist_validation.csv'

        bias = 1
        epochs = 50

        p = pt.Perceptron(sizes=[785, 10], train_filename=train_file, test_filename=test_file, bias=bias)

        # the three learning rates required for HW #1
        for rate in (0.00001, 0.001, 0.1):
            model, accuracy = p.train(rate=rate, epochs=epochs)
            assert model.shape == (785, 10)  # 784 pixels + bias, 10 digit classes
            assert accuracy > .80
def when_used_on_a_linearly_separable_dataset_test():
    # Given a perceptron trained on a linearly separable dataset
    # (labels are derived from the true line y = .75 * x, so the noisy
    # points remain separable by construction)
    number_correct = 0
    simulations_to_run = 100
    datapoints_per_sim = 100
    for _ in range(simulations_to_run):
        x = numpy.random.uniform(0, 10, size=datapoints_per_sim)
        y = .75 * x + 5.0 * numpy.random.normal(
            loc=0.0, scale=2.0, size=datapoints_per_sim)
        classification = [1 if yi > .75 * xi else 0 for xi, yi in zip(x, y)]
        training_dataset = list(zip(zip(x, y), classification))
        the_perceptron = perceptron.Perceptron(
            max_iterations=200, bias=0.5,
            training_rate=.001)  #, training_rate=.01
        the_perceptron.train(training_dataset)

        # When predicting
        test_results = [(the_perceptron.predict(inputs), label)
                        for inputs, label in training_dataset]
        number_correct += sum(1 for predicted, expected in test_results
                              if predicted == expected)
    print(number_correct / (1.0 * simulations_to_run * datapoints_per_sim))
    assert number_correct / (
        1.0 * simulations_to_run * datapoints_per_sim
    ) >= 0.8, "Then it should have much better than random performance."
Example 11
def irisTrainSGD(type=0):
    X_train, X_test, y_train, y_test = dbload.load_iris_dataset()

    if type == 1:
        irisPerceptron = AdalineSGD(0.001, 40, 1)
        irisPerceptron.fit_sgd(X_train, y_train)
    elif type == 0:
        irisPerceptron = AdalineGD(0.001, 40)
        irisPerceptron.fit_adaline(X_train, y_train)
    elif type == 2:
        irisPerceptron = AdalineSGD(0.001, 40)
        irisPerceptron.fit_mbgd(X_train, y_train)
    else:
        import time
        import perceptron
        irisPerceptron = perceptron.Perceptron(0.01, 50)
        start = time.time()
        irisPerceptron.fit_batch(X_train, y_train)
        print("time {:.4f}ms".format((time.time() - start) * 1000))

    predict = irisPerceptron.predict(X_test)
    errnum = (predict != y_test).sum()
    print("Misclassified number {}, Accuracy {:.2f}%".format(errnum, \
          (X_test.shape[0] - errnum)/ X_test.shape[0] * 100))

    #irisPerceptron.draw_errors()
    #irisPerceptron.draw_separate_line(X, y, 'iris')
    #irisPerceptron.draw_converge_lines(X, y)
    #irisPerceptron.draw_vectors()
    #irisPerceptron.draw_costs()
    #print("LastCost: %f" % irisPerceptron.costs_[-1])
    print('Weights: %s' % irisPerceptron.w_)
Example 12
def compare_D2():
    mean_perception_accuracy = []
    mean_svm_accuracy = []

    for m in training_sizes:
        perceptron_accuracy = 0
        svm_accuracy = 0
        for i in range(500):
            mone = 0  # "mone" counts correct perceptron predictions
            training_points, training_classified = get_points_from_D2(m)
            # resample until both classes appear in the training labels
            while 1 not in training_classified or -1 not in training_classified:
                training_points, training_classified = get_points_from_D2(m)
            p = perceptron.Perceptron()
            p.fit(training_points, training_classified)

            testing_points, testing_classified = get_points_from_D2(K)
            for j in range(len(testing_points)):
                if p.predict(testing_points[j]) == testing_classified[j]:
                    mone += 1
            temp = mone / K
            perceptron_accuracy += temp

            svm = SVC(C=1e10, kernel='linear')  # very large C: effectively a hard-margin SVM
            svm.fit(training_points, training_classified)
            svm_accuracy += svm.score(testing_points, testing_classified)
        mean_perception_accuracy.append(perceptron_accuracy / 500)
        mean_svm_accuracy.append(svm_accuracy / 500)

    plt.plot(training_sizes, mean_perception_accuracy, label="mean_perceptron")
    plt.plot(training_sizes, mean_svm_accuracy, label="mean_svm")
    plt.title("Distribution D2")
    plt.xlabel("m")
    plt.ylabel("mean accuracy")
    plt.legend(loc=4)
    plt.show()
Example 13
def gaussian_grid_matrix(ngrids, sigma):
    # pairwise formula:
    # np.exp(-lg.norm(x - y)**2 / (2 * (sigma ** 2)))
    p = perceptron.Perceptron(0.02)

    # ngrids x ngrids grid of points covering [-1, 1] x [-1, 1]
    x1 = np.linspace(-1, 1, ngrids)
    x2 = np.linspace(-1, 1, ngrids)
    X, Y = np.meshgrid(x1, x2)

    X_list = np.concatenate(X)
    Y_list = np.concatenate(Y)

    # 2 x n matrix whose columns are the grid points
    XX = np.vstack([X_list, Y_list])

    # squared norm of each point
    LXX = X_list**2 + Y_list**2

    # pairwise inner products <x_i, x_j>
    DXX = np.dot(XX.T, XX)

    RLXX = np.tile(LXX, (X_list.shape[0], 1))

    # squared pairwise distances: |x_i|^2 + |x_j|^2 - 2<x_i, x_j>
    D = RLXX + RLXX.T - 2 * DXX
    K = np.exp(-D / (2 * sigma**2))
    return K
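As a sanity check (a sketch of my own, assuming the perceptron module used above is importable), the vectorized kernel should match the direct pairwise formula; pts below reproduces the flattening order of the meshgrid:

import numpy as np

ngrids, sigma = 4, 1.0
K = gaussian_grid_matrix(ngrids, sigma)

xs = np.linspace(-1, 1, ngrids)
pts = np.array([(a, b) for b in xs for a in xs])  # meshgrid flattening order
D = np.sum((pts[:, None, :] - pts[None, :, :]) ** 2, axis=2)
assert np.allclose(K, np.exp(-D / (2 * sigma ** 2)))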
Example 14
def learn(feature_matrix, class_labels):
    classifier = perceptron.Perceptron(feature_matrix,
                                       class_labels,
                                       learning_rate=0.1,
                                       iterations=10)
    classifier.fit()
    return classifier
Example 15
    def test_report(self):
        bias = 1
        p = pt.Perceptron(sizes=[785, 10], bias=bias)
        p.rate = 0.1
        p.test_labels = testdata.test_labels_10

        predictions = [7, 2, 1, 0, 9, 1, 4, 9, 6, 9]
        test_accuracy = 0.80
        train_epoch_accuracy = [0.08735, 0.8756, 0.86975, 0.8623666666666666, 0.8714333333333333, 0.8639333333333333, 0.8916666666666667, 0.8642833333333333, 0.8893333333333333, 0.8867333333333334, 0.8793166666666666, 0.8778666666666667, 0.87285, 0.8519, 0.8774333333333333, 0.8725333333333334, 0.8682333333333333, 0.8848666666666667, 0.8672333333333333, 0.88585, 0.8916833333333334, 0.8859833333333333, 0.8844, 0.86395, 0.8768666666666667, 0.87785, 0.8990166666666667, 0.8808166666666667, 0.8971833333333333, 0.8925166666666666, 0.8995666666666666, 0.8684833333333334, 0.8915333333333333, 0.87115, 0.87725, 0.88015, 0.8810666666666667, 0.88425, 0.89875, 0.9054666666666666, 0.8758333333333334, 0.87295, 0.8893333333333333, 0.8859666666666667, 0.8913666666666666, 0.8685666666666667, 0.9002333333333333, 0.8891333333333333, 0.8698666666666667, 0.8714833333333334, 0.8774]
        test_epoch_accuracy = [0.0863, 0.877, 0.8631, 0.8585, 0.8631, 0.8531, 0.8861, 0.8558, 0.8825, 0.8776, 0.8684, 0.8656, 0.8702, 0.8418, 0.8654, 0.8673, 0.8595, 0.8746, 0.8555, 0.8757, 0.8786, 0.878, 0.8743, 0.8567, 0.8629, 0.8659, 0.8898, 0.8695, 0.8864, 0.8797, 0.8859, 0.8549, 0.8815, 0.8624, 0.8625, 0.8698, 0.8699, 0.8727, 0.8863, 0.8939, 0.8599, 0.8649, 0.879, 0.8732, 0.8805, 0.8561, 0.8859, 0.876, 0.8581, 0.8615, 0.8651]

        conf_matrix = p.report(rate=p.rate, prediction=predictions, test_accuracy=test_accuracy,
                 train_epoch_accuracy=train_epoch_accuracy, test_epoch_accuracy=test_epoch_accuracy)

        # @formatter:off
        expected_conf_matrix = np.array([
            [1,0,0,0,0,0,0,0,0,0],
            [0,2,0,0,0,0,0,0,0,0],
            [0,0,1,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0,0],
            [0,0,0,0,1,0,0,0,0,1],
            [0,0,0,0,0,0,1,0,0,0],
            [0,0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,1,0,0],
            [0,0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0,2]
        ])

        # np.allclose avoids "ValueError: The truth value of an array with more
        # than one element is ambiguous" that a bare `==` comparison would raise
        assert np.allclose(conf_matrix, expected_conf_matrix)
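The expected matrix is consistent with testdata.test_labels_10 holding the first ten MNIST test labels, [7, 2, 1, 0, 4, 1, 4, 9, 5, 9]; that is an assumption inferred from the matrix itself. A minimal sketch of the bookkeeping:

import numpy as np

labels = [7, 2, 1, 0, 4, 1, 4, 9, 5, 9]       # assumed actual labels
predictions = [7, 2, 1, 0, 9, 1, 4, 9, 6, 9]  # predictions from the test above
conf = np.zeros((10, 10), dtype=int)
for actual, predicted in zip(labels, predictions):
    conf[actual, predicted] += 1  # rows are actual digits, columns predictions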
Example 16
    def getTrainedNeuron(self):
        neuron = perceptron.Perceptron()
        trainData = [[1, 1, 1, 0, 1, 0, 0, 1, 0], [1, 0, 1, 1, 1, 1, 1, 0, 1]]
        target = [0, 1]

        neuron.to_train(trainData, target)
        return neuron
Example 17
def test(eta, n_iter, X, y):
    ppn = perceptron.Perceptron(eta=eta, n_iter=n_iter)
    ppn.fit(X, y)
    plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
    plt.xlabel('Epochs')
    plt.ylabel('Number of errors')
    plt.show()
Example 18
def grade1():
    print("=" * 20 + "Grading Problem 1" + "=" * 20)
    marks = 0
    accs = [0.90, 0.85, 0.70, 0.50]
    try:
        X_train, Y_train, X_test, Y_test = perceptron.get_data('D2')

        assert perceptron.get_features(
            X_train[0]).size <= 5, 'At most 5 features are allowed'

        X_train = np.array([perceptron.get_features(x) for x in X_train])
        X_test = np.array([perceptron.get_features(x) for x in X_test])

        C = max(np.max(Y_train), np.max(Y_test)) + 1
        D = X_train.shape[1]

        p = perceptron.Perceptron(C, D)

        p.train(X_train, Y_train)
        acc = p.eval(X_test, Y_test)

        if acc >= accs[0]:
            marks += 2.0
        elif acc >= accs[1]:
            marks += 1.5
        elif acc >= accs[2]:
            marks += 1.0
        elif acc >= accs[3]:
            marks += 0.5
    except Exception as e:
        print('Error:', e)
    print("Marks obtained in Problem 1: ", marks)
    return marks
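For illustration, a hypothetical get_features that would satisfy the at-most-5-features assertion above; the real implementation lives in the graded perceptron module and may look nothing like this:

import numpy as np

def get_features(x):
    # five simple summary statistics of the raw input vector
    x = np.asarray(x, dtype=float).ravel()
    return np.array([1.0, x.mean(), x.std(), x.min(), x.max()])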
def predict_what_weve_seen_in_the_past_test():
    the_perceptron = perceptron.Perceptron()
    the_perceptron.train([([0], 0), ([1], 1)])
    result = the_perceptron.predict([0])
    assert result == 0

    result = the_perceptron.predict([1])
    assert result == 1
def learn_NAND_with_zero_as_activation_test():
    the_perceptron = perceptron.Perceptron()
    # NAND truth table over {-1, 1} inputs
    the_perceptron.train([([1, -1], 1), ([1, 1], 0), ([-1, 1], 1),
                          ([-1, -1], 1)])
    result = the_perceptron.predict([-1, 1])
    assert result, "should be true"
    result = the_perceptron.predict([1, 1])
    assert not result, "should be false"
Example 21
    def carrega_pesos_salvos(self, pesos_salvos):
        print(">> Loading saved weights")
        for i in range(self.n_classes):
            self.camada.append(
                perceptron.Perceptron(taxa_aprendizado=self.taxa_aprendizado,
                                      epocas=self.epocas,
                                      bias=self.bias))
            self.camada[i].carrega_pesos(pesos_salvos.iloc[i, :].tolist())
Example 22
    def test_classify(self):
        classifier = perceptron.Perceptron(self.labels_weights)
        fs1 = {"f1": 1.0, "f2": 9.0, "f3": 1.0}
        fs2 = {"f1": 8.0, "f2": 1.0, "f3": 1.0}
        fs3 = {"f1": 1.0, "f2": 1.0, "f3": 8.0}
        self.assertEqual("1", classifier.classify(fs1))
        self.assertEqual("2", classifier.classify(fs2))
        self.assertEqual("3", classifier.classify(fs3))
Example 23
File: main.py Project: Sette/ia
def main():
    rede = p.Perceptron(taxa_aprendizado=0.1, iteracoes=1000, limiar=0.6)
    rede.loadAmostras("and.csv")
    rede.treinar()
    #teste = [1,0.5,1,0]
    #rede.testar(teste, 'Sim', 'Não')

    # label each sample 'Verdadeiro' (true) or 'Falso' (false)
    for teste in rede.amostras:
        rede.testar(teste, 'Verdadeiro', 'Falso')
Example 24
    def treina(self):
        # create the layer of perceptrons
        print(">> Initializing network")
        for i in range(self.n_classes):
            self.camada.append(
                perceptron.Perceptron(taxa_aprendizado=self.taxa_aprendizado,
                                      epocas=self.epocas,
                                      bias=self.bias))
            self.camada[i].inicializa_pesos(len(self.amostras[0]))

        # insert a 1 (bias input) at the start of each sample tuple
        for i in range(len(self.amostras)):
            self.amostras[i].insert(0, 1)

        # compute the epoch-0 error and initialize the error counter
        print(">> Epoch 0 errors")
        erro_count = 0
        for i in tqdm(range(len(self.amostras))):
            if (self.testa_rede(self.amostras[i]) != self.saidas[i].index(
                    max(self.saidas[i]))):
                erro_count = erro_count + 1
        self.erros.append(erro_count)

        # epoch loop
        print(">> Training perceptrons")
        for epocas_count in range(0, self.epocas):
            print("\n>> Epoch " + str(epocas_count + 1) + "/" +
                  str(self.epocas))
            # absolute error counter for this training epoch
            erro_count = 0

            # for each sample in the training set
            for i in tqdm(range(len(self.amostras))):

                # signals and sums (activation potentials) of every perceptron
                # for the current sample
                lista_sinais = []

                for j in range(len(self.camada)):
                    lista_sinais.append(self.camada[j].sinal(
                        self.camada[j].soma(self.amostras[i])))

                # apply the learning rule wherever a perceptron's output was wrong
                for j in range(len(self.camada)):
                    if (lista_sinais[j] != self.saidas[i][j]):
                        self.camada[j].treina(
                            amostras_treino=self.amostras[i],
                            gabarito_treino=self.saidas[i][j],
                            y=lista_sinais[j])

            # count the errors made in this epoch
            for i in tqdm(range(len(self.amostras))):
                if (self.testa_rede(self.amostras[i]) != self.saidas[i].index(
                        max(self.saidas[i]))):
                    erro_count = erro_count + 1

            # append this epoch's error count to the list
            self.erros.append(erro_count)
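treina relies on testa_rede, which is not shown on this page. A plausible sketch consistent with how it is called above (hypothetical, not the project's actual code): return the index of the perceptron with the strongest activation, i.e. the predicted class:

    def testa_rede(self, amostra):
        # hypothetical: index of the perceptron with the largest activation
        somas = [p.soma(amostra) for p in self.camada]
        return somas.index(max(somas))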
Example 25
def test_n_tuple_boundedness():
    """
    Description
    ---
    tests the raising of the VectorError which occurs when a 
    n_tuple length is inputed that is not 1 or greater
    """
    with pytest.raises(perceptron_errors.VectorError):
        perceptron.Perceptron(-np.random.randint(low=0, high=10))
Example 26
def test_perceptron_taux_apprentissage(e, max_iteration):
    df = red_wines
    connu = df.drop(['quality'], axis=1)  # known (input) data
    prediction = df['quality']  # data to predict (the classes)
    # summary table of the scores for each learning rate
    temp = np.empty([20, 4])
    i = 0  # index into temp
    for j in np.arange(0, 1, 0.05):  # the learning rate varies from 0 to 1
        temp[i][0] = j  # fill the learning-rate column
        # the classifier under test is the perceptron we built
        model = pt.Perceptron(j, e, max_iteration)

        # evaluate the classifier with recall, F1 score and accuracy
        score = cross_validate(model,
                               connu,
                               prediction,
                               cv=StratifiedKFold(n_splits=10,
                                                  random_state=int(
                                                      perf_counter() * 100),
                                                  shuffle=True),
                               scoring=dict(recal=make_scorer(recall_score),
                                            f1=make_scorer(f1_score),
                                            accur=make_scorer(accuracy_score)),
                               return_train_score=False)

        recall = score['test_recal'].mean()
        temp[i][1] = recall

        f1 = score['test_f1'].mean()
        temp[i][2] = f1

        accuracy = score['test_accur'].mean()
        temp[i][3] = accuracy
        i = i + 1

    # give temp named columns and rows so the table reads more easily
    total = pd.DataFrame(
        temp, columns=['learning rate', 'recall', 'f1', 'accuracy'])
    print("\nOverall scores per learning rate")
    print(total)

    print("The best learning rate is: ")
    # take the learning rate with the best accuracy score
    print(total.loc[total['accuracy'].idxmax(), 'learning rate'])

    # plot accuracy as a function of the learning rate
    X = total.loc[:, ['learning rate']]
    y = total.loc[:, ['accuracy']]
    plt.title("Accuracy as a function of the learning rate")
    plt.plot(X, y)
    plt.ylabel('accuracy')
    plt.xlabel('learning rate')
Example 27
def test1():
    testdata = np.array([[1, 2, 3, 4, 5]])
    normalized = normalize_in_range(testdata, (0, 10))

    print("data: ", testdata, " and ", normalized)
    p = perceptron.Perceptron('test')
    print(p.calculate(testdata[0]))
    #print(p.calculate(testdata[1]))
    print(p.calculate(normalized[0]))
def init_perceptrons():
    for i in range(10):
        perceptrons.append(pr.Perceptron(10 * 10, i))
    global training_inputs
    training_inputs = [np.ravel(n) for n in numbers]
    for i in range(10):
        labels = np.zeros(10)
        labels[i] = 1  # i % 10 == i here, since i < 10
        perceptrons[i].train(np.copy(training_inputs), labels)
Example 29
    def __init__(self, perceptrons):
        """Initializes all variables when the class is made.
        Contains the list of layers (a set of perceptrons) which is the network itself."""
        self.layer = []
        self.perceptron_amount = len(perceptrons)
        for i in perceptrons:
            self.layer.append(
                perceptron.Perceptron(i[0], i[1])
            )  # i[0] are the weights of each perceptron, i[1] are the biases
Example 30
    def __init__(self, train_data, train_label):
        self.perceptrons = []

        for i in range(0, len(set(train_label))):
            # one-vs-rest: relabel class i as +1 and every other class as -1
            # ("etykiety" is Polish for "labels")
            etykiety = np.copy(train_label)
            etykiety[(etykiety != i)] = -1
            etykiety[(etykiety == i)] = 1

            perceptron = per.Perceptron(eta=0.1, n_iter=500)
            perceptron.fit(train_data, etykiety)
            self.perceptrons.append(perceptron)
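A small illustration of that one-vs-rest relabeling (my sketch, not from the project); np.where gives the same result in one step:

import numpy as np

train_label = np.array([0, 1, 2, 1, 0, 2])
etykiety = np.where(train_label == 1, 1, -1)  # one-vs-rest labels for class 1
# etykiety == array([-1,  1, -1,  1, -1, -1])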