Example #1
def train_perceptron():
    network = Perceptron()
    input_count = len(dataset[0].inputs)
    print('----------------------------')
    print('Generating layers')
    for _ in range(input_count):
        network.s_layer.add_neuron(None, lambda value: value)
    print('S-layer generated')

    a_neurons_count = 2 ** input_count - 1
    for _ in range(a_neurons_count):
        neuron = ANeuron(None, lambda value: int(value >= 0))
        # initialize the A-layer neuron weights
        neuron.input_weights = [
            random.choice([-1, 0, 1]) for _ in range(input_count)
        ]
        neuron.calculate_bias()
        network.a_layer.neurons.append(neuron)
    print('A-layer generated')

    for _ in range(NUMBER_COUNT):
        network.r_layer.add_neuron(
            a_neurons_count,
            lambda: 0,
            lambda value: 1 if value >= 0 else -1,
            0.01,
            0,
        )
    print('R-layer generated')

    network.train(dataset)
    network.optimize(dataset)
    return network
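Example #1 assumes `Perceptron`, `ANeuron`, `dataset`, `NUMBER_COUNT`, and `random` are defined elsewhere in the project. As a rough, self-contained sketch of what a single A-layer unit does here (ternary random weights plus a step activation), the stand-in below may help; the class name and the bias rule are assumptions for illustration, not the project's actual API.

import random


class AUnitSketch:
    """Illustrative A-layer unit: ternary random weights plus a step activation."""

    def __init__(self, input_count):
        # Weights drawn from {-1, 0, 1}, mirroring the loop in train_perceptron above.
        self.input_weights = [random.choice([-1, 0, 1]) for _ in range(input_count)]
        # Assumed bias rule: the unit fires only when every positively weighted
        # input is active (the project's calculate_bias may differ).
        self.bias = -sum(w for w in self.input_weights if w > 0)

    def activate(self, inputs):
        total = sum(w * x for w, x in zip(self.input_weights, inputs)) + self.bias
        return int(total >= 0)


unit = AUnitSketch(input_count=4)
print(unit.activate([1, 0, 1, 0]))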
Example #2
    def testPlot(self):
        size = len(self.dataset)
        step = size // 20
        start = (size % 20) + 1

        # Train a fresh Perceptron on growing prefixes of the dataset and
        # plot the success rate against the number of training samples.
        for to in range(start, size, step):
            per = Perceptron([0.1 for _ in range(len(self.dataset[0]) - 1)])
            per.train(self.dataset[:to], 0.6)
            self.tester.setDataset(self.dataset)
            sr = self.tester.testPerceptron(per)
            plt.scatter(to, sr, color="black")

        plt.show()
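testPlot draws a learning curve: each iteration trains a fresh Perceptron on a growing prefix of the dataset and scatters the tester's success rate against the prefix length. The same idea in a self-contained form looks roughly like this; train_and_score is a hypothetical callback, since Perceptron and the tester are project classes not shown here.

import matplotlib.pyplot as plt


def plot_learning_curve(dataset, train_and_score, chunks=20):
    # `train_and_score(train_rows, all_rows)` is a stand-in: it should train a
    # model on train_rows and return its success rate on all_rows, the way
    # testPlot combines Perceptron.train and Tester.testPerceptron.
    size = len(dataset)
    step = max(size // chunks, 1)
    start = size % chunks + 1
    for to in range(start, size, step):
        plt.scatter(to, train_and_score(dataset[:to], dataset), color="black")
    plt.xlabel("training samples")
    plt.ylabel("success rate")
    plt.show()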
Example #3
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split

from perceptron.perceptron import Perceptron
from utils.abalone_data import get_abalone

x, y = get_abalone()

print(x.shape)
print(y.shape)

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20)

model = Perceptron(input_shape=[1, 9])

model.train(x_train, y_train, 0.0005, 100)

plt.plot(model.historical_error)
plt.show()
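The script above splits off 20% of the abalone data but only plots the training error. A natural follow-up would be to score the held-out split; model.predict below is a hypothetical method name, since the snippet does not show how the Perceptron class exposes predictions.

# Hypothetical evaluation on the held-out split; replace `predict` with the
# actual inference method of the Perceptron class.
correct = sum(1 for xi, yi in zip(x_test, y_test) if model.predict(xi) == yi)
print("held-out accuracy:", correct / len(y_test))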
Example #4
print(perAnd.binaryOutput([-1, 1, 1]))
print(perAnd.binaryOutput([1, -1, 1]))
print(perAnd.binaryOutput([1, 1, 1]))

# As the states are limited, we use them several times to train the perceptron.
trainingDataset = [
    [-1, -1, 1, -1], [-1, 1, 1, -1], [1, -1, 1, -1], [1, 1, 1, 1],
    [-1, -1, 1, -1], [-1, 1, 1, -1], [1, -1, 1, -1], [1, 1, 1, 1],
    [-1, -1, 1, -1], [-1, 1, 1, -1], [1, -1, 1, -1], [1, 1, 1, 1],
    [-1, -1, 1, -1], [-1, 1, 1, -1], [1, -1, 1, -1], [1, 1, 1, 1],
    [-1, -1, 1, -1], [-1, 1, 1, -1], [1, -1, 1, -1], [1, 1, 1, 1],
    [-1, -1, 1, -1], [-1, 1, 1, -1], [1, -1, 1, -1], [1, 1, 1, 1],
]
perAnd.train(trainingDataset)

print("After Training:")
print("w1: %f, w2: %f, bias: %f" %
      (perAnd.getWeight(0), perAnd.getWeight(1), perAnd.getWeight(2)))
print(perAnd.binaryOutput([-1, -1, 1]))
print(perAnd.binaryOutput([-1, 1, 1]))
print(perAnd.binaryOutput([1, -1, 1]))
print(perAnd.binaryOutput([1, 1, 1]))

tester = Tester()
tester.setDataset([[-1, -1, 1, -1], [-1, 1, 1, -1], [1, -1, 1, -1],
                   [1, 1, 1, 1]])
print("Success rate: ", tester.testPerceptron(perAnd))