Example #1
from synapyse.base.learning.training_set import TrainingSet
from synapyse.impl.activation_functions.sigmoid import Sigmoid
from synapyse.impl.input_functions.weighted_sum import WeightedSum
from synapyse.impl.learning.momentum_back_propagation import MomentumBackPropagation  # assumed import path
from synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron

# Import the training data
dados_treinamento = TrainingSet(input_count=4, output_count=3) \
    .import_from_file('iris_training.data')

# Import the test data
dados_teste = TrainingSet(input_count=4, output_count=3) \
    .import_from_file('iris_testing.data')

# Try several learning rates
for taxa_aprendizado in [0.01, 0.1, 0.3, 0.5, 0.7, 0.9]:

    # Create the artificial neural network
    rede_neural = MultiLayerPerceptron() \
        .create_layer(neuron_count=4,
                      input_function=WeightedSum()) \
        .create_layer(neuron_count=3,
                      input_function=WeightedSum(),
                      activation_function=Sigmoid()) \
        .create_layer(neuron_count=3,
                      input_function=WeightedSum(),
                      activation_function=Sigmoid()) \
        .randomize_weights()

    # Create the learning algorithm
    aprendizado = MomentumBackPropagation(neural_network=rede_neural,
                                          learning_rate=taxa_aprendizado,
                                          max_error=0.05,
                                          momentum=0,
                                          max_iterations=30)

    print('Training started with learning rate =', taxa_aprendizado)

    # Start the learning
    aprendizado.learn(dados_treinamento)
Example #2
from synapyse.base.learning.training_set import TrainingSet
from synapyse.impl.activation_functions.tanh import Tanh
from synapyse.impl.input_functions.weighted_sum import WeightedSum
from synapyse.impl.learning.back_propagation import BackPropagation
from synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron

__author__ = 'Douglas Eric Fonseca Rodrigues'

training_set = TrainingSet(2, 1)

training_set \
    .append([0.0, 0.0], [0.0]) \
    .append([0.0, 1.0], [1.0]) \
    .append([1.0, 0.0], [1.0]) \
    .append([1.0, 1.0], [0.0])

n = MultiLayerPerceptron()

n \
    .create_layer(2, WeightedSum()) \
    .create_layer(3, WeightedSum(), Tanh(2)) \
    .create_layer(1, WeightedSum(), Tanh(2))

n.randomize_weights()

b = BackPropagation(n, learning_rate=0.1, max_error=0.01)

b.on_after_iteration = lambda x: print(x.actual_iteration, ':', x.total_network_error)

b.learn(training_set)
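Each layer above pairs an input function with an activation function: WeightedSum combines the signals arriving at a neuron, and the activation squashes the result. Below is a minimal plain-Python sketch of that composition for a single neuron; the weighted_sum/forward names and the weights are purely illustrative, and the slope parameter reflects an assumption that Tanh(2) scales the net input before the tanh is applied.

import math


def weighted_sum(inputs, weights):
    # Input function: dot product of the incoming signals and connection weights
    return sum(i * w for i, w in zip(inputs, weights))


def forward(inputs, weights, slope=2.0):
    # Activation function: tanh applied to the (scaled) net input
    return math.tanh(slope * weighted_sum(inputs, weights))


# One XOR pattern through a single neuron with made-up weights
print(forward([1.0, 0.0], [0.4, -0.7]))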
Example #3
import xlsxwriter

from synapyse.base.learning.training_set import TrainingSet
from synapyse.impl.activation_functions.sigmoid import Sigmoid
from synapyse.impl.input_functions.weighted_sum import WeightedSum
from synapyse.impl.learning.back_propagation import BackPropagation
from synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron

__author__ = 'Douglas Eric Fonseca Rodrigues'

# Create a training_set from a text file
training_set = TrainingSet(13, 1) \
    .import_from_file('heart_disease.txt', ',') \
    .normalize()

# Creating and configuring the network
multi_layer_perceptron = MultiLayerPerceptron() \
    .create_layer(13, WeightedSum()) \
    .create_layer(8, WeightedSum(), Sigmoid()) \
    .create_layer(1, WeightedSum(), Sigmoid()) \
    .randomize_weights()

# Generate an Excel file with the results
# Create a workbook and add a worksheet.
workbook = xlsxwriter.Workbook('heart_disease.xlsx')

for learning_rate in [0.01, 0.1, 0.3, 0.5, 0.7]:

    print('Running learning_rate =', learning_rate, '...')

    # Creating and configuring the learning method
    momentum_backpropagation = BackPropagation(
        neural_network=multi_layer_perceptron,
        learning_rate=learning_rate)
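The example is cut off before anything is written to the workbook. As a rough sketch of how the grid-search results might be recorded with xlsxwriter: the worksheet name, the column layout, and the final_error placeholder are assumptions, not part of the original example.

import xlsxwriter

workbook = xlsxwriter.Workbook('heart_disease.xlsx')
worksheet = workbook.add_worksheet('results')  # assumed sheet name

# Header row
worksheet.write(0, 0, 'learning_rate')
worksheet.write(0, 1, 'total_network_error')

for row, learning_rate in enumerate([0.01, 0.1, 0.3, 0.5, 0.7], start=1):
    # Placeholder: in the real script the network would be trained here and the
    # final error read from the learning method (its total_network_error)
    final_error = 0.0
    worksheet.write(row, 0, learning_rate)
    worksheet.write(row, 1, final_error)

workbook.close()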
Example #4
from synapyse.base.learning.training_set import TrainingSet
from synapyse.impl.activation_functions.tanh import Tanh
from synapyse.impl.input_functions.weighted_sum import WeightedSum
from synapyse.impl.learning.back_propagation import BackPropagation
from synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron


__author__ = 'Douglas Eric Fonseca Rodrigues'

training_set = TrainingSet(2, 1)

training_set \
    .append([0.0, 0.0], [0.0]) \
    .append([0.0, 1.0], [1.0]) \
    .append([1.0, 0.0], [1.0]) \
    .append([1.0, 1.0], [0.0])

n = MultiLayerPerceptron()

n \
    .create_layer(2, WeightedSum()) \
    .create_layer(3, WeightedSum(), Tanh(2)) \
    .create_layer(1, WeightedSum(), Tanh(2))

n.randomize_weights()

b = BackPropagation(n, learning_rate=0.1, max_error=0.01)

b.on_after_iteration = lambda x: print(x.actual_iteration, ':', x.total_network_error)

b.learn(training_set)

for training_set_row in training_set:
Example #5
from synapyse.base.learning.training_set import TrainingSet
from synapyse.impl.activation_functions.sigmoid import Sigmoid
from synapyse.impl.input_functions.weighted_sum import WeightedSum
from synapyse.impl.learning.momentum_back_propagation import MomentumBackPropagation  # assumed import path
from synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron

training_set = TrainingSet(6, 5) \
    .append([1.0, 1.0, 0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 1.0, 0.0, 1.0]) \
    .append([1.0, 1.0, 0.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 1.0, 0.0]) \
    .append([1.0, 0.0, 0.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0]) \
    .append([1.0, 1.0, 1.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0, 0.0]) \
    .append([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 0.0, 0.0, 1.0]) \
    .append([1.0, 0.0, 1.0, 1.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0, 0.0]) \
    .append([0.0, 1.0, 1.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0, 1.0]) \
    .append([0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 0.0, 1.0, 0.0, 0.0]) \
    .append([1.0, 0.0, 0.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0, 1.0]) \
    .append([1.0, 0.0, 1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 1.0, 0.0]) \
    .append([0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 0.0, 1.0, 1.0, 1.0]) \
    .append([1.0, 1.0, 0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0, 0.0]) \
    .append([1.0, 1.0, 0.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0]) \
    .append([1.0, 0.0, 0.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.0, 1.0, 0.0])

neural_network = MultiLayerPerceptron()

neural_network \
    .create_layer(6, WeightedSum()) \
    .create_layer(7, WeightedSum(), Sigmoid()) \
    .create_layer(5, WeightedSum(), Sigmoid()) \
    .randomize_weights()

b = MomentumBackPropagation(neural_network=neural_network,
                            learning_rate=0.1,
                            momentum=0.4,
                            max_error=0.02)

b.on_after_iteration = lambda obj: print(obj.actual_iteration, ':', obj.total_network_error)
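MomentumBackPropagation adds a fraction of the previous weight change (the momentum term, 0.4 here) to each new update, which smooths oscillations and can speed up convergence. A minimal sketch of that update rule, independent of synapyse; the function and variable names are illustrative only.

def momentum_update(weight, gradient, previous_delta, learning_rate=0.1, momentum=0.4):
    # New step = plain gradient-descent step plus a fraction of the previous step
    delta = -learning_rate * gradient + momentum * previous_delta
    return weight + delta, delta


# With a constant gradient the steps grow as the momentum term accumulates
w, prev = 0.0, 0.0
for _ in range(3):
    w, prev = momentum_update(w, gradient=1.0, previous_delta=prev)
    print(w)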
Example #6
from synapyse.base.learning.training_set import TrainingSet
from synapyse.impl.activation_functions.sigmoid import Sigmoid
from synapyse.impl.input_functions.weighted_sum import WeightedSum
from synapyse.impl.learning.momentum_back_propagation import MomentumBackPropagation  # assumed import path
from synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron

# Import the training data
dados_treinamento = TrainingSet(input_count=4, output_count=3) \
    .import_from_file('iris_training.data')

# Import the test data
dados_teste = TrainingSet(input_count=4, output_count=3) \
    .import_from_file('iris_testing.data')

# Test different numbers of neurons in the hidden layer
for numero_neuros_camada_oculta in [1, 2, 3, 4, 5, 6, 7]:

    # Create the artificial neural network
    rede_neural = MultiLayerPerceptron() \
        .create_layer(neuron_count=4,
                      input_function=WeightedSum()) \
        .create_layer(neuron_count=numero_neuros_camada_oculta,
                      input_function=WeightedSum(),
                      activation_function=Sigmoid()) \
        .create_layer(neuron_count=3,
                      input_function=WeightedSum(),
                      activation_function=Sigmoid()) \
        .randomize_weights()

    # Create the learning algorithm
    aprendizado = MomentumBackPropagation(neural_network=rede_neural,
                                          learning_rate=0.7,
                                          max_error=0.05,
                                          momentum=0,
                                          max_iterations=30)

    print('Network with number of neurons in the hidden layer =', numero_neuros_camada_oculta)
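Each hidden-layer size changes how many weights the 4-h-3 network has to fit, which is what this sweep is probing. A quick sketch of the parameter count, assuming one bias unit feeds each non-input layer (as the bias neurons in Example #7 suggest); weight_count is an illustrative helper, not part of synapyse.

def weight_count(inputs, hidden, outputs):
    # Fully connected inputs-hidden-outputs network with a bias per non-input layer
    return (inputs + 1) * hidden + (hidden + 1) * outputs


for hidden in [1, 2, 3, 4, 5, 6, 7]:
    print(hidden, 'hidden neurons ->', weight_count(4, hidden, 3), 'weights')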
Example #7
import json

import jsonpickle
from synapyse.base.learning.training_set import TrainingSet
from synapyse.impl.activation_functions.tanh import Tanh
from synapyse.impl.input_functions.weighted_sum import WeightedSum
from synapyse.impl.learning.back_propagation import BackPropagation
from synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron

__author__ = 'Douglas Eric Fonseca Rodrigues'

training_set = TrainingSet(2, 1)

training_set.append([1.0, 1.0], [1.0])

n = MultiLayerPerceptron()

n \
    .create_layer(2, WeightedSum()) \
    .create_layer(3, WeightedSum(), Tanh(2)) \
    .create_layer(1, WeightedSum(), Tanh(2))

# Pull out individual neurons for inspection: the input layer was created with
# two neurons, so the third entry is presumably an automatically added bias unit
n00 = n.layers[0].neurons[0]
n01 = n.layers[0].neurons[1]
b0 = n.layers[0].neurons[2]

# Hidden layer: three neurons plus the presumed bias unit
n10 = n.layers[1].neurons[0]
n11 = n.layers[1].neurons[1]
n12 = n.layers[1].neurons[2]
b1 = n.layers[1].neurons[3]
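The snippet imports json and jsonpickle but is cut off before using them; presumably the network gets serialized. A minimal sketch of how a round trip could look with jsonpickle, continuing from the network n above; this usage is an assumption, not code from the original example.

import json
import jsonpickle

# Serialize the whole network object graph to a JSON string
frozen = jsonpickle.encode(n)

# The payload is ordinary JSON, so the standard json module can inspect it
print(list(json.loads(frozen).keys()))

# Rebuild an equivalent network object from the string
restored = jsonpickle.decode(frozen)
print(restored.layers[1].neurons[0])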