Esempio n. 1
0
 def __init__(self,
              input_count,
              output_count,
              activation_function=Step(1, 0)):
     """Build a two-layer network: an input layer and an output layer.

     :param input_count: number of neurons in the input layer
     :param output_count: number of neurons in the output layer
     :param activation_function: activation applied to the output layer
         (note: the default Step(1, 0) instance is created once at class
         definition time and shared across calls)
     """
     NeuralNetwork.__init__(self)
     # Both layers sum their weighted inputs; only the output layer
     # applies the caller-supplied activation.
     summing = WeightedSum
     self.create_layer(input_count, summing())
     self.create_layer(output_count, summing(), activation_function)
Esempio n. 2
0
 def __init__(self, neuron_count, y_high, y_low):
     """Create a layer of step-activated neurons connected back onto itself.

     :type neuron_count: int
     :type y_high: float
     :type y_low: float
     """
     activation = Step(y_high, y_low)
     Layer.__init__(self, neuron_count, WeightedSum(), activation)
     # Recurrent wiring: the layer is connected to itself.
     self.connect_neurons(self)
Esempio n. 3
0
from synapyse.impl.learning.back_propagation import BackPropagation
from synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron

__author__ = 'Douglas Eric Fonseca Rodrigues'

training_set = TrainingSet(2, 1)

# XOR truth table: two inputs, one expected output per row.
for pattern, target in [
    ([0.0, 0.0], [0.0]),
    ([0.0, 1.0], [1.0]),
    ([1.0, 0.0], [1.0]),
    ([1.0, 1.0], [0.0]),
]:
    training_set.append(pattern, target)

# 2-3-1 network with Tanh(2) activations on the hidden and output layers.
n = MultiLayerPerceptron()
n.create_layer(2, WeightedSum())
n.create_layer(3, WeightedSum(), Tanh(2))
n.create_layer(1, WeightedSum(), Tanh(2))

n.randomize_weights()

b = BackPropagation(n, learning_rate=0.1, max_error=0.01)


def _report_iteration(x):
    # Trace the running network error after every iteration.
    print(x.actual_iteration, ':', x.total_network_error)


b.on_after_iteration = _report_iteration

b.learn(training_set)

# Show the trained network's output for every input pattern.
for training_set_row in training_set:
    print(n.set_input(training_set_row.input_pattern).compute().output)
Esempio n. 4
0
# Import the training data
dados_treinamento = TrainingSet(input_count=4, output_count=3) \
    .import_from_file('iris_training.data')

# Import the test data
dados_teste = TrainingSet(input_count=4, output_count=3) \
    .import_from_file('iris_testing.data')

# Try several learning rates
for taxa_aprendizado in [0.01, 0.1, 0.3, 0.5, 0.7, 0.9]:

    # Build the artificial neural network: 4 inputs, a layer of 3 sigmoid
    # neurons, and 3 sigmoid outputs.
    rede_neural = MultiLayerPerceptron() \
        .create_layer(neuron_count=4,
                      input_function=WeightedSum()) \
        .create_layer(neuron_count=3,
                      input_function=WeightedSum(),
                      activation_function=Sigmoid()) \
        .create_layer(neuron_count=3,
                      input_function=WeightedSum(),
                      activation_function=Sigmoid()) \
        .randomize_weights()

    # Create the learning algorithm (momentum is set to 0 here; training
    # stops at max_error=0.05 or after 30 iterations).
    # NOTE(review): `aprendizado` is never used in this visible snippet —
    # the training call presumably follows outside this view.
    aprendizado = MomentumBackPropagation(neural_network=rede_neural,
                                          learning_rate=taxa_aprendizado,
                                          max_error=0.05,
                                          momentum=0,
                                          max_iterations=30)
Esempio n. 5
0
from synapyse.impl.learning.back_propagation import BackPropagation
from synapyse.base.learning.training_set import TrainingSet
from synapyse.impl.activation_functions.sigmoid import Sigmoid
from synapyse.impl.input_functions.weighted_sum import WeightedSum
from synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron

__author__ = 'Douglas Eric Fonseca Rodrigues'

# Creating a training_set based in a text file (comma-separated), then
# normalizing it (exact scaling is defined by TrainingSet.normalize —
# presumably rescales values into a fixed range; confirm in that class).
training_set = TrainingSet(13, 1) \
    .import_from_file('heart_disease.txt', ',') \
    .normalize()

# Creating and configuring the network: 13 inputs, 8 sigmoid neurons in
# the middle layer, 1 sigmoid output.
multi_layer_perceptron = MultiLayerPerceptron() \
    .create_layer(13, WeightedSum()) \
    .create_layer(8, WeightedSum(), Sigmoid()) \
    .create_layer(1, WeightedSum(), Sigmoid()) \
    .randomize_weights()

# Generating an excel-file result.
# Create a workbook and add a worksheet.
# NOTE(review): xlsxwriter is used but not imported in the visible
# snippet — confirm the original file imports it at the top.
workbook = xlsxwriter.Workbook('heart_disease.xlsx')

for learning_rate in [0.01, 0.1, 0.3, 0.5, 0.7]:

    print('Running learning_rate =', learning_rate, '...')

    # Creating and configuring the learning method.
    # NOTE(review): this call is truncated in the visible snippet — the
    # remaining keyword arguments and closing parenthesis are outside
    # this view.
    momentum_backpropagation = BackPropagation(
        neural_network=multi_layer_perceptron,
Esempio n. 6
0
    .append([1.0, 1.0, 1.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0, 0.0]) \
    .append([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 0.0, 0.0, 1.0]) \
    .append([1.0, 0.0, 1.0, 1.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0, 0.0]) \
    .append([0.0, 1.0, 1.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0, 1.0]) \
    .append([0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 0.0, 1.0, 0.0, 0.0]) \
    .append([1.0, 0.0, 0.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0, 1.0]) \
    .append([1.0, 0.0, 1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 1.0, 0.0]) \
    .append([0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 0.0, 1.0, 1.0, 1.0]) \
    .append([1.0, 1.0, 0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0, 0.0]) \
    .append([1.0, 1.0, 0.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0]) \
    .append([1.0, 0.0, 0.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.0, 1.0, 0.0])

# Assemble the network layer by layer: 6 inputs, 7 sigmoid neurons in
# the middle layer, 5 sigmoid outputs.
neural_network = MultiLayerPerceptron()
neural_network.create_layer(6, WeightedSum())
neural_network.create_layer(7, WeightedSum(), Sigmoid())
neural_network.create_layer(5, WeightedSum(), Sigmoid())
neural_network.randomize_weights()

b = MomentumBackPropagation(neural_network=neural_network,
                            learning_rate=0.1,
                            momentum=0.4,
                            max_error=0.02)


def _log_iteration(obj):
    # Report the network error after each training iteration.
    print(obj.actual_iteration, ':', obj.total_network_error)


b.on_after_iteration = _log_iteration

b.learn(training_set)

print('total_error=', b.total_network_error)
Esempio n. 7
0
from synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron
from synapyse.util.logger import Logger

__author__ = 'Douglas Eric Fonseca Rodrigues'

Logger.enable_logger(Logger.INFO)
sim = 0.3  # NOTE(review): not referenced in the visible snippet

# Load the data set from a comma-separated text file, then split it with
# slice(80) — presumably an 80/20 train/test split; confirm in TrainingSet.
full_set = TrainingSet(21, 4).import_from_file('car_evaluation.txt', ',')
training_set_training, training_set_test = full_set.slice(80)

# Build the network: 21 inputs, 14 sigmoid neurons in the middle layer,
# 4 sigmoid outputs.
multi_layer_perceptron = (
    MultiLayerPerceptron()
    .create_layer(21, WeightedSum())
    .create_layer(14, WeightedSum(), Sigmoid())
    .create_layer(4, WeightedSum(), Sigmoid())
    .randomize_weights()
)

# Configure the learning method.
momentum_backpropagation = MomentumBackPropagation(
    neural_network=multi_layer_perceptron,
    learning_rate=0.3,
    momentum=0.6,
    max_error=0.001)


def _after_iteration(b):
    # Log the network error after each learning iteration.
    print(b.actual_iteration, ':', b.total_network_error)


momentum_backpropagation.on_after_iteration = _after_iteration
Esempio n. 8
0
from synapyse.impl.activation_functions.linear import Linear
from synapyse.impl.activation_functions.tanh import Tanh
from synapyse.impl.input_functions.weighted_sum import WeightedSum
from synapyse.impl.learning.back_propagation import BackPropagation
from synapyse.impl.multi_layer_perceptron import MultiLayerPerceptron

__author__ = 'Douglas Eric Fonseca Rodrigues'

training_set = TrainingSet(2, 1)
training_set.append([0.0, 1.0], [1.0])

# 2-2-2-1 network: linear input layer, two Tanh(2) middle layers, and a
# single Tanh(2) output neuron.
n = MultiLayerPerceptron()
n.create_layer(2, WeightedSum(), Linear())
n.create_layer(2, WeightedSum(), Tanh(2))
n.create_layer(2, WeightedSum(), Tanh(2))
n.create_layer(1, WeightedSum(), Tanh(2))

# Grab handles to individual neurons for inspection. Each 2-neuron layer
# exposes a third neuron — presumably the bias unit; TODO confirm.
layer0 = n.layers[0].neurons
n00, n01, b0 = layer0[0], layer0[1], layer0[2]

layer1 = n.layers[1].neurons
n10, n11, b1 = layer1[0], layer1[1], layer1[2]

layer2 = n.layers[2].neurons
n20, n21, b2 = layer2[0], layer2[1], layer2[2]