Example #1
  def train(self, params):
    """
    Train TDNN network on buffered dataset history
    :param params:
    :return:
    """
    # self.net = buildNetwork(params['encoding_num'] * params['num_lags'],
    #                         params['num_cells'],
    #                         params['encoding_num'],
    #                         bias=True,
    #                         outputbias=True)

    ds = SupervisedDataSet(params['encoding_num'] * params['num_lags'],
                           params['encoding_num'])
    history = self.window(self.history, params['learning_window'])

    n = params['encoding_num']
    for i in xrange(params['num_lags'], len(history)):
      targets = numpy.zeros((1, n))
      targets[0, :] = self.encoder.encode(history[i])

      features = numpy.zeros((1, n * params['num_lags']))
      for lags in xrange(params['num_lags']):
        features[0, lags * n:(lags + 1) * n] = self.encoder.encode(
          history[i - (lags + 1)])
      ds.addSample(features, targets)

    trainer = BackpropTrainer(self.net,
                              dataset=ds,
                              verbose=params['verbosity'] > 0)

    if len(history) > 1:
      trainer.trainEpochs(params['num_epochs'])
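A minimal sketch of the lag-feature layout built in Example #1, assuming the encoder returns an n-dimensional vector per step; all names here are illustrative, not from the original project:

import numpy

n, num_lags = 4, 3                                # encoding width, lag count
encoded = [numpy.full(n, t) for t in range(10)]   # stand-in for encoder output
i = 5                                             # predict step i from i-1..i-3
features = numpy.zeros((1, n * num_lags))
for lag in range(num_lags):
    # slot `lag` holds the encoding of step i - (lag + 1)
    features[0, lag * n:(lag + 1) * n] = encoded[i - (lag + 1)]
# resulting row layout: [encode(i-1) | encode(i-2) | encode(i-3)]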
Example #2
class EightBitBrain(object):
    
    def __init__(self, dataset, inNodes, outNodes, hiddenNodes, classes):
        # target dimension must be 1 (a single class label) for
        # _convertToOneOfMany(); nb_classes gives the number of classes
        self.__dataset = ClassificationDataSet(inNodes, 1, nb_classes=classes)
        for element in dataset:
            self.addDatasetSample(self._binaryList(element[0]), element[1])
        self.__dataset._convertToOneOfMany()
        self.__network = buildNetwork(inNodes, hiddenNodes, self.__dataset.outdim, recurrent=True)
        self.__trainer = BackpropTrainer(self.__network, learningrate = 0.01, momentum = 0.99, verbose = True)
        self.__trainer.setData(self.__dataset)

    def _binaryList(self, n):
        return [int(c) for c in "{0:08b}".format(n)]
    
    def addDatasetSample(self, argument, target):
        self.__dataset.addSample(argument, target)

    def train(self, epochs):
        self.__trainer.trainEpochs(epochs)
    
    def activate(self, information):
        result = self.__network.activate(self._binaryList(information))
        highest = (0,0)
        for resultClass in range(len(result)):
            if result[resultClass] > highest[0]:
                highest = (result[resultClass], resultClass)
        return highest[1]
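A short sketch of the dataset convention behind the fix above: ClassificationDataSet stores one integer class label per sample, and _convertToOneOfMany() expands it into a one-hot target while keeping the label in the 'class' field (illustrative values):

from pybrain.datasets import ClassificationDataSet

ds = ClassificationDataSet(8, 1, nb_classes=3)   # 8 inputs, 1 label, 3 classes
ds.addSample([0, 1, 0, 1, 0, 1, 0, 1], [2])      # label stored as a single int
ds._convertToOneOfMany()                         # target becomes [0, 0, 1]
print(ds['target'][0])                           # one-hot row for class 2
print(ds['class'][0])                            # original integer label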
Example #3
    def train(self, epochs=None):
        trainer = BackpropTrainer(
            self.net,
            self.training_data
        )
        if epochs:
            trainer.trainEpochs(epochs)
        else:
            trainer.trainUntilConvergence()
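When epochs is omitted, trainUntilConvergence() silently holds out part of the dataset (25% by default) and stops once validation error stops improving; a sketch of the explicit call, with illustrative values:

train_errors, val_errors = trainer.trainUntilConvergence(
    maxEpochs=100,              # hard cap on training epochs
    continueEpochs=10,          # patience after the best validation error
    validationProportion=0.25)  # fraction held out for validation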
Example #4
File: filters.py  Project: labtempo/TMON
class PerceptronPyBrainFilter(LinearPerceptron): # PYBRAIN
    def __init__(self, *args, **kwargs):    
        super(PerceptronPyBrainFilter, self).__init__(*args, **kwargs)
        
        # input size, hidden layer size, output size
        self.perceptron = buildNetwork(
            self.num_last_measures, 0, 1,
            hiddenclass=pybrain.structure.modules.SigmoidLayer,
            outclass=pybrain.structure.modules.SigmoidLayer)

        self.pointer = 0
        # input dimension, target dimension
        self.data = SupervisedDataSet(self.num_last_measures, 1)
        for _i in xrange(self.dataset_size):
            self.data.addSample([0] * self.num_last_measures, 0)     
        self.trainer = BackpropTrainer(self.perceptron, self.data, learningrate=self.learning_rate)
        
        # This call does some internal initialization which is necessary before the net can finally
        # be used: for example, the modules are sorted topologically.
        self.perceptron.sortModules()
        

    def train(self):
        self.trainer.trainEpochs(1)
    
    
    def guess(self, x):
        return self.perceptron.activate(x)
    

    def apply(self, x):                
        if len(self.lag_buffer) < self.lag - 1:
            if len(self.last_measures) < self.num_last_measures:
                self.last_measures.append(x)
            else:
                self.lag_buffer.append(x)                  
            return x
        
        self.lag_buffer.append(x)
        #self.data.addSample(tuple(self.last_measures), self.lag_buffer[-1])
        self.data['input'][self.pointer] = np.array(self.last_measures)
                                                
        self.train()
        
        if len(self.data) == self.dataset_size:        
            #del self.data[0]
            #self.data.removeSample
            #self.data.removeSample
            pass
            
        del self.last_measures[0]
        self.last_measures.append(self.lag_buffer[0])
        
        del self.lag_buffer[0]
                        
        return self.guess(self.last_measures)
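Note that apply() above overwrites only the input row at self.pointer and never advances the pointer or writes the target. A sketch of a full ring-buffer update, assuming that was the intent (the helper name is ours, not from the project):

    def _update_buffer(self, features, target):
        # overwrite the oldest row with the newest (features, target) pair
        self.data['input'][self.pointer] = np.array(features)
        self.data['target'][self.pointer] = target
        # advance the write position, wrapping around the fixed-size dataset
        self.pointer = (self.pointer + 1) % self.dataset_size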
Example #6
    def trainNetwork(self, net, dataset):
        print("Started Training: " + strftime("%Y-%m-%d %H:%M:%S", gmtime()))

        t = BackpropTrainer(net,
                            dataset,
                            learningrate=0.01,
                            momentum=0,
                            verbose=False)
        t.trainEpochs(epochs=1)

        print("Finished Training: " + strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        return t
Example #7
def network(dataset, input_list):
    num_words = len(input_list)
    # splitWithProportion(0.25) puts 25% of the data in the first returned
    # set (used here for testing) and the remaining 75% in the second
    tstdata, trndata = dataset.splitWithProportion(0.25)

    #building the network
    net = RecurrentNetwork()
    input_layer1 = LinearLayer(num_words, name='input_layer1')
    input_layer2 = LinearLayer(num_words, name='input_layer2')
    hidden_layer = TanhLayer(num_words, name='hidden_layer')
    output_layer = SoftmaxLayer(num_words, name='output_layer')
    net.addInputModule(input_layer1)
    net.addInputModule(input_layer2)
    net.addModule(hidden_layer)
    net.addOutputModule(output_layer)
    net.addConnection(FullConnection(input_layer1,
                                     hidden_layer,
                                     name='in1_to_hidden'))
    net.addConnection(FullConnection(input_layer2, hidden_layer,
                                     name='in2_to_hidden'))
    net.addConnection(FullConnection(hidden_layer,
                                     output_layer,
                                     name='hidden_to_output'))
    net.addConnection(FullConnection(input_layer1,
                                     output_layer,
                                     name='in1_to_out'))
    net.addConnection(FullConnection(input_layer2,
                                     output_layer,
                                     name='in2_to_out'))
    net.sortModules()
    #backpropagation
    trainer = BackpropTrainer(net, dataset=trndata,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)
    #error checking part
    for i in range(10):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['target'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                                 tstdata['target'])
        print "epoch: %4d" % trainer.totalepochs
        print "  train error: %5.10f%%" % trnresult
        print "  test error: %5.10f%%" % tstresult
    return net
Example #8
def training_and_testing():
    nn = init_neural_network()

    training = learning.get_labeled_data(
        '%strain-images-idx3-ubyte.gz' % (database_folder),
        '%strain-labels-idx1-ubyte.gz' % (database_folder),
        '%straining' % (database_folder))
    test = learning.get_labeled_data(
        '%st10k-images-idx3-ubyte.gz' % (database_folder),
        '%st10k-labels-idx1-ubyte.gz' % (database_folder),
        '%stest' % (database_folder))

    FEATURES = N_INPUT_LAYER
    print("Caracteristicas a analizar: %i" % FEATURES)
    testdata = ClassificationDataSet(FEATURES, 1, nb_classes=OUTPUT_LAYER)
    trainingdata = ClassificationDataSet(FEATURES, 1, nb_classes=OUTPUT_LAYER)

    for i in range(len(test['data'])):
        testdata.addSample(test['data'][i], test['label'][i])
    for j in range(len(training['data'])):
        trainingdata.addSample(training['data'][j], training['label'][j])

    trainingdata._convertToOneOfMany()
    testdata._convertToOneOfMany()

    trainer = BackpropTrainer(nn,
                              dataset=trainingdata,
                              momentum=MOMENTUM,
                              verbose=True,
                              weightdecay=W_DECAY,
                              learningrate=L_RATE,
                              lrdecay=L_DECAY)

    for i in range(EPOCHS):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(),
                                 trainingdata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=testdata),
                                 testdata['class'])

        print("epoch: %4d" % trainer.totalepochs,
              "  train error: %5.2f%%" % trnresult,
              "  test error: %5.2f%%" % tstresult)
    return nn
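After the epoch loop, the same testOnClassData/percentError pair can report a final figure; a minimal sketch using the variables from this example:

from pybrain.utilities import percentError

# testOnClassData returns the winning class per sample; percentError compares
# it against the integer labels kept in the 'class' field
final_test_error = percentError(trainer.testOnClassData(dataset=testdata),
                                testdata['class'])
print("final test accuracy: %.2f%%" % (100.0 - final_test_error))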
Example #11
    random.seed(6)
    net = initializeTDNNnet(nDimInput=X.shape[1], nDimOutput=1, numNeurons=200)

    predictedInput = np.zeros((len(sequence), ))
    targetInput = np.zeros((len(sequence), ))
    trueData = np.zeros((len(sequence), ))
    for i in xrange(nTrain, len(sequence) - predictionStep):
        Y = net.activate(X[i])

        if i % 336 == 0 and i > numLags:
            ds = SupervisedDataSet(X.shape[1], 1)
            # use a separate index so the outer loop variable i is not clobbered
            for j in xrange(i - nTrain, i):
                ds.addSample(X[j], T[j])
            trainer = BackpropTrainer(net, dataset=ds, verbose=1)
            trainer.trainEpochs(30)

        predictedInput[i] = Y[-1]
        targetInput[i] = sequence['data'][i + predictionStep]
        trueData[i] = sequence['data'][i]
        print "Iteration {} target input {:2.2f} predicted Input {:2.2f} ".format(
            i, targetInput[i], predictedInput[i])

    predictedInput = (predictedInput * stdSeq) + meanSeq
    targetInput = (targetInput * stdSeq) + meanSeq
    trueData = (trueData * stdSeq) + meanSeq

    saveResultToFile(dataSet, predictedInput, 'tdnn')

    plt.figure()
    plt.plot(targetInput)
Example #12
net.addModule(hidden_layer)
net.addOutputModule(output_layer)
net.addConnection(
    FullConnection(input_layer, hidden_layer, name='in_to_hidden'))
net.addConnection(
    FullConnection(hidden_layer, output_layer, name='hidden_to_out'))
net.sortModules()

#backpropagation
trainer = BackpropTrainer(net,
                          dataset=trndata,
                          momentum=0.1,
                          verbose=True,
                          weightdecay=0.01)
#error checking part
for i in range(10):
    trainer.trainEpochs(1)
    trnresult = percentError(trainer.testOnClassData(), trndata['target'])
    tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                             tstdata['target'])

trigram_file = open('trigram.txt', 'w')
trigram_file.writelines(["%s\n" % item for item in sorted_list])

word_file = open('word_list', 'w')
word_file.writelines(["%s\n" % item for item in input_list])

word_file.close()
trigram_file.close()
text_file.close()
Example #13
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer

# pass the dimensions of the input and target vectors
dataset = SupervisedDataSet(2, 1)

dataset.addSample([1, 1], [0])
dataset.addSample([1, 0], [1])
dataset.addSample([0, 1], [1])
dataset.addSample([0, 0], [0])

#print(dataset.indim)
network = buildNetwork(dataset.indim, 4, dataset.outdim, bias=True)
trainer = BackpropTrainer(network, dataset, learningrate=0.01, momentum=0.99)

for epoch in range(1000):  # train for 1000 epochs
    trainer.train()

'''
Other training modes:
trainer.trainEpochs(1000)
trainer.trainUntilConvergence()  # train until convergence
'''

test_data = SupervisedDataSet(2, 1)

test_data.addSample([1,1], [0])
test_data.addSample([1,0], [1])
test_data.addSample([0,1], [1])
test_data.addSample([0,0], [0])

trainer.testOnData(test_data, verbose=True)
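With verbose=True, testOnData prints each output/target pair and returns the mean squared error. The trained XOR network can also be probed directly; outputs near 1 for [1,0] and [0,1] and near 0 otherwise indicate it has learned the function:

for inp in ([0, 0], [0, 1], [1, 0], [1, 1]):
    print('%s -> %s' % (inp, network.activate(inp)))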
Example #15
print(len(ds))

print('starting training')
# trainer = RPropMinusTrainer(n, dataset=ds)
# trainer = BackpropTrainer(n, dataset=ds)
# trainer.trainUntilConvergence()
# trainer.train()

trainer = BackpropTrainer(net, ds)

train_errors = [] # save errors for plotting later
EPOCHS_PER_CYCLE = 10
CYCLES = 50
EPOCHS = EPOCHS_PER_CYCLE * CYCLES
for i in xrange(CYCLES):
    trainer.trainEpochs(EPOCHS_PER_CYCLE)
    train_errors.append(trainer.testOnData())
    epoch = (i+1) * EPOCHS_PER_CYCLE
#    print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
    print(epoch, EPOCHS, train_errors[-1])
#     #stdout.flush()
print()
print("final error =", train_errors[-1])


# Plot the errors (note that in this simple toy example, we are testing and training on the same dataset, which is of course not what you'd do for a real project!):
# plt.plot(range(0, EPOCHS, EPOCHS_PER_CYCLE), train_errors)
# plt.xlabel('epoch')
# plt.ylabel('error')
# plt.show()
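The commented-out lines above hint at an alternative trainer; a sketch of swapping in RProp-, which adapts a per-weight step size and needs no learning rate (reusing net and ds from this example):

from pybrain.supervised import RPropMinusTrainer

trainer = RPropMinusTrainer(net, dataset=ds)
for i in xrange(CYCLES):
    trainer.trainEpochs(EPOCHS_PER_CYCLE)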
Example #16
    trndata._convertToOneOfMany(bounds=[0., 1.])
    tstdata._convertToOneOfMany(bounds=[0., 1.])

    if exists("params.xml"):
        rnn = NetworkReader.readFrom('params.xml')
    else:
        # construct LSTM network - note the missing output bias
        rnn = buildNetwork( trndata.indim, 5, trndata.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=False, recurrent=True)

    # define a training method
    trainer = BackpropTrainer(rnn, dataset=trndata, momentum=0.1,
                              weightdecay=0.01)

    # let's train
    for i in range(100):
        # set the number of epochs per training round
        trainer.trainEpochs(2)
        # calculating the error
        trnresult = (1.0-testOnSequenceData(rnn, trndata))
        tstresult = (1.0-testOnSequenceData(rnn, tstdata))
        #print("train error: %5.2f%%" % trnresult, ",  test error: %5.2f%%" % tstresult)

        # activating the softmax layer
        out = rnn.activate(X_train[0])
        out = out.argmax(axis=0)

    
    index = 0

    # evaluate the net in test data
    result = []
    for x in X_test:
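The params.xml branch above restores a saved network with NetworkReader; the write-side counterpart after training is NetworkWriter (a sketch, using rnn from this example):

from pybrain.tools.customxml.networkwriter import NetworkWriter

# serialize the trained network so the next run takes the readFrom branch
NetworkWriter.writeToFile(rnn, 'params.xml')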
Example #17
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import SoftmaxLayer
# The input is 19-dimensional, the output is 2-dimensional, and the hidden
# layer has 5 units. The output layer uses Softmax activation. BackpropTrainer
# defaults: learning rate (learningrate=0.01), learning-rate decay (lrdecay=1.0,
# the rate is multiplied by this after each step), verbosity (verbose=False),
# momentum (momentum=0, the weight of the previous step's gradient), and
# weight decay (weightdecay=0.0).

n_h = 5
net = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)

# Step 2: train the feed-forward network with standard (incremental) backprop
from pybrain.supervised import BackpropTrainer
trainer_sd = BackpropTrainer(net, traindata)

# # Or use batch (accumulated) backprop instead, training for 50 epochs:
# trainer_ac = BackpropTrainer(net, traindata, batchlearning=True)
# trainer_ac.trainEpochs(50)
# err_train, err_valid = trainer_ac.trainUntilConvergence(maxEpochs=50)

for i in range(50):  # 50 training rounds, printing train and test error each round
    trainer_sd.trainEpochs(1)  # train the network for one epoch

    # compute the training and test error rates
    from pybrain.utilities import percentError
    trainresult = percentError(trainer_sd.testOnClassData(),
                               traindata['class'])
    testresult = percentError(trainer_sd.testOnClassData(dataset=testdata),
                              testdata['class'])
    # print the error rates
    print('Epoch: %d' % trainer_sd.totalepochs, 'train error: ', trainresult,
          'test error: ', testresult)
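Spelled out, trainer_sd above is equivalent to passing every default named in the comment block explicitly; a sketch with net and traindata as above:

trainer_sd = BackpropTrainer(net, traindata,
                             learningrate=0.01,    # step size
                             lrdecay=1.0,          # rate multiplied by this each step
                             momentum=0.0,         # weight of the previous gradient step
                             verbose=False,
                             batchlearning=False,  # update after every sample
                             weightdecay=0.0)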
Example #19
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer

# pass the dimensions of the input and target vectors
dataset = SupervisedDataSet(2, 1)

dataset.addSample([1, 1], [0])
dataset.addSample([1, 0], [1])
dataset.addSample([0, 1], [1])
dataset.addSample([0, 0], [0])

network = buildNetwork(dataset.indim, 4, dataset.outdim, bias=True)
trainer = BackpropTrainer(network, dataset, learningrate=0.01, momentum=0.99)
'''
for epoch in range(1000):  # train for 1000 epochs
    trainer.train()
'''

trainer.trainEpochs(1000)
'''
train until convergence: trainer.trainUntilConvergence()
'''

test_data = SupervisedDataSet(2, 1)
test_data.addSample([1, 1], [0])
test_data.addSample([1, 0], [1])
test_data.addSample([0, 1], [1])
test_data.addSample([0, 0], [0])
trainer.testOnData(test_data, verbose=True)
Example #20
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer

# USING A NEURAL NETWORK TO SIMULATE AN AND LOGIC GATE

# DEFINE HOW MANY INPUTS AND OUTPUTS THE NETWORK SHOULD HAVE
parametros = SupervisedDataSet(2, 1)

# DEFINE THE INPUT AND OUTPUT TRAINING SAMPLES
parametros.addSample([0, 0], [0])
parametros.addSample([0, 1], [0])
parametros.addSample([1, 0], [0])
parametros.addSample([1, 1], [1])

# BUILD THE NEURAL NETWORK
# 2 - INPUT PARAMETERS
# 10 - NEURONS IN THE HIDDEN LAYER
# 1 - OUTPUT
rede_neural = buildNetwork(2, 10, 1, bias=True, outputbias=True)

# TRAIN THE NEURAL NETWORK
treinamento = BackpropTrainer(rede_neural, parametros, momentum=0.5)
treinamento.trainEpochs(1000)

# SIMULATE (TEST) THE NEURAL NETWORK
parametros_teste = SupervisedDataSet(2, 1)

# DEFINE THE TEST INPUT AND EXPECTED OUTPUT
parametros_teste.addSample([0, 0], [0])
treinamento.testOnData(parametros_teste, True)
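Beyond testOnData, the trained gate can be probed directly with activate; a small illustrative check of all four input pairs (a well-trained AND gate prints values near 0, 0, 0, 1):

for entrada in ([0, 0], [0, 1], [1, 0], [1, 1]):
    print(rede_neural.activate(entrada))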