Example #1
 def __init__(self, net, task, valueNetwork=None, **args):
     self.net = net
     self.task = task
     self.setArgs(**args)
     if self.valueLearningRate is None:
         self.valueLearningRate = self.learningRate
     if self.valueMomentum is None:
         self.valueMomentum = self.momentum
     if self.supervisedPlotting:
         from pylab import ion
         ion() 
     
     # adaptive temperature:
     self.tau = 1.
     
     # prepare the datasets to be used
     self.weightedDs = ImportanceDataSet(self.task.outdim, self.task.indim)
     self.rawDs = ReinforcementDataSet(self.task.outdim, self.task.indim)
     self.valueDs = SequentialDataSet(self.task.outdim, 1)
     
     # prepare the supervised trainers
     self.bp = BackpropTrainer(self.net, self.weightedDs, self.learningRate,
                               self.momentum, verbose=False,
                               batchlearning=True)            
     
     # CHECKME: outsource
     self.vnet = valueNetwork
     if valueNetwork is not None:
         self.vbp = BackpropTrainer(self.vnet, self.valueDs, self.valueLearningRate,
                                    self.valueMomentum, verbose=self.verbose)
         
     # keep information:
     self.totalSteps = 0
     self.totalEpisodes = 0
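
(The snippets in this collection are shown without their imports. A preamble along these lines should cover most of them; it is a sketch, and exact module paths may vary between PyBrain versions.)

from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets import (SupervisedDataSet, SequentialDataSet,
                              ClassificationDataSet)
from pybrain.structure import (FeedForwardNetwork, RecurrentNetwork,
                               LinearLayer, SigmoidLayer, TanhLayer,
                               SoftmaxLayer, LSTMLayer, FullConnection,
                               BiasUnit)
from pybrain.utilities import percentError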
Example #2
  def train(self, params):
    """
    Train TDNN network on buffered dataset history
    :param params:
    :return:
    """
    # self.net = buildNetwork(params['encoding_num'] * params['num_lags'],
    #                         params['num_cells'],
    #                         params['encoding_num'],
    #                         bias=True,
    #                         outputbias=True)

    ds = SupervisedDataSet(params['encoding_num'] * params['num_lags'],
                           params['encoding_num'])
    history = self.window(self.history, params['learning_window'])

    n = params['encoding_num']
    for i in xrange(params['num_lags'], len(history)):
      targets = numpy.zeros((1, n))
      targets[0, :] = self.encoder.encode(history[i])

      features = numpy.zeros((1, n * params['num_lags']))
      for lags in xrange(params['num_lags']):
        features[0, lags * n:(lags + 1) * n] = self.encoder.encode(
          history[i - (lags + 1)])
      ds.addSample(features, targets)

    trainer = BackpropTrainer(self.net,
                              dataset=ds,
                              verbose=params['verbosity'] > 0)

    if len(history) > 1:
      trainer.trainEpochs(params['num_epochs'])
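
(For orientation, a hypothetical params dict covering every key this train() method reads; the key names come from the snippet above, the values are purely illustrative.)

params = {
    'encoding_num': 25,       # width of one encoded value
    'num_lags': 10,           # past steps concatenated into one input vector
    'num_cells': 20,          # only used by the commented-out buildNetwork call
    'learning_window': 1000,  # how much buffered history to train on
    'num_epochs': 10,
    'verbosity': 0,
}
model.train(params)           # 'model': an instance of the class defining train()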
Example #3
def trainedANN():
    n = FeedForwardNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.sortModules()

    draw_connections(n)
    # d = generateTrainingData()
    d = getDatasetFromFile(root.path() + "/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)
    # FIXME: I'm not sure the recurrent ANN is going to converge
    # so just training for fixed number of epochs

    count = 0
    while True:
        globErr = t.train()
        print globErr
        if globErr < 0.01:
            break
        count += 1
        if count == 20:
            # not converging; restart with freshly initialized weights
            return trainedANN()

    exportANN(n)
    draw_connections(n)

    return n
Example #4
def trainedRNN():
    n = RecurrentNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.addRecurrentConnection(NMConnection(n['out'], n['out'], name='nmc'))
    # n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], inSliceFrom = 0, inSliceTo = 1, outSliceFrom = 0, outSliceTo = 3))
    n.sortModules()

    draw_connections(n)
    d = getDatasetFromFile(root.path() + "/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)

    count = 0
    while True:
        globErr = t.train()
        print globErr
        if globErr < 0.01:
            break
        count += 1
        if count == 50:
            # not converging; restart with freshly initialized weights
            return trainedRNN()
    # exportRNN(n)
    draw_connections(n)

    return n
Example #5
def trained_cat_dog_ANN():
    n = FeedForwardNetwork()
    d = get_cat_dog_trainset()
    input_size = d.getDimension('input')
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(SigmoidLayer(input_size + 1500, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.sortModules()
    n.convertToFastNetwork()
    print 'successfully converted to fast network'
    t = BackpropTrainer(n, d, learningrate=0.0001)  #, momentum=0.75)

    count = 0
    while True:
        globErr = t.train()
        print globErr
        count += 1
        if globErr < 0.01:
            break
        if count == 30:
            break

    exportCatDogANN(n)
    return n
Example #6
def trained_cat_dog_RFCNN():
    n = RecurrentNetwork()

    d = get_cat_dog_trainset()
    input_size = d.getDimension('input')
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(SigmoidLayer(input_size + 1500, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))
    n.sortModules()

    t = BackpropTrainer(n, d, learningrate=0.0001)  #, momentum=0.75)

    count = 0
    while True:
        globErr = t.train()
        print globErr
        count += 1
        if globErr < 0.01:
            break
        if count == 30:
            break

    exportCatDogRFCNN(n)
    return n
Example #7
def testOldTraining(hidden=15, n=None):
    d = XORDataSet()
    if n is None:
        n = buildNetwork(d.indim, hidden, d.outdim, recurrent=False)
    t = BackpropTrainer(n, learningrate=0.01, momentum=0., verbose=False)
    t.trainOnDataset(d, 250)
    t.testOnData(verbose=True)
Example #8
def training(d):
    """
    Builds a network and trains it.
    """
    n = buildNetwork(d.indim, 4, d.outdim, recurrent=True)
    t = BackpropTrainer(n, d, learningrate=0.01, momentum=0.99, verbose=True)
    for epoch in range(0, 1000):
        t.train()
    return t
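
(A minimal way to exercise this helper, assuming PyBrain's SupervisedDataSet; the XOR samples are illustrative.)

d = SupervisedDataSet(2, 1)
d.addSample((0, 0), (0,))
d.addSample((0, 1), (1,))
d.addSample((1, 0), (1,))
d.addSample((1, 1), (0,))
t = training(d)  # returns the fitted BackpropTrainer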
Example #9
def main(f_samples):
    f_reading = open(f_samples, 'r')
    global data
    data = []

    for line in f_reading:
        line = line.split()
        data.append( (float(line[0]), float(line[-1])) )

    # split data into 5 interleaved subsets (every 5th sample, offset by x)
    data_module = lambda x: [data[z] for z in xrange(len(data)) if z % 5 == x]

    global data1
    data1 = [data_module(0), data_module(1), data_module(2), data_module(3), data_module(4)]

    global data_transformed
    data_transformed = take(data, rate = 60)

    global data_transformed_training
    data_transformed_training = [data_transformed[x]
                                 for x in xrange(len(data_transformed))
                                 if uniform(0, 1) > 0.3]

    #Learning process-----------------------------------------------------------------

    global net, samples, trainer
    net = FeedForwardNetwork()
    inLayer = LinearLayer(3)
    hiddenLayer0 = SigmoidLayer(1)
    hiddenLayer1 = SigmoidLayer(3)
    outLayer = LinearLayer(1)

    net.addInputModule(inLayer)
#    net.addModule(hiddenLayer0)
#    net.addModule(hiddenLayer1)
    net.addOutputModule(outLayer)

#    net.addConnection(FullConnection(inLayer, hiddenLayer0))
    net.addConnection(FullConnection(inLayer, outLayer))
#    net.addConnection(FullConnection(hiddenLayer0, outLayer))
#    net.addConnection(FullConnection(hiddenLayer0, hiddenLayer1))
#    net.addConnection(FullConnection(hiddenLayer1, outLayer))
    net.sortModules()
    print net
    ##Net with 3 inputs, 8 hidden neurons in a layer and 8 in another, and 1 out.
    #net = buildNetwork(3,8,8,1)
    ##Set with 2 inputs and one output for each sample
    samples = SupervisedDataSet(3,1)

    for i in data_transformed_training:
        samples.addSample(i['past'], i['next'] - i['average'])
    trainer = BackpropTrainer(net, samples)

    print 'Training'
    trainer.trainUntilConvergence(maxEpochs= 10)

    print 'Comparing'
    compare_net_samples(net, data_transformed)
    print "Number of samples %d for training." %len(data_transformed_training)
Example #10
def neuralNetworkRegression(X,Y):
    print ("NEURAL NETWORK REGRESSION")
    print ("Executing...")

    X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size = 0.10, random_state = 5)
    Y_test = Y_test.reshape(-1,1)
    Y_train = Y_train.reshape(-1,1)
    RMSEerror = []

    train = np.vstack((X_train, X_test))  # append both testing and training into one array
    outputTrain = np.vstack((Y_train, Y_test))
    outputTrain = outputTrain.reshape( -1, 1 )

    inputSize = train.shape[1]
    targetSize = outputTrain.shape[1]

    ds = SupervisedDataSet(inputSize, targetSize)
    ds.setField('input', train)
    ds.setField('target', outputTrain)

    hiddenSize = 100
    epochs = 100  # got after parameter tuning

    # neural network training model
    net = buildNetwork( inputSize, hiddenSize, targetSize, bias = True )
    trainer = BackpropTrainer(net, ds)

    # uncomment out to plot epoch vs rmse
    # takes time to execute as gets best epoch value
    # getting the best value of epochs

    print ("training for {} epochs...".format( epochs ))
    '''
    for i in range(epochs):
        print (i)
        mse = trainer.train()
        rmse = mse ** 0.5
        RMSEerror.append(rmse)

    plt.plot(range(epochs), RMSEerror)
    plt.xlabel("Epochs")
    plt.ylabel("RMSE")
    plt.title("RMSE vs Epochs")
    plt.savefig("../Graphs/Network/Question 2c/RMSE vs Epochs.png")

    plt.show()
    '''
    print ("Model training in process...")
    train_mse, validation_mse = trainer.trainUntilConvergence(verbose = True, validationProportion = 0.15, maxEpochs = epochs, continueEpochs = 10)
    p = net.activateOnDataset(ds)
    
    mse = mean_squared_error(outputTrain, p)
    rmse = mse ** 0.5

    print ("Root Mean Squared Error for Best Parameters : " + str(rmse))
Example #11
def result(request, form):
    dataset = SupervisedDataSet(2, 1)
    dados = form.cleaned_data

    # Add the XOR truth-table samples
    dataset.addSample([0, 0], [0])
    dataset.addSample([0, 1], [1])
    dataset.addSample([1, 0], [1])
    dataset.addSample([1, 1], [0])

    bias = dados['bias'] is not None

    # input and output dimensions; the second argument is the number of hidden units
    network = buildNetwork(dataset.indim, int(dados['num_camadas']), dataset.outdim, bias=bias)
    trainer = BackpropTrainer(network, dataset, learningrate=float(dados['learningrate']), momentum=float(dados['momentum']))

    pesos_iniciais = network.params

    network._setParameters(np.random.uniform(dados['peso_start'], dados['peso_end'], network.params.shape[0]))

    error = 1.00000000

    epocasPercorridas = 0

    errors = []
    it = []
    while epocasPercorridas < dados['epochs'] and error > dados['erro_max']:
        error = trainer.train()
        epocasPercorridas += 1
        errors.append(error)
        it.append(epocasPercorridas)
    graph = [[idx, e] for idx, e in enumerate(errors)]

    context = {'form': form.cleaned_data,
               'error': error,
               'graph': json.dumps(graph),
               'epocas': epocasPercorridas,
               'pesos_iniciais': pesos_iniciais,
               'pesos_finais': network.params,
               'result00': network.activate([0, 0])[0],
               'result01': network.activate([0, 1])[0],
               'result10': network.activate([1, 0])[0],
               'result11': network.activate([1, 1])[0]}

    return render(request, 'result.html', context)
Example #12
    def reset(self, params, repetition):
        print params

        self.nDimInput = 1  #3
        self.inputEncoder = PassThroughEncoder()

        if params['output_encoding'] is None:
            self.outputEncoder = PassThroughEncoder()
            self.nDimOutput = 1
        elif params['output_encoding'] == 'likelihood':
            self.outputEncoder = ScalarBucketEncoder()
            self.nDimOutput = self.outputEncoder.encoder.n

        if (params['dataset'] == 'nyc_taxi'
                or params['dataset'] == 'nyc_taxi_perturb_baseline'):
            self.dataset = NYCTaxiDataset(params['dataset'])
        else:
            raise Exception("Dataset not found")

        self.testCounter = 0
        self.resets = []
        self.iteration = 0

        # initialize LSTM network
        random.seed(6)
        if params['output_encoding'] is None:
            self.net = buildNetwork(self.nDimInput,
                                    params['num_cells'],
                                    self.nDimOutput,
                                    hiddenclass=LSTMLayer,
                                    bias=True,
                                    outputbias=True,
                                    recurrent=True)
        elif params['output_encoding'] == 'likelihood':
            self.net = buildNetwork(self.nDimInput,
                                    params['num_cells'],
                                    self.nDimOutput,
                                    hiddenclass=LSTMLayer,
                                    bias=True,
                                    outclass=SigmoidLayer,
                                    recurrent=True)
        print self.net['out']
        print self.net['hidden0']
        self.trainer = BackpropTrainer(self.net,
                                       dataset=SequentialDataSet(
                                           self.nDimInput, self.nDimOutput),
                                       learningrate=0.01,
                                       momentum=0,
                                       verbose=params['verbosity'] > 0)

        (self.networkInput, self.targetPrediction, self.trueData) = \
          self.dataset.generateSequence(
          prediction_nstep=params['prediction_nstep'],
          output_encoding=params['output_encoding'],
          noise=params['noise'])
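
(For orientation, a hypothetical params dict touching every key this reset() method reads; the values are illustrative only.)

params = {
    'output_encoding': None,   # or 'likelihood'
    'dataset': 'nyc_taxi',     # or 'nyc_taxi_perturb_baseline'
    'num_cells': 20,
    'prediction_nstep': 5,
    'noise': 0,
    'verbosity': 0,
}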
Example #13
    def train_network(self, network, dataset):

        trainer = BackpropTrainer(network,
                                  dataset,
                                  learningrate=0.01,
                                  momentum=0.99,
                                  verbose=True)
        for epoch in range(0, 1000):
            trainer.train()

        return network
Example #14
def testTraining():
    ds = WebsiteFeaturesDataSet()
    net = buildNetwork(ds.indim, 4, ds.outdim, recurrent=True)
    trainer = BackpropTrainer(net,
                              learningrate=0.001,
                              momentum=0.99,
                              verbose=True)
    trainer.trainOnDataset(ds, 1000)
    trainer.testOnData(verbose=True)
    import pdb
    pdb.set_trace()
Example #15
    def trainNetwork(self, net, dataset):
        print("Started Training: " + strftime("%Y-%m-%d %H:%M:%S", gmtime()))

        t = BackpropTrainer(net,
                            dataset,
                            learningrate=0.01,
                            momentum=0,
                            verbose=False)
        t.trainEpochs(epochs=1)

        print("Finished Training: " + strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        return t
Example #16
def generate_and_test_nn():
    d = load_training_set()
    n = buildNetwork(d.indim,
                     13,
                     d.outdim,
                     hiddenclass=LSTMLayer,
                     outclass=SoftmaxLayer,
                     outputbias=False,
                     recurrent=True)
    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
    t.trainOnDataset(d, 1000)
    t.testOnData(verbose=True)
    return (n, d)
Example #17
    def reset(self, params, repetition):
        random.seed(params['seed'])

        if params['encoding'] == 'basic':
            self.encoder = BasicEncoder(params['encoding_num'])
        elif params['encoding'] == 'distributed':
            self.encoder = DistributedEncoder(
                params['encoding_num'],
                maxValue=params['encoding_max'],
                minValue=params['encoding_min'],
                classifyWithRandom=params['classify_with_random'])
        else:
            raise Exception("Encoder not found")

        if params['dataset'] == 'simple':
            self.dataset = SimpleDataset()
        elif params['dataset'] == 'reber':
            self.dataset = ReberDataset(maxLength=params['max_length'])
        elif params['dataset'] == 'high-order':
            self.dataset = HighOrderDataset(
                numPredictions=params['num_predictions'], seed=params['seed'])
        else:
            raise Exception("Dataset not found")

        self.computeCounter = 0

        self.history = []
        self.resets = []
        self.randoms = []

        self.currentSequence = []
        self.targetPrediction = []
        self.replenishSequence(params, iteration=0)

        self.net = buildNetwork(params['encoding_num'],
                                params['num_cells'],
                                params['encoding_num'],
                                hiddenclass=LSTMLayer,
                                bias=True,
                                outputbias=params['output_bias'],
                                recurrent=True)

        self.trainer = BackpropTrainer(self.net,
                                       dataset=SequentialDataSet(
                                           params['encoding_num'],
                                           params['encoding_num']),
                                       learningrate=0.01,
                                       momentum=0,
                                       verbose=params['verbosity'] > 0)

        self.sequenceCounter = 0
Example #18
def trainedLSTMNN():
    """
    n = RecurrentNetwork()

    inp = LinearLayer(100, name = 'input')
    hid = LSTMLayer(30, name='hidden')
    out = LinearLayer(1, name='output')

    #add modules
    n.addOutputModule(out)
    n.addInputModule(inp)
    n.addModule(hid)

    #add connections
    n.addConnection(FullConnection(inp, hid))
    n.addConnection(FullConnection(hid, out))

    n.addRecurrentConnection(FullConnection(hid, hid))
    n.sortModules()
    """
    n = buildNetwork(100,
                     50,
                     1,
                     hiddenclass=LSTMLayer,
                     outputbias=False,
                     recurrent=True)

    print "Network created"
    d = load1OrderDataSet()
    print "Data loaded"
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    # FIXME: I'm not sure the recurrent ANN is going to converge
    # so just training for fixed number of epochs
    print "Learning started"
    count = 0
    while True:
        globErr = t.train()
        print "iteration #", count, " error = ", globErr
        if globErr < 0.1:
            break
        count = count + 1
        # if (count == 60):
        #     break

    # for i in range(100):
    #     print t.train()

    exportANN(n)

    return n
Example #19
def generate_forecasters(data, dtt, alpha):
    #Learning process-----------------------------------------------------------------
    global net, samples, trainer
    net = FeedForwardNetwork()
    inLayer = LinearLayer(3)
    hiddenLayer0 = SigmoidLayer(1)
    hiddenLayer1 = SigmoidLayer(3)
    outLayer = LinearLayer(1)

    net.addInputModule(inLayer)
    net.addModule(hiddenLayer0)
    #    net.addModule(hiddenLayer1)
    net.addOutputModule(outLayer)

    #    net.addConnection(FullConnection(inLayer, hiddenLayer0))
    net.addConnection(FullConnection(inLayer, outLayer))
    #    net.addConnection(FullConnection(hiddenLayer0, outLayer))
    #    net.addConnection(FullConnection(hiddenLayer0, hiddenLayer1))
    #    net.addConnection(FullConnection(hiddenLayer1, outLayer))
    net.sortModules()
    print net
    ##Net with 3 inputs, 8 hidden neurons in a layer and 8 in another, and 1 out.
    #net = buildNetwork(3,8,8,1)
    ##Set with 2 inputs and one output for each sample
    samples = SupervisedDataSet(3, 1)

    for i in dtt:
        samples.addSample(i['past'], i['next'] - i['average'])
    trainer = BackpropTrainer(net, samples)

    print 'Training'
    #trainer.trainUntilConvergence(maxEpochs= 1)

    #Making Forecasters---------------------------------------------------------------
    aux = map(lambda x: x[0], data)

    def exp(self, a, x):
        self.exp = a * data[aux.index(x) - 1][1] + (1 - a) * self.exp
        return self.exp

    naive = Forecaster(name='Naive',
                       predict_function=lambda x: data[aux.index(x) - 1][1])
    exponential = Forecaster(name='Exponential')
    exponential.exp = data[0][1]
    exponential.predict = lambda x: exp(exponential, alpha, x)
    network = Forecaster(name='Network', predict_function=net.activate)

    return naive, exponential, network
Example #20
def trained3ONN():
    n = FeedForwardNetwork()

    inp = LinearLayer(176850, name='input')
    hid = LinearLayer(3, name='hidden')
    out = LinearLayer(1, name='output')

    #add modules
    n.addOutputModule(out)
    n.addInputModule(inp)
    n.addModule(hid)

    #add connections
    n.addConnection(FullConnection(inp, hid, inSliceTo=100, outSliceTo=1))
    n.addConnection(
        FullConnection(inp,
                       hid,
                       inSliceFrom=100,
                       inSliceTo=5150,
                       outSliceFrom=1,
                       outSliceTo=2))
    n.addConnection(FullConnection(inp, hid, inSliceFrom=5150, outSliceFrom=2))
    n.addConnection(FullConnection(hid, out))

    n.sortModules()
    print "Network created"
    d = load3OrderDataSet()
    print "Data loaded"
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    # FIXME: I'm not sure the recurrent ANN is going to converge
    # so just training for fixed number of epochs
    print "Learning started"
    count = 0
    while True:
        globErr = t.train()
        print "iteration #", count, " error = ", globErr
        if globErr < 0.01:
            break
        count = count + 1
        # if (count == 100):
        #     break

    # for i in range(100):
    #     print t.train()

    exportANN(n)

    return n
Example #21
def training_and_testing():
    nn = init_neural_network()

    training = learning.get_labeled_data(
        '%strain-images-idx3-ubyte.gz' % (database_folder),
        '%strain-labels-idx1-ubyte.gz' % (database_folder),
        '%straining' % (database_folder))
    test = learning.get_labeled_data(
        '%st10k-images-idx3-ubyte.gz' % (database_folder),
        '%st10k-labels-idx1-ubyte.gz' % (database_folder),
        '%stest' % (database_folder))

    FEATURES = N_INPUT_LAYER
    print("Caracteristicas a analizar: %i" % FEATURES)
    testdata = ClassificationDataSet(FEATURES, 1, nb_classes=OUTPUT_LAYER)
    trainingdata = ClassificationDataSet(FEATURES, 1, nb_classes=OUTPUT_LAYER)

    for i in range(len(test['data'])):
        testdata.addSample(test['data'][i], test['label'][i])
    for j in range(len(training['data'])):
        trainingdata.addSample(training['data'][j], training['label'][j])

    trainingdata._convertToOneOfMany()
    testdata._convertToOneOfMany()

    trainer = BackpropTrainer(nn,
                              dataset=trainingdata,
                              momentum=MOMENTUM,
                              verbose=True,
                              weightdecay=W_DECAY,
                              learningrate=L_RATE,
                              lrdecay=L_DECAY)

    for i in range(EPOCHS):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(),
                                 trainingdata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=testdata),
                                 testdata['class'])

        print("epoch: %4d" % trainer.totalepochs,
              "  train error: %5.2f%%" % trnresult,
              "  test error: %5.2f%%" % tstresult)
    return nn
Example #22
def treinamento_Portas(list_Entrada_Saida, NumCamadasOcultas, taxa_aprendizado,
                       epochs):
    # add the samples
    d_in = 0
    d_out = 0
    for d in list_Entrada_Saida:
        d_in = len(d[0])
        d_out = len(d[1])

    dataset = SupervisedDataSet(d_in, d_out)
    for l in list_Entrada_Saida:
        entrada = l[0]
        saida = l[1]
        dataset.addSample(entrada, saida)

    # build the network

    network = buildNetwork(
        dataset.indim,
        NumCamadasOcultas,
        dataset.outdim,
        bias=True,
        hiddenclass=SigmoidLayer,
        outclass=SigmoidLayer,
    )

    # use backpropagation
    trainer = BackpropTrainer(network, dataset, learningrate=taxa_aprendizado)

    # train the network
    for epocas in range(epochs):
        trainer.train()

    # test the network
    test_data = SupervisedDataSet(d_in, d_out)
    for l in list_Entrada_Saida:
        entrada = l[0]
        saida = l[1]
        test_data.addSample(entrada, saida)

    try:
        trainer.testOnData(test_data, verbose=True)
    except Exception:
        pass
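
(A hypothetical call, training a 4-hidden-unit network on the XOR gate; the values are illustrative.)

treinamento_Portas(
    [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])],
    NumCamadasOcultas=4,
    taxa_aprendizado=0.1,
    epochs=1000)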
Example #23
def network(dataset, input_list):
    num_words = len(input_list)
    #dividing the dataset into training and testing data
    tstdata, trndata = dataset.splitWithProportion(0.25)

    #building the network
    net = RecurrentNetwork()
    input_layer1 = LinearLayer(num_words, name='input_layer1')
    input_layer2 = LinearLayer(num_words, name='input_layer2')
    hidden_layer = TanhLayer(num_words, name='hidden_layer')
    output_layer = SoftmaxLayer(num_words, name='output_layer')
    net.addInputModule(input_layer1)
    net.addInputModule(input_layer2)
    net.addModule(hidden_layer)
    net.addOutputModule(output_layer)
    net.addConnection(
        FullConnection(input_layer1, hidden_layer, name='in1_to_hidden'))
    net.addConnection(
        FullConnection(input_layer2, hidden_layer, name='in2_to_hidden'))
    net.addConnection(
        FullConnection(hidden_layer, output_layer, name='hidden_to_output'))
    net.addConnection(
        FullConnection(input_layer1, output_layer, name='in1_to_out'))
    net.addConnection(
        FullConnection(input_layer2, output_layer, name='in2_to_out'))
    net.sortModules()
    #backpropagation
    trainer = BackpropTrainer(net,
                              dataset=trndata,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)
    #error checking part
    for i in range(10):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['target'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                                 tstdata['target'])
        print "epoch: %4d" % trainer.totalepochs
        print "  train error: %5.10f%%" % trnresult
        print "  test error: %5.10f%%" % tstresult
    return net
Example #24
    def __init__(self, *args, **kwargs):
        super(PerceptronPyBrainFilter, self).__init__(*args, **kwargs)

        # input, hidden_layers, output
        self.perceptron = buildNetwork(
            self.num_last_measures, 0, 1,
            hiddenclass=pybrain.structure.modules.SigmoidLayer,
            outclass=pybrain.structure.modules.SigmoidLayer)

        # input dimension, target dimension
        self.pointer = 0
        self.data = SupervisedDataSet(self.num_last_measures, 1)
        for _i in xrange(self.dataset_size):
            self.data.addSample([0] * self.num_last_measures, 0)
        self.trainer = BackpropTrainer(self.perceptron,
                                       self.data,
                                       learningrate=self.learning_rate)

        # This call does some internal initialization which is necessary before the net can finally
        # be used: for example, the modules are sorted topologically.
        self.perceptron.sortModules()
Example #25
    def train(self, dataSet):
        """
        Builds a network and trains it.
        """
        if os.stat(self.predictor_path).st_size != 0:
            self.network = NetworkReader.readFrom(self.predictor_path)
        else:
            self.network = buildNetwork(dataSet.indim, 4, dataSet.outdim,
                                        recurrent=True)

        t = None

        if len(dataSet) > 0:
            t = BackpropTrainer(self.network, dataSet,
                                learningrate=self.learningrate,
                                momentum=self.momentum, verbose=False)
            for epoch in range(0, self.epochs):
                t.train()

        NetworkWriter.writeToFile(self.network, self.predictor_path)

        return t
Example #26
def testTraining():
    # the AnBnCn dataset (sequential)
    d = AnBnCnDataSet()

    # build a recurrent network to be trained
    hsize = 2
    n = RecurrentNetwork()
    n.addModule(TanhLayer(hsize, name='h'))
    n.addModule(BiasUnit(name='bias'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['bias'], n['h']))
    n.addConnection(FullConnection(n['h'], n['out']))
    n.addRecurrentConnection(FullConnection(n['h'], n['h']))
    n.sortModules()

    # initialize the backprop trainer and train
    t = BackpropTrainer(n, learningrate=0.1, momentum=0.0, verbose=True)
    t.trainOnDataset(d, 200)

    # the resulting weights are in the network:
    print('Final weights:', n.params)
Example #27
    def __init__(self):
        self.code = {'cat': [1, 0, 0], 'dust': [0, 1, 0], 'water': [0, 0, 1]}

        pack = 'media.images_train'
        train_data = [
            (Neuron(load(file_path(pack, 'cat1.png'))), self.code['cat']),
            (Neuron(load(file_path(pack, 'cat2.png'))), self.code['cat']),
            (Neuron(load(file_path(pack, 'cat3.png'))), self.code['cat']),
            (Neuron(load(file_path(pack, 'dust1.png'))), self.code['dust']),
            (Neuron(load(file_path(pack, 'dust2.png'))), self.code['dust']),
            (Neuron(load(file_path(pack, 'dust3.png'))), self.code['dust']),
            (Neuron(load(file_path(pack, 'water1.png'))), self.code['water']),
            (Neuron(load(file_path(pack, 'water2.png'))), self.code['water']),
            (Neuron(load(file_path(pack, 'water3.png'))), self.code['water']),
        ]

        for x, output in train_data:
            x.prepare()

        self.net = buildNetwork(4,
                                3,
                                3,
                                hiddenclass=TanhLayer,
                                outclass=SoftmaxLayer)
        data = SupervisedDataSet(4, 3)

        for x, output in train_data:
            data.addSample((
                x.contours / 100.0,
                x.color[0] / 1000.0,
                x.color[1] / 1000.0,
                x.color[2] / 1000.0,
            ), output)

        trainer = BackpropTrainer(self.net,
                                  momentum=0.1,
                                  verbose=True,
                                  weightdecay=0.01)
        trainer.trainOnDataset(data, 1000)  # 1000 iterations
        trainer.testOnData(verbose=True)
Example #28
    def dataset_manipulation(self):
        self.dataset = SupervisedDataSet(len(lib.entrada[0]),
                                         len(lib.saida[0]))

        ## Number of neurons in Hidden Layer
        nr_neurons = self.page_2.sb_nr_neurons.value()

        ## Number of epochs
        nr_epochs = self.page_2.sb_nr_epochs.value()

        ## Learning rate:
        learn_rate = self.page_2.sb_rate.value()

        ## Momentum:
        momentum = self.page_2.sb_momentum.value()

        ## Adding Train Samples
        for i in range(lib.training):
            self.dataset.addSample(lib.entrada[i], lib.saida[i])
        print('Training: %d' % lib.training)

        ## Build Network
        self.network = buildNetwork(self.dataset.indim,
                                    nr_neurons,
                                    self.dataset.outdim,
                                    bias=True)

        ## Back Propagation Trainer
        self.trainer = BackpropTrainer(self.network, self.dataset, learn_rate,
                                       momentum)

        self.page_2.count_1.setText(str(lib.training))
        self.page_2.count_2.setText(str(lib.validation))
        self.page_2.count_3.setText(str(lib.testing))
        QtGui.QApplication.processEvents()

        self.train_epochs(nr_epochs)
Example #29
def gradientCheck(module, tolerance=0.0001, dataset=None):
    """ check the gradient of a module with a randomly generated dataset,
    (and, in the case of a network, determine which modules contain incorrect derivatives). """
    if module.paramdim == 0:
        print('Module has no parameters')
        return True
    if dataset:
        d = dataset
    else:
        d = buildAppropriateDataset(module)
    b = BackpropTrainer(module)
    res = b._checkGradient(d, True)
    # compute average precision on every parameter
    precision = zeros(module.paramdim)
    for seqres in res:
        for i, p in enumerate(seqres):
            if p[0] == 0 and p[1] == 0:
                precision[i] = 0
            else:
                precision[i] += abs((p[0] + p[1]) / (p[0] - p[1]))
    precision /= len(res)
    if max(precision) < tolerance:
        print('Perfect gradient')
        return True
    else:
        print('Incorrect gradient', precision)
        if isinstance(module, Network):
            index = 0
            for m in module._containerIterator():
                if max(precision[index:index + m.paramdim]) > tolerance:
                    print('Incorrect module:', m,
                          res[-1][index:index + m.paramdim])
                index += m.paramdim
        else:
            print(res)
        return False
Example #30
def get_third_nn(value, good_data, bad_data):
    build_network = FeedForwardNetwork()
    inLayer = LinearLayer(len(good_data[0]))
    hiddenLayer = SigmoidLayer(value)
    outLayer = SigmoidLayer(1)

    build_network.addInputModule(inLayer)
    build_network.addModule(hiddenLayer)
    build_network.addOutputModule(outLayer)

    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    in_to_out = FullConnection(inLayer, outLayer)

    build_network.addConnection(in_to_hidden)
    build_network.addConnection(hidden_to_out)
    build_network.addConnection(in_to_out)

    build_network.sortModules()
    trainer = BackpropTrainer(build_network,
                              get_supervised_data_set(good_data, bad_data))

    # trainUntilConvergence returns (training_errors, validation_errors);
    # return the final training error
    result = trainer.trainUntilConvergence()
    return result[0][-1]
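
(A hypothetical invocation, assuming get_supervised_data_set, defined elsewhere in the same project, builds a SupervisedDataSet from the two sample lists; the data below is illustrative.)

good_data = [[0.9, 0.8], [0.7, 0.6]]  # illustrative feature vectors
bad_data = [[0.1, 0.2], [0.3, 0.1]]
final_train_error = get_third_nn(5, good_data, bad_data)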