Example No. 1
def buildModel(numOfPoints, workingDir):
    # Ask for the model-building parameters
    print "\nAlright, let's build the model.\n"
    try:
        numOfSamples = int(raw_input("How many samples per class: "))
    except ValueError:
        print "Not a number"
        sys.exit(0)

    try:
        epochs = int(raw_input("Max training iterations: "))
    except ValueError:
        print "Not a number"
        sys.exit(0)

    fftData = readRGBData(workingDir, numOfSamples,
                          numOfPoints / 2)  # Read sample data
    printRGBData(fftData, numOfSamples, numOfPoints / 2,
                 workingDir)  # Export data in case external testing is desired
    # Train the backprop network and time how long it takes
    t0 = time()
    fnn = trainNetwork(fftData, numOfSamples, numOfPoints / 2, epochs)
    t1 = time()
    print 'It took %s to build the NN' % str(
        datetime.timedelta(seconds=(t1 - t0)))
    # Save backprop net using NetworkWriter
    NetworkWriter.writeToFile(fnn, workingDir + '/fnn.xml')
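A network saved this way can be loaded back later with NetworkReader, the counterpart of NetworkWriter used throughout these examples. A minimal sketch, assuming the fnn.xml written above and a placeholder feature vector sample of length numOfPoints / 2:

from pybrain.tools.customxml.networkreader import NetworkReader

fnn = NetworkReader.readFrom(workingDir + '/fnn.xml')  # file written by buildModel above
print fnn.activate(sample)  # 'sample' is a placeholder FFT feature vector, not part of the original example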
Example No. 2
def runTest(hidden_layer=3,
            learning_rate=0.1,
            momentum=0.5,
            epochs=5000,
            filename='RCNetwork2.xml'):
    ds = buildDataSet()
    tstdata, trndata = ds.splitWithProportion(0.25)
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()
    net = buildNetwork(hidden_layer)
    #define the connections
    trainer = BackpropTrainer(net,
                              dataset=trndata,
                              momentum=momentum,
                              verbose=False,
                              weightdecay=learning_rate)
    #trainer = BackpropTrainer(net, learningrate = 0.01, dataset = ds, momentum = 0.99, verbose = True)
    trainer.trainEpochs(epochs)
    trnresult = percentError(trainer.testOnClassData(), trndata['class'])
    tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                             tstdata['class'])
    print filename
    print "epoch: %4d" % trainer.totalepochs, \
          "  train error: %5.2f%%" % trnresult, \
          "  test error: %5.2f%%" % tstresult
    #trainer.train()
    print 'Final weights:', net.params

    NetworkWriter.writeToFile(net, filename)
Example No. 3
    def train(self, network, valid_bp, path):
        """
        Train until convergence, stopping when the validation error has not
        improved for 1000 consecutive epochs

        :param network: model
        :type network: NeuralNetwork.NeuralNetwork
        :param valid_bp: Validation set
        :type valid_bp: SupervisedDataSet
        :param path: Path where to save the trained model
        :type path: str
        :return: None
        :rtype: None
        """
        epochs = 0
        continue_epochs = 0
        # best_epoch = 0
        NetworkWriter.writeToFile(network.network, path)
        min_error = network.valid(valid_bp)
        while True:
            train_error = self.trainer.train()
            valid_error = network.valid(valid_bp)
            if valid_error < min_error:
                min_error = valid_error
                # best_epoch = epochs
                NetworkWriter.writeToFile(network.network, path)
                continue_epochs = 0
            self.training_errors.append(train_error)
            self.validation_errors.append(valid_error)
            epochs += 1
            continue_epochs += 1
            # print str(epochs) + " " + str(continue_epochs) + " " + str(best_epoch)
            if continue_epochs > 1000:
                break
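The docstring above describes a standard early-stopping loop: keep training, checkpoint the network whenever the validation error improves, and stop after 1000 epochs without improvement. Roughly the same pattern can be sketched with plain PyBrain calls (a minimal sketch, assuming an already-built trainer, its network net, and a validation SupervisedDataSet valid_ds; testOnData stands in for the wrapper's network.valid):

min_error = trainer.testOnData(valid_ds)
NetworkWriter.writeToFile(net, 'best.xml')   # checkpoint the initial weights
stale_epochs = 0
while stale_epochs <= 1000:
    trainer.train()                          # one epoch over the training set
    valid_error = trainer.testOnData(valid_ds)
    if valid_error < min_error:              # improved: checkpoint and reset the counter
        min_error = valid_error
        NetworkWriter.writeToFile(net, 'best.xml')
        stale_epochs = 0
    else:
        stale_epochs += 1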
Example No. 4
def createAndTrainNetworkFromFile(curs_filename,
                                  count_input_samples,
                                  count_samples,
                                  net_filename,
                                  count_layers=33,
                                  count_outputs=1,
                                  max_epochs=15000,
                                  min_epochs=300):
    net = buildNetwork(count_input_samples, count_layers, count_outputs)
    ds = SupervisedDataSet(count_input_samples, count_outputs)
    wb = load_workbook(filename=curs_filename)
    ws = wb.active
    for i in range(0, count_samples):
        loaded_data = []
        for j in range(0, count_input_samples + 1):
            loaded_data.append(
                round(float(ws.cell(row=i + 1, column=j + 1).value), 4))
            #ds.addSample(loaded_data[:-1], loaded_data[-1])
        #print loaded_data[:-1], loaded_data[-1]
        ds.addSample(loaded_data[:-1], loaded_data[-1])
    trainer = RPropMinusTrainer(net, verbose=True)
    trainer.setData(ds)
    a = trainer.trainUntilConvergence(maxEpochs=max_epochs,
                                      continueEpochs=min_epochs,
                                      validationProportion=0.15)
    net_filename = net_filename[:-4] + str(a[0][-1]) + '.xml'
    NetworkWriter.writeToFile(net, net_filename)
    result_list = [a, net_filename]
    return result_list
Example No. 5
def SaveNet(self, filename=None):
    if filename is None:
        NetworkWriter.writeToFile(
            self.net, '%s  %s_%s_%s.xml' %
            (self.err, self.inputsize, self.hiden, self.outputsize))
    else:
        NetworkWriter.writeToFile(self.net, filename)
    def _InitNet(self):

        # -----------------------------------------------------------------------
        self._pr_line();
        print("| _InitNet(self): \n");
        start_time = time.time();
        # -----------------------------------------------------------------------
        if self._NET_NAME:
            
            # -----------------------------------------------------------------------
            self._SDS = SupervisedDataSet(900, 52); 

            if self._NET_NEW:

                print('| Building new NET: '+self._NET_NAME)
                self._NET = buildNetwork(self._SDS.indim, self._NET_HIDDEN, self._SDS.outdim, bias=True); #,hiddenclass=TanhLayer)
                self._SaveNET();
            else:

                print('| Reading NET from: '+self._NET_NAME)
                self._NET = NetworkReader.readFrom(self._NET_NAME)
            # -----------------------------------------------------------------------
            print('| Making AutoBAK: '+str(self._MK_AUTO_BAK))
            
            if self._MK_AUTO_BAK:
                NetworkWriter.writeToFile(self._NET, self._NET_NAME+".AUTO_BAK.xml");
            # -----------------------------------------------------------------------
            print("| Done in: "+str(time.time()-start_time)+'sec');
            # -----------------------------------------------------------------------

        else:
            
            print('| Unknown NET name: >|'+self._NET_NAME+'|<')
            exit();
Example No. 7
def entrenarSonoliento(red):
    # Initialize the dataset
    ds = SupervisedDataSet(4096,1)

    """Se crea el dataset, para ello procesamos cada una de las imagenes obteniendo los rostros,
       luego se le asignan los valores deseados del resultado la red neuronal."""


    for i,c in enumerate(os.listdir(os.path.dirname(path + '/static/img/sleepy/'))):
        a = 0
        while a < 50:
            try:
                a += 1 
                im3 = cv2.imread(path + '/static/img/sleepy/'+c)
                procesado = p.procesarImagen(im3)
                cara = d.deteccionFacial1(procesado)
                ds.addSample(cara.flatten(),10)
            except:
                pass
            

    trainer = BackpropTrainer(red, ds)
    print "Entrenando hasta converger"
    trainer.trainOnDataset(ds,100)
    NetworkWriter.writeToFile(red, 'rna_somnolencia.xml')



# to train the operator network manually
#red_operador = NetworkReader.readFrom('rna_operador.xml')
#entrenarOperador(red_operador)

# to train the drowsiness network manually
#red_somno = NetworkReader.readFrom('rna_somnolencia.xml')
#entrenarSonoliento(red_somno)
Example No. 8
def entrenarSomnolencia(red):
    # Initialize the dataset
    ds = SupervisedDataSet(4096,1)

    """Se crea el dataset, para ello procesamos cada una de las imagenes obteniendo los rostros,
       luego se le asignan los valores deseados del resultado la red neuronal."""

    print "Somnolencia - cara"
    for i,c in enumerate(os.listdir(os.path.dirname('/home/taberu/Imágenes/img_tesis/somnoliento/'))):
        try:
            im = cv2.imread('/home/taberu/Imágenes/img_tesis/somnoliento/'+c)
            pim = pi.procesarImagen(im)
            cara = d.deteccionFacial(pim)
            if cara is None:
                print "No face found"
            else:
                print i
                ds.appendLinked(cara.flatten(),10)
        except:
            pass

    trainer = BackpropTrainer(red, ds)
    print "Entrenando hasta converger"
    trainer.trainUntilConvergence()
    NetworkWriter.writeToFile(red, 'rna_somnolencia.xml')
Example No. 9
    def train(self, network, valid_bp, path):
        """
        Train until convergence, stopping when the validation error has not
        improved for 1000 consecutive epochs

        :param network: model
        :type network: NeuralNetwork.NeuralNetwork
        :param valid_bp: Validation set
        :type valid_bp: SupervisedDataSet
        :param path: Path where to save the trained model
        :type path: str
        :return: None
        :rtype: None
        """
        epochs = 0
        continue_epochs = 0
        # best_epoch = 0
        NetworkWriter.writeToFile(network.network, path)
        min_error = network.valid(valid_bp)
        while True:
            train_error = self.trainer.train()
            valid_error = network.valid(valid_bp)
            if valid_error < min_error:
                min_error = valid_error
                # best_epoch = epochs
                NetworkWriter.writeToFile(network.network, path)
                continue_epochs = 0
            self.training_errors.append(train_error)
            self.validation_errors.append(valid_error)
            epochs += 1
            continue_epochs += 1
            # print str(epochs) + " " + str(continue_epochs) + " " + str(best_epoch)
            if continue_epochs > 1000:
                break
Example No. 10
def big_training(np_data, num_nets=1, num_epoch=20, net_builder=net_full, train_size=.1, testing=False):
    sss = cross_validation.StratifiedShuffleSplit(np_data[:,:1].ravel(), n_iter=num_nets , test_size=1-train_size, random_state=3476)
    nets=[None for net_ind in range(num_nets)]
    trainaccu=[[0 for i in range(num_epoch)] for net_ind in range(num_nets)]
    testaccu=[[0 for i in range(num_epoch)] for net_ind in range(num_nets)]
    net_ind=0
    for train_index, test_index in sss:
        print ('%s Building %d. network.' %(time.ctime(), net_ind+1))
        #print("TRAIN:", len(train_index), "TEST:", len(test_index))
        trainset = ClassificationDataSet(np_data.shape[1] - 1, 1)
        trainset.setField('input', np_data[train_index,1:]/100-.6)
        trainset.setField('target', np_data[train_index,:1])
        trainset._convertToOneOfMany( )
        trainlabels = trainset['class'].ravel().tolist()
        if testing:
            testset = ClassificationDataSet(np_data.shape[1] - 1, 1)
            testset.setField('input', np_data[test_index,1:]/100-.6)
            testset.setField('target', np_data[test_index,:1])
            testset._convertToOneOfMany( )
            testlabels = testset['class'].ravel().tolist()
        nets[net_ind] = net_builder()
        trainer = BackpropTrainer(nets[net_ind], trainset)
        for i in range(num_epoch):
            for ii in range(3):
                err = trainer.train()
            print ('%s Epoch %d: Network trained with error %f.' %(time.ctime(), i+1, err))
            trainaccu[net_ind][i]=accuracy_score(trainlabels,trainer.testOnClassData())
            print ('%s Epoch %d: Train accuracy is %f' %(time.ctime(), i+1, trainaccu[net_ind][i]))
            print ([sum([trainaccu[y][i]>tres for y in range(net_ind+1)]) for tres in [0,.1,.2,.3,.4,.5,.6]])
            if testing:
                testaccu[net_ind][i]=accuracy_score(testlabels,trainer.testOnClassData(testset))
                print ('%s Epoch %d: Test accuracy is %f' %(time.ctime(), i+1, testaccu[net_ind][i]))
        NetworkWriter.writeToFile(nets[net_ind], 'nets/'+net_builder.__name__+str(net_ind)+'.xml')
        net_ind +=1
    return [nets, trainaccu, testaccu]
Example No. 11
def begin1():

    cbf = readFromCsv("cbf2")
    numdataset = np.array(cbf, dtype=np.float64)
    # training data, validation data, today's data
    tgdataset, vadataset, tydata = dataSplit(numdataset)
    # normalization parameters
    gydata, dmean, dstd = gyData(tgdataset)

    # validation data and today's data
    gyvadata = calFeature(vadataset, dmean, dstd)
    gytydata = calFeature(tydata, dmean, dstd)

    # neural network
    trainingset = buildTrainingSet(gydata)

    for i in range(1000):
        net = buildNetwork(15,
                           8,
                           1,
                           bias=True,
                           hiddenclass=TanhLayer,
                           outclass=TanhLayer)
        trainer = BackpropTrainer(net, trainingset)
        trainer.trainEpochs(epochs=100)
        rate = va.calRightRate(gyvadata, net)
        if rate > 0.6:
            NetworkWriter.writeToFile(
                net, '../netv3/zxtx_8l_100t_6_' + str(rate) + ".xml")
            print(va.calRightRate(gyvadata, net))
            print(va.calRightRate(gytydata, net))
        print(str(i) + " times " + str(rate))


# begin1();
def save(self, filename, desc=None):
    NetworkWriter.writeToFile(self.net, filename + '.xml')
    params = {'labels': self.labels,
              'mean': self.mean.tolist(),
              'std': self.std.tolist()}
    with open(filename + '.yaml', 'w') as f:
        f.write(yaml.dump(params, default_flow_style=False))
def save(self, path):
    """
    This function saves the neural network.
    Args:
    :param path (String): the path where the neural network is going to be saved.
    """
    NetworkWriter.writeToFile(self.network, path)
Example No. 14
def nntester(tx, ty, rx, ry, iterations):
    """
    builds, tests, and graphs a neural network over a series of trials as it is
    constructed
    """
    resultst = []
    resultsr = []
    positions = range(iterations)
    network = buildNetwork(100, 50, 1, bias=True)
    ds = ClassificationDataSet(100,1, class_labels=["valley", "hill"])
    for i in xrange(len(tx)):
        ds.addSample(tx[i], [ty[i]])
    trainer = BackpropTrainer(network, ds, learningrate=0.01)
    for i in positions:
        print trainer.train()
        resultst.append(sum((np.array([round(network.activate(test)) for test in tx]) - ty)**2)/float(len(ty)))
        resultsr.append(sum((np.array([round(network.activate(test)) for test in rx]) - ry)**2)/float(len(ry)))
        print i, resultst[i], resultsr[i]
    NetworkWriter.writeToFile(network, "network.xml")
    plt.plot(positions, resultst, 'ro', positions, resultsr, 'bo')
    plt.axis([0, iterations, 0, 1])
    plt.ylabel("Percent Error")
    plt.xlabel("Network Epoch")
    plt.title("Neural Network Error")
    plt.savefig('3Lnn.png', dpi=300)
Example No. 15
def process_symbol(net, symbol):
 print "processing ", symbol
 # first check train_data; if there is no training data there is no need to continue
 train_data = load(symbol+'.train')
 if (len(train_data) == 0):
  print "--no training data, skip", symbol
  return
 print "-traing data loaded"
 data = load_stockdata(symbol)
 if (len(data) == 0):
  print "--no data, skip", symbol
  return
 print "-stock data loaded"
 settings = load_settings(symbol,data)
 if(len(settings) == 0):
  print "--no settings, skip", symbol
  return
 print "-settings loaded"
 # now all the data is available
 ds = build_dataset(data, train_data, settings)
 print "-train"
 trainer = BackpropTrainer(net, ds)
 trainer.trainEpochs(epochs)
 print "-saving network"
 NetworkWriter.writeToFile(net, 'network.xml') 
 return net
Example No. 16
def nntester(tx, ty, rx, ry, iterations):
    """
    builds, tests, and graphs a neural network over a series of trials as it is
    constructed
    """
    resultst = []
    resultsr = []
    positions = range(iterations)
    network = buildNetwork(100, 50, 1, bias=True)
    ds = ClassificationDataSet(100, 1, class_labels=["valley", "hill"])
    for i in xrange(len(tx)):
        ds.addSample(tx[i], [ty[i]])
    trainer = BackpropTrainer(network, ds, learningrate=0.01)
    for i in positions:
        print trainer.train()
        resultst.append(
            sum((np.array([round(network.activate(test))
                           for test in tx]) - ty)**2) / float(len(ty)))
        resultsr.append(
            sum((np.array([round(network.activate(test))
                           for test in rx]) - ry)**2) / float(len(ry)))
        print i, resultst[i], resultsr[i]
    NetworkWriter.writeToFile(network, "network.xml")
    plt.plot(positions, resultst, 'ro', positions, resultsr, 'bo')
    plt.axis([0, iterations, 0, 1])
    plt.ylabel("Percent Error")
    plt.xlabel("Network Epoch")
    plt.title("Neural Network Error")
    plt.savefig('3Lnn.png', dpi=300)
Example No. 17
def createNet():
	"""Create and seed the intial neural network"""
	#CONSTANTS
	nn_input_dim = 6 #[x_enemy1, y_enemy1, x_enemy2, y_enemy2, x_enemy3, y_enemy3]
	nn_output_dim = 6 #[x_ally1, y_ally1, x_ally2, y_ally2, x_ally3, y_ally3]

	allyTrainingPos, enemyTrainingPos = runExperiments.makeTrainingDataset()

	ds = SupervisedDataSet(nn_input_dim, nn_output_dim)

	#normalizes and adds it to the dataset
	for i in range(0, len(allyTrainingPos)):
		x = normalize(enemyTrainingPos[i])
		y = normalize(allyTrainingPos[i])
		x = [val for pair in x for val in pair]
		y = [val for pair in y for val in pair]
		ds.addSample(x, y)

	for inpt, target in ds:
		print inpt, target

	net = buildNetwork(nn_input_dim, 30, nn_output_dim, bias=True, hiddenclass=TanhLayer)
	trainer = BackpropTrainer(net, ds)
	trainer.trainUntilConvergence()
	NetworkWriter.writeToFile(net, "net.xml")
	enemyTestPos = runExperiments.makeTestDataset()
	print(net.activate([val for pair in normalize(enemyTestPos) for val in pair]))
	return ds
Example No. 18
def training(d):
    # net = buildNetwork(d.indim, 55, d.outdim, bias=True,recurrent=False, hiddenclass =SigmoidLayer , outclass = SoftmaxLayer)
    net = FeedForwardNetwork()
    inLayer = SigmoidLayer(d.indim)
    hiddenLayer1 = SigmoidLayer(d.outdim)
    hiddenLayer2 = SigmoidLayer(d.outdim)
    outLayer = SigmoidLayer(d.outdim)

    net.addInputModule(inLayer)
    net.addModule(hiddenLayer1)
    net.addModule(hiddenLayer2)
    net.addOutputModule(outLayer)

    in_to_hidden = FullConnection(inLayer, hiddenLayer1)
    hidden_to_hidden = FullConnection(hiddenLayer1, hiddenLayer2)
    hidden_to_out = FullConnection(hiddenLayer2, outLayer)

    net.addConnection(in_to_hidden)
    net.addConnection(hidden_to_hidden)
    net.addConnection(hidden_to_out)

    net.sortModules()
    print net

    t = BackpropTrainer(net, d, learningrate = 0.9,momentum=0.9, weightdecay=0.01, verbose = True)
    t.trainUntilConvergence(continueEpochs=1200, maxEpochs=1000)
    NetworkWriter.writeToFile(net, 'myNetwork'+str(time.time())+'.xml')
    return t
def main():
    agent = SimpleMLPMarioAgent(
        10,
        inGridSize=3,
    )
    print agent.name
    NetworkWriter.writeToFile(agent.module,
                              "../temp/MarioNetwork-" + agent.name + ".xml")

    task = MarioTask(agent.name, timeLimit=200)
    exp = EpisodicExperiment(task, agent)
    res = 0
    cumul = 0
    for seed in [0]:
        for difficulty in [0, 3, 5, 10]:
            task.env.levelSeed = seed
            task.env.levelDifficulty = difficulty

            exp.doEpisodes(1)
            print 'Difficulty: %d, Seed: %d, Fitness: %.2f' % (
                difficulty, seed, task.reward)
            cumul += task.reward
            if task.reward < 4000:
                break
            res += 1
    print res
    print agent.module.inputbuffer * 1.
Example No. 20
def _learn():

    global _TRAIN_RATE;

    _LEARNINGS_GRADE = 0.00012; # 0.00012 == correct
    #_LEARNINGS_GRADE = 0.0012; 
    #_LEARNINGS_GRADE = 0.012; 
    #_LEARNINGS_GRADE = 0.12; 
    #_LEARNINGS_GRADE = 0.80; 
    #_LEARNINGS_GRADE = 1.4;
    #_LEARNINGS_GRADE = 6.2;
    #_LEARNINGS_GRADE = 10.2;
    _LEARNINGS_GRADE = 20.2;

    #_TRAIN_RATE = float(str(_TRAINER.train())); 
    _SECS = int( str(time.time()).split('.')[0] );

    while _TRAIN_RATE > _LEARNINGS_GRADE:

        _TRAIN_RATE = float(str(_TRAINER.train()));
        #NetworkWriter.writeToFile(_NET, str(str(_TRAIN_RATE).split(":")[1])+"_"+_NET_NAME+".AUTO_SAVE.xml")
        NetworkWriter.writeToFile(_NET, "_"+str(_TRAIN_RATE)+"_"+_NET_NAME+".xml")

        print("Learn-Duration: "+str(time.strftime("%H:%M:%S", time.localtime(int( str(time.time()).split('.')[0] )-_SECS))));
        _SECS = int( str(time.time()).split('.')[0] );

    if _TRAIN_RATE < _LEARNINGS_GRADE:
        print('Network ready.');
Example No. 21
def train(X, y):
    """ Trains and predicts dataset with a Neural Network classifier """

    ds = ClassificationDataSet(len(X.columns), 1, nb_classes=2)
    for k in xrange(len(X)):
        ds.addSample(X.iloc[k], np.array(y[k]))
    tstdata, trndata = ds.splitWithProportion(0.20)
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()
    input_size = len(X.columns)
    target_size = 1
    hidden_size = 5
    fnn = None
    if os.path.isfile('fnn.xml'):
        fnn = NetworkReader.readFrom('fnn.xml')
    else:
        fnn = buildNetwork(trndata.indim,
                           hidden_size,
                           trndata.outdim,
                           outclass=SoftmaxLayer)
    trainer = BackpropTrainer(fnn,
                              dataset=trndata,
                              momentum=0.05,
                              learningrate=0.1,
                              verbose=False,
                              weightdecay=0.01)

    trainer.trainUntilConvergence(verbose=False,
                                  validationProportion=0.15,
                                  maxEpochs=100,
                                  continueEpochs=10)
    NetworkWriter.writeToFile(fnn, 'oliv.xml')
    predictions = trainer.testOnClassData(dataset=tstdata)
    return tstdata['class'], predictions
Example No. 22
def save(self, filename, desc=None):
    NetworkWriter.writeToFile(self.net, filename + '.xml')
    params = {
        'labels': self.labels,
        'mean': self.mean.tolist(),
        'std': self.std.tolist()
    }
    with open(filename + '.yaml', 'w') as f:
        f.write(yaml.dump(params, default_flow_style=False))
Example No. 23
def saveNetwork(net, name):
    """Экспорт нейронной сети в файл

    Аргументы:
    net - нейронная сеть, PyBrain network
    name -- имя файла, строка

    """
    NetworkWriter.writeToFile(net, name)
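A possible way to call this helper (a hedged sketch; the buildNetwork dimensions and the file name are illustrative, not part of the original example):

from pybrain.tools.shortcuts import buildNetwork

net = buildNetwork(2, 3, 1, bias=True)  # any PyBrain network works here
saveNetwork(net, 'my_net.xml')          # writes the network to my_net.xml via NetworkWriter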
Example No. 24
    def saveToFile(self):
        if self.net is not None:
            if self.major:
                NetworkWriter.writeToFile(self.net, TRAINED_DATA_FILEPATH_MAJOR)
            else:
                NetworkWriter.writeToFile(self.net, TRAINED_DATA_FILEPATH_MINOR)

        else:
            print "Cannot save nothing"
Example No. 25
def main():
    #agent1 = SimpleMLPMarioAgent(2)
    #agent1 = MLPMarioAgent(4)
    #agent1 = MdrnnAgent()
    
    agent1 = SimpleMdrnnAgent()
    print agent1.name
    NetworkWriter.writeToFile(agent1.module, "../temp/MarioNetwork-"+agent1.name+".xml")
    f = combinedScore(agent1)
    print "\nTotal:", f
Example No. 26
def trainNetwork(net, sample_list, validate_list, net_filename, max_epochs=5500, min_epochs=300):
    count_input_samples = len(sample_list)
    count_outputs = len(validate_list)
    ds = SupervisedDataSet(count_input_samples, count_outputs)
    ds.addSample(sample_list, validate_list)
    trainer = RPropMinusTrainer(net, verbose=True)
    trainer.setData(ds)
    trainer.trainUntilConvergence(maxEpochs=max_epochs, continueEpochs=min_epochs)
    NetworkWriter.writeToFile(net, net_filename)
    return net
Example No. 27
def nn(tx, ty, rx, ry, iterations):
    network = buildNetwork(14, 5, 5, 1)
    ds = ClassificationDataSet(14,1, class_labels=["<50K", ">=50K"])
    for i in xrange(len(tx)):
        ds.addSample(tx[i], [ty[i]])
    trainer = BackpropTrainer(network, ds)
    trainer.trainOnDataset(ds, iterations)
    NetworkWriter.writeToFile(network, "network.xml")
    results = sum((np.array([round(network.activate(test)) for test in rx]) - ry)**2)/float(len(ry))
    return results
def main():
    #agent1 = SimpleMLPMarioAgent(2)
    #agent1 = MLPMarioAgent(4)
    #agent1 = MdrnnAgent()

    agent1 = SimpleMdrnnAgent()
    print agent1.name
    NetworkWriter.writeToFile(agent1.module,
                              "../temp/MarioNetwork-" + agent1.name + ".xml")
    f = combinedScore(agent1)
    print "\nTotal:", f
Example No. 29
def nn(tx, ty, rx, ry, iterations):
    network = buildNetwork(14, 5, 5, 1)
    ds = ClassificationDataSet(14, 1, class_labels=["<50K", ">=50K"])
    for i in xrange(len(tx)):
        ds.addSample(tx[i], [ty[i]])
    trainer = BackpropTrainer(network, ds)
    trainer.trainOnDataset(ds, iterations)
    NetworkWriter.writeToFile(network, "network.xml")
    results = sum((np.array([round(network.activate(test))
                             for test in rx]) - ry)**2) / float(len(ry))
    return results
Example No. 30
def train(net,ds):
 trainer = BackpropTrainer(net, ds)
 #for i in range(0,epochs):
 # print trainer.train()
 #trainer.trainEpochs(epochs)
 print "-e", trainer.train()
 #trainer.trainUntilConvergence()
 trainer.trainEpochs(epochs)
 print "-e", trainer.train()
 print "-saving network"
 NetworkWriter.writeToFile(net, network_file)
 return net
Example No. 31
    def train(self):

        self.done = False
        trainer = BackpropTrainer(self.network, learningrate=0.2)

        train_until_convergence(trainer=trainer, dataset=self.ds,
                                max_error=self.max_error,
                                max_epochs=self.epochs)

        self.done = True
        if self.save:
            NetworkWriter.writeToFile(self.network, self.save)
Example No. 32
def save_network(self, name_of_the_net):
    '''
    Save the network and the index of the test data
    '''
    print "Saving the trained network and test data index"
    if self.network is None:
        print "Network has not been trained!!"
    else:
        NetworkWriter.writeToFile(self.network, name_of_the_net)
        fileName = name_of_the_net.replace('.xml', '')
        fileName = fileName + '_testIndex.txt'
        np.savetxt(fileName, self.tstIndex)
        print "Saving Finished"
Example No. 33
    def train(self):

        self.done = False
        trainer = BackpropTrainer(self.network, learningrate=0.2)

        train_until_convergence(trainer=trainer,
                                dataset=self.ds,
                                max_error=self.max_error,
                                max_epochs=self.epochs)

        self.done = True
        if self.save:
            NetworkWriter.writeToFile(self.network, self.save)
Example No. 34
def _train(X, Y, filename, epochs=50):
    global nn
    nn = buildNetwork(INPUT_SIZE, HIDDEN_LAYERS, OUTPUT_LAYER, bias=True, outclass=SoftmaxLayer)
    ds = ClassificationDataSet(INPUT_SIZE, OUTPUT_LAYER)
    for x, y in zip(X, Y):
        ds.addSample(x, y)
    trainer = BackpropTrainer(nn, ds)
    for i in xrange(epochs):
        error = trainer.train()
        print "Epoch: %d, Error: %7.4f" % (i+1, error)
    # trainer.trainUntilConvergence(verbose=True, maxEpochs=epochs, continueEpochs=10)
    if filename:
        NetworkWriter.writeToFile(nn, 'data/' + filename + '.nn')
Example No. 35
def writeAgentNet(score, counter):
    NetworkWriter.writeToFile(
        agent.module,
        "../temp/"
        + str(port)
        + "x/SimpleMdrnnANet-fit:"
        + str(round(score, 2))
        + "-after:"
        + str(counter)
        + "-"
        + str(idd)
        + ".xml",
    )
Example No. 36
def createAndTrainNetworkFromList(train_list, count_input_samples, net_filename, count_layers=33,
                          count_outputs=1, max_epochs=15000, min_epochs=300):
    net = buildNetwork(count_input_samples, count_layers, count_outputs)
    ds = SupervisedDataSet(count_input_samples, count_outputs)
    count_samples = len(train_list)
    for i in range(0, count_samples):
        ds.addSample(train_list[i][:-count_outputs], train_list[i][-count_outputs])
    trainer = RPropMinusTrainer(net, verbose=True)
    trainer.setData(ds)
    a = trainer.trainUntilConvergence(maxEpochs=max_epochs, continueEpochs=min_epochs, validationProportion=0.15)
    net_filename = net_filename[:-4]+str(a[0][-1])+'.xml'
    NetworkWriter.writeToFile(net, net_filename)
    result_list = [a, net_filename]
    return result_list
def trainNetworks(base,nets,ds):
	trainedNets = []
	for (i,n) in enumerate(nets):
		print "training",i
		start = clock()
		trainer = BackpropTrainer(n,ds)
		trainer.train()
		end = clock()
		duration = end - start
		NetworkWriter.writeToFile(trainer.module,base + str(i) + "/net.xml") #Save the network after training
		writeFile(base + str(i) + "/trainTime.txt","The network took " + str(duration) + "seconds to train.") #Record training time
		trainedNets.append(trainer.module)
		print i,duration #Tell you when one has finished training, since these things take a while.
	return trainedNets
Example No. 38
        def predict_class(self,_x,_y,test_file,epochs,steps):
                print("Iniciando funcao predict_class() .............")


                traindata = self.ReadTrainFile(_x,_y)
                #testdata = self.ReadTestFile( test_file, len(_x[0]) )
                
                print ("____________________________________________________________________________")
                print ("A matrix de treino tem ", len(traindata),"linhas de dados")
                print ("Dimensoes de Input e Output : ", traindata.indim, traindata.outdim)
                print ("____________________________________________________________________________\n")
                

                print("convertendo arquivos .................")

                traindata._convertToOneOfMany( )
                #testdata._convertToOneOfMany( )

                import os.path
                if os.path.exists('rede_animal.xml'):
                    print(" Carregando a rede de treinos do arquivo rede_animal.xml *************** ")
                    fnn = NetworkReader.readFrom('rede_animal.xml')
                else:
                    print(" Criando rede de treinos no arquivo rede_animal.xml *************** ")
                    fnn = buildNetwork( traindata.indim, 5, traindata.outdim, outclass=SoftmaxLayer )

                trainer = BackpropTrainer( fnn, dataset=traindata, momentum=0.1, verbose=True, weightdecay=0.01)

                print("Treinando .............")
                
                for i in range(epochs):
                        print("Treinando epoca ", i)
                        trainer.trainEpochs( steps )
                        NetworkWriter.writeToFile(fnn, 'rede_animal.xml')
                        print(" Rede salva em rede_animal.xml (Ok) ")

                print("Lendo arquivo de teste e classificando ..........")
                print("Gerando resultados em ANIMAL_OUTPUT.CSV ..........")
                output = open('animal_output.csv', 'wb')
                i=1
                output.write("ID,Adoption,Died,Euthanasia,Return_to_owner,Transfer\n")
                for line in open(test_file, 'r'):
                        x = ast.literal_eval(line)
                        output.write( "{},{},{},{},{},{} \n".format(i,fnn.activate( x )[0],fnn.activate( x )[1],fnn.activate( x )[2],fnn.activate( x )[3],fnn.activate( x )[4]) )
                        i=i+1   
                print("Concluido")

                
Example No. 39
def trainNetwork(net,
                 sample_list,
                 validate_list,
                 net_filename,
                 max_epochs=5500,
                 min_epochs=300):
    count_input_samples = len(sample_list)
    count_outputs = len(validate_list)
    ds = SupervisedDataSet(count_input_samples, count_outputs)
    ds.addSample(sample_list, validate_list)
    trainer = RPropMinusTrainer(net, verbose=True)
    trainer.setData(ds)
    trainer.trainUntilConvergence(maxEpochs=max_epochs,
                                  continueEpochs=min_epochs)
    NetworkWriter.writeToFile(net, net_filename)
    return net
    def _SaveNET(self):

        # -----------------------------------------------------------------------
        try:
            self._pr_line();
            print("|  _SaveNET(self): \n");
            start_time = time.time();

            # -----------------------------------------------------------------------
            new_net_name_tmp = "_"+str(self._TRAIN_RATE)[0:6]+"_"+self._NET_NAME;

            print('|  Saving NET to: '+new_net_name_tmp)
            NetworkWriter.writeToFile(self._NET, new_net_name_tmp);

            print("|  Done in: "+str(time.time()-start_time)+'sec');
        except Exception as _save_err:
            print('|  Unable to save NET: '+str(_save_err));
Example No. 41
def trainNet():
	global net
	print "Preparing dataset ..."
	ds = ClassificationDataSet(13, 1, nb_classes=4)
	for data in zip(train_data, train_results):
		ds.addSample(tuple(data[0].values()), [data[1]])
	ds._convertToOneOfMany()

	print "Training network ..."
	net = buildNetwork(ds.indim, 5, ds.outdim, outclass=SoftmaxLayer)
	trainer = BackpropTrainer(net, dataset=ds, momentum=0.1, verbose=True, weightdecay=0.01)
	
	for i in range(10):
		print "Training loop %d ..."%i
		trainer.trainEpochs(3)

	print "Saving network ..."
	NetworkWriter.writeToFile(net, 'network.xml')
Example No. 42
def createAndTrainNetworkFromFile(curs_filename, count_input_samples, count_samples, net_filename, count_layers=33,
                          count_outputs=1, max_epochs=15000, min_epochs=300):
    net = buildNetwork(count_input_samples, count_layers, count_outputs)
    ds = SupervisedDataSet(count_input_samples, count_outputs)
    wb = load_workbook(filename=curs_filename)
    ws = wb.active
    for i in range(0, count_samples):
        loaded_data = []
        for j in range(0, count_input_samples + 1):
            loaded_data.append(round(float(ws.cell(row=i+1, column=j+1).value), 4))
            #ds.addSample(loaded_data[:-1], loaded_data[-1])
        #print loaded_data[:-1], loaded_data[-1]
        ds.addSample(loaded_data[:-1], loaded_data[-1])
    trainer = RPropMinusTrainer(net, verbose=True)
    trainer.setData(ds)
    a = trainer.trainUntilConvergence(maxEpochs=max_epochs, continueEpochs=min_epochs, validationProportion=0.15)
    net_filename = net_filename[:-4]+str(a[0][-1])+'.xml'
    NetworkWriter.writeToFile(net, net_filename)
    result_list = [a, net_filename]
    return result_list
def bpnn():
    ds = SupervisedDataSet(9, 1)
    inputPatterns = Read()

    for p in inputPatterns:
        inputs = p[0]
        targets = p[1]

        inputs = tuple(map(lambda n: float(n) / 6000, inputs))
        targets = tuple(map(lambda n: float(n) / 6000, targets))
        ds.addSample(inputs, targets)
        print(inputs)
        print(targets)
    net = buildNetwork(9, 14, 1)
    trainer = BackpropTrainer(net, ds, verbose=True, learningrate=0.01)
    trainer.trainEpochs(2500)
    trainer.trainUntilConvergence(maxEpochs=3500)

    # save net
    NetworkWriter.writeToFile(net, '/home/wtq/BigData-MachineLearning/Bpnn/BusWorkNet.xml')
Example No. 44
def network_training(ds):
    """ Builiding and training network. """
    print "network training ..."
    tries = 2
    bias = True
    fast = False
    previous_error = 100
    epochs = 60
    layer_dim = 1
    for _ in xrange(tries):
        print " try: %4d" % _
        train_ds, test_ds = ds.splitWithProportion(0.7)
        try_net = buildNetwork(train_ds.indim, int(train_ds.indim*layer_dim), train_ds.outdim, hiddenclass=SigmoidLayer, outclass=SoftmaxLayer, bias=bias, fast=fast)
        trainer = BackpropTrainer(try_net, train_ds)
        trainer.trainEpochs(epochs)
        for mod in try_net.modules:
            print "Module:", mod.name
            if mod.paramdim > 0:
                print "--parameters:", mod.params
            for conn in try_net.connections[mod]:
                print "-connection to", conn.outmod.name
                if conn.paramdim > 0:
                    print "-parameters", conn.params
            if hasattr(try_net, "recurrentConns"):
                print "Recurrent connections"
                for conn in try_net.recurrentConns:             
                    print "-", conn.inmod.name, " to", conn.outmod.name
                    if conn.paramdim > 0:
                        print "- parameters", conn.params
        trnresult = percentError(trainer.testOnClassData(), train_ds['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=test_ds ), test_ds['class'])
        #print test_ds['target']
        print "epoch: %4d" % trainer.totalepochs, \
        " train error: %5.2f%%" % trnresult, \
        " test error: %5.2f%%" % tstresult
        if tstresult < previous_error:
            net = try_net
            previous_error = tstresult
            NetworkWriter.writeToFile(net, "net.xml")
            layer_dim = layer_dim * 2
Example No. 45
def trainNet():
    global net
    print "Preparing dataset ..."
    ds = ClassificationDataSet(13, 1, nb_classes=4)
    for data in zip(train_data, train_results):
        ds.addSample(tuple(data[0].values()), [data[1]])
    ds._convertToOneOfMany()

    print "Training network ..."
    net = buildNetwork(ds.indim, 5, ds.outdim, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(net,
                              dataset=ds,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)

    for i in range(10):
        print "Training loop %d ..." % i
        trainer.trainEpochs(3)

    print "Saving network ..."
    NetworkWriter.writeToFile(net, 'network.xml')
Example No. 46
def runTest(hidden_layer = 3, learning_rate = 0.1, momentum = 0.5, epochs = 5000, filename='RCNetwork2.xml'):
    ds = buildDataSet()
    tstdata, trndata = ds.splitWithProportion(0.25)
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()
    net = buildNetwork(hidden_layer)
    #define the connections
    trainer = BackpropTrainer(net, dataset=trndata, momentum=momentum, verbose=False, weightdecay=learning_rate)
    #trainer = BackpropTrainer(net, learningrate = 0.01, dataset = ds, momentum = 0.99, verbose = True)
    trainer.trainEpochs(epochs)
    trnresult = percentError( trainer.testOnClassData(),
                              trndata['class'] )
    tstresult = percentError( trainer.testOnClassData(
           dataset=tstdata ), tstdata['class'] )
    print filename
    print "epoch: %4d" % trainer.totalepochs, \
          "  train error: %5.2f%%" % trnresult, \
          "  test error: %5.2f%%" % tstresult
    #trainer.train()
    print 'Final weights:', net.params

    NetworkWriter.writeToFile(net, filename)
Example No. 47
def main():
	emotion={}
	dataset__generator(emotion)
	print('dataset generated')
	tstdata,trndata=ds.splitWithProportion(0.50)
	print('data splitted')
	#ds.getLength()
	trndata._convertToOneOfMany( )
	tstdata._convertToOneOfMany( )
	emotion={}
	if os.path.isfile('train.xml'):
		fnn=NetworkReader.readFrom('train.xml')
	else:
		fnn=buildNetwork(1292,3,2,outclass=SoftmaxLayer)
	NetworkWriter.writeToFile(fnn, 'train.xml')
	print('starting training')
	trainer=BackpropTrainer(fnn,dataset=trndata,momentum=0.1,verbose=True,weightdecay=0.01)	
	
	print('epoch level '+str(1000))
	i=10
	j1=range(10,200)
	temp=[]
	t=1
	while t<10:
		t=t+1
		i=random.choice(j1)
		temp.append(i)
		print('starting '+str(i))
		time.sleep(1)
		trainer.trainEpochs(i)
		NetworkWriter.writeToFile(fnn, 'train.xml')
		trnresult=percentError(trainer.testOnData(),trndata['class'])
		tstresult=percentError(trainer.testOnClassData(dataset=tstdata),tstdata['class'])
		temp.append([trnresult,tstresult])
		r_server.set('errortest'+str(i),tstresult)
		r_server.set('errortrain'+str(i),trnresult)
		
	for i in temp:
		print(i)
Example No. 48
def createAndTrainNetworkFromList(train_list,
                                  count_input_samples,
                                  net_filename,
                                  count_layers=33,
                                  count_outputs=1,
                                  max_epochs=15000,
                                  min_epochs=300):
    net = buildNetwork(count_input_samples, count_layers, count_outputs)
    ds = SupervisedDataSet(count_input_samples, count_outputs)
    count_samples = len(train_list)
    for i in range(0, count_samples):
        ds.addSample(train_list[i][:-count_outputs],
                     train_list[i][-count_outputs])
    trainer = RPropMinusTrainer(net, verbose=True)
    trainer.setData(ds)
    a = trainer.trainUntilConvergence(maxEpochs=max_epochs,
                                      continueEpochs=min_epochs,
                                      validationProportion=0.15)
    net_filename = net_filename[:-4] + str(a[0][-1]) + '.xml'
    NetworkWriter.writeToFile(net, net_filename)
    result_list = [a, net_filename]
    return result_list
net2 = buildNetwork(
    5,
    20,  # number of hidden units
    3,
    bias=True,
    hiddenclass=TanhLayer,
    outclass=LinearLayer)

net4 = buildNetwork(
    5,
    15,  # number of hidden units
    15,  # number of hidden units
    3,
    bias=True,
    hiddenclass=TanhLayer,
    outclass=LinearLayer)
#initialize the structures
net2.randomize()
net2.sortModules()
net4.randomize()
net4.sortModules()
#create trainers
#train for set amount of epochs
#save networks to disc
trainer2 = BackpropTrainer(net2, ds2, verbose=True)
trainer2.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net2, net_fold + 'network_Type2H1_TanH_NewSTD.xml')
trainer4 = BackpropTrainer(net4, ds2, verbose=True)
trainer4.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net4, net_fold + 'network_Type2H2__TanH_NewSTD.xml')

print 'Work completed. Check that the networks have been saved'
net2 = buildNetwork(
    5,
    20,  # number of hidden units
    3,
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)

net4 = buildNetwork(
    5,
    15,  # number of hidden units
    15,  # number of hidden units
    3,
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)
#initialize the structures
net2.randomize()
net2.sortModules()
net4.randomize()
net4.sortModules()
#create trainers
#train for set amount of epochs
#save networks to disc
trainer2 = BackpropTrainer(net2, ds2, verbose=True)
trainer2.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net2, net_fold + 'network_Type2H1_Both100.xml')
trainer4 = BackpropTrainer(net4, ds2, verbose=True)
trainer4.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net4, net_fold + 'network_Type2H2_Both100.xml')

print 'Work completed. Check that the networks have been saved'
Example No. 51
                                  verbose=verbose,
                                  weightdecay=0.01)
        trainer.trainEpochs(epochs)

        log.warning('Computing train and test errors...')
        trnresult = percentError(trainer.testOnClassData(), trndata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                                 tstdata['class'])
        print "epoch: %4d" % trainer.totalepochs, \
              "  train error: %5.2f%%" % trnresult, \
              "  test error: %5.2f%%" % tstresult
        if tstresult < previous_error:
            fnn = try_fnn
            previous_error = tstresult

    NetworkWriter.writeToFile(fnn, 'nn.xml')

    log.warning('Activating NeuralNetwork...')
    nginx_log = ClassificationDataSet(len(dictionary), 1, nb_classes=2)
    add_samples_to_training_set(nginx_log, options.log_file, 0)
    nginx_log._convertToOneOfMany(
    )  # this is still needed to make the fnn feel comfy

    out = fnn.activateOnDataset(nginx_log)
    out = out.argmax(axis=1)  # the highest output activation gives the class

    with open(options.log_file) as log_file:
        cnt = 0
        for line in log_file:
            try:
                entry = LogEntry(*nginx_log_re.match(line).groups())
Example No. 52
net2 = buildNetwork(
    5,
    20,  # number of hidden units
    3,
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)

net4 = buildNetwork(
    5,
    15,  # number of hidden units
    15,  # number of hidden units
    3,
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)
#initialize the structures
net2.randomize()
net2.sortModules()
net4.randomize()
net4.sortModules()
#create trainers
#train for set amount of epochs
#save networks to disc
trainer2 = BackpropTrainer(net2, ds2, verbose=True)
trainer2.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net2, net_fold + 'network_Type2H1_Opt.xml')
trainer4 = BackpropTrainer(net4, ds2, verbose=True)
trainer4.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net4, net_fold + 'network_Type2H2_Opt.xml')

print 'Work completed. Check that the networks have been saved'
Example No. 53
trainer = BackpropTrainer(net,
                          dataset=trainData,
                          momentum=0.0,
                          learningrate=0.01,
                          weightdecay=0.01,
                          verbose=True)

trainer.trainUntilConvergence()

print(trainData.indim)

print(testData.indim)

# a test to show the digits in the dataset; try changing the 2 and it will blow your mind
"""plt.gray()
plt.matshow(digits.images[2])
plt.show()"""

#set the epochs
#trainer.trainEpochs(5)
NetworkWriter.writeToFile(net, 'dig.xml')

#print net.activate(t)

#print results
#print 'Percent Error dataset: ', percentError(trainer.testOnClassData(
#    dataset=testData)
#    , testData['class'])

exit(0)
Example No. 54
# # plot(signal.medfilt(pred, 121))
# # subplot(2,1,1)
# # vlines(N_train, -1, 1, linestyles='dotted')
# # plot(p)

# t = np.arange(len(s)) / 250.0

# clf()

# subplot(2,1,1)
# vlines(N_train / 250.0, -2, 2, linestyles='solid')
# vlines(N_test_end / 250.0, -2, 2, linestyles='dashed')
# plot(t, s)
# plot(t, y)

# subplot(2,1,2)
# vlines(N_train / 250.0, 0, 2, linestyles='solid')
# hlines(1.0, 0, max(t), linestyles='dashed')
# plot(t, error)

# show(block=False)

# #neigh.fit(X_new, y)

print("writing model")
# joblib.dump([neigh, good_features], 'neighbors_model.pkl', compress=4)
NetworkWriter.writeToFile(fnn, 'neural_net.xml')
joblib.dump(good_indexes, 'neural_model_features.pkl', compress=5)

# neigh, good_features = joblib.load('neighbors_model.pkl')
Example No. 55
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)

net4 = buildNetwork(
    5,
    15,  # number of hidden units
    15,  # number of hidden units
    3,
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)
#initialize the structures
net2.randomize()
net2.sortModules()
net4.randomize()
net4.sortModules()
#create trainers
#train for set amount of epochs
#save networks to disc
trainer2 = BackpropTrainer(net2, ds2, verbose=True)
trainer2.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net2,
                          net_fold + 'network_Type2H1NewSTD_LessNoise.xml')
trainer4 = BackpropTrainer(net4, ds2, verbose=True)
trainer4.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net4,
                          net_fold + 'network_Type2H2NewSTD_LessNoise.xml')

print 'Work completed. Check that the networks have been saved'
Example No. 56
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)

net4 = buildNetwork(
    5,
    15,  # number of hidden units
    15,  # number of hidden units
    3,
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)
#initialize the structures
net2.randomize()
net2.sortModules()
net4.randomize()
net4.sortModules()
#create trainers
#train for set amount of epochs
#save networks to disc
trainer2 = BackpropTrainer(net2, ds2, verbose=True)
trainer2.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net2,
                          net_fold + 'network_Type2H1_Both_LessNoise.xml')
trainer4 = BackpropTrainer(net4, ds2, verbose=True)
trainer4.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net4,
                          net_fold + 'network_Type2H2_Both_LessNoise.xml')

print 'Work completed. Check that the networks have been saved'
Example No. 57
    return ls;



# read in the strings
dataset = readFromCsv("cbf");
# convert to float
numdataset = np.array(dataset,dtype=np.float64);
# split the raw data into two groups
trainingset,vdataset = dataSplit(numdataset);
# print(len(trainingset),len(vdataset));
# normalize each group separately
gytdataset = gyData(trainingset);
gyvdataset = gyData(vdataset);




# the following trains the neural network

# # final training set, built from the normalized data
bts = buildTrainingSet(gytdataset);
# ll = [3382.9879,3384.0262,3358.7953,3373.3446,179423841,2.31148615058,4.4,4.4,4.35,4.36,0.4556,4518585,19794038.0,4363744000.0,4363744000.0];
# print(calTodayFeature(ll,trainingset));
net = buildNetwork(15, 4, 2, bias=True,hiddenclass=SigmoidLayer,outclass=SigmoidLayer)
trainer = BackpropTrainer(net, bts)
trainer.trainEpochs(epochs=100);
NetworkWriter.writeToFile(net, '../net/jxkj_4l_100t.xml')
#

print(ve.calRightRate(gyvdataset,net));
Example No. 58
net2 = buildNetwork(
    5,
    20,  # number of hidden units
    3,
    bias=True,
    hiddenclass=TanhLayer,
    outclass=LinearLayer)

net4 = buildNetwork(
    5,
    15,  # number of hidden units
    15,  # number of hidden units
    3,
    bias=True,
    hiddenclass=TanhLayer,
    outclass=LinearLayer)
#initialize the structures
net2.randomize()
net2.sortModules()
net4.randomize()
net4.sortModules()
#create trainers
#train for set amount of epochs
#save networks to disc
trainer2 = BackpropTrainer(net2, ds2, verbose=True)
trainer2.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net2, net_fold + 'network_Type2H1_TanH_Both.xml')
trainer4 = BackpropTrainer(net4, ds2, verbose=True)
trainer4.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net4, net_fold + 'network_Type2H2_TanH_Both.xml')

print 'Work completed. Check that the networks have been saved'
Example No. 59
# Create a new backpropagation trainer
print "Creating backpropagation trainer..."
#trainer = BackpropTrainer(fnn, dataset=trndata, momentum=0.1, learningrate=0.01 , verbose=True, weightdecay=0.01)
trainer = BackpropTrainer(fnn,
                          dataset=trndata,
                          momentum=0.1,
                          learningrate=0.01,
                          verbose=True,
                          weightdecay=0.01)

# Perform epoch training
if numEpochs == 0:
    print "Beginning epoch until convergence..."
    start = timer()
    outp = trainer.trainUntilConvergence(verbose=True)
    end = timer()
    print "Time taken for " + str(len(
        outp[0])) + " epochs: " + str(end - start)
else:
    print "Beginning " + str(numEpochs) + " training epochs..."
    start = timer()
    trainer.trainEpochs(numEpochs)
    end = timer()
    print "Time taken for " + str(numEpochs) + " epochs: " + str(end - start)

print 'Percent Error on Test dataset: ', percentError(
    trainer.testOnClassData(dataset=tstdata), tstdata['class'])

print "Writing neural network to file..."
NetworkWriter.writeToFile(fnn, 'VegeTable_PyBrain_Neural_Network_Banana.xml')
# has bias

# Specify the number of inputs and outputs of the data set
ds = SupervisedDataSet(4, 3)
# Input the data sets
for sample in range(0, (len(ArrangeDataLuteinMultyDelta.InLuteinMultyDelta))):
    ds.addSample(ArrangeDataLuteinMultyDelta.InLuteinMultyDelta[sample],
                 ArrangeDataLuteinMultyDelta.OutLuteinMultyDelta[sample])
print "trainning data ..."
# Train the data
trainer = BackpropTrainer(net1, ds)
#trainer.trainUntilConvergence()
trainer.trainEpochs(100)
#trainer.train()

NetworkWriter.writeToFile(net1, 'net_Lutein_Multyin_100_delta_86_epoch100.xml')

# Loop to train in a loop until best quality is obtained

# List of results
x_result = []
n_result = []
lu_result = []

dx_result = []
dn_result = []
dlu_result = []

# DB is double blind
x_resultDB = []
n_resultDB = []