# Code example #1 (0 votes)
# File: ChainValidation.py — Project: cooczk/ForUbuntub
         trainer = RPropMinusTrainer(net, dataset=train_ds)
         train_errors = []
         # save errors for plotting later
         EPOCHS_PER_CYCLE = 10
         CYCLES = 10
         EPOCHS = EPOCHS_PER_CYCLE * CYCLES

         for a in xrange(CYCLES):
            trainer.trainEpochs(EPOCHS_PER_CYCLE)
            train_errors.append(trainer.testOnData())
            epoch = (a+1) * EPOCHS_PER_CYCLE
            print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
            stdout.flush()

         print("final error for training =", train_errors[-1])
         err_tst = ModuleValidator.validate(Validator.MSE, net, dataset=test_ds)
         eval_err.append(err_tst)
         modnet.append(net)
         print("test_Err", err_tst)

       print(eval_err)
       pmin = eval_err.index(min(eval_err))
       print(pmin)
       net = modnet[pmin]
       hypernet.append(net)
       hypereval.append(min(eval_err))

    hypermin = hypereval.index(min(eval_err))
    net = hypernet[hypermin]
    print("number of hidden layers", hypermin+1)
# Code example #2 (0 votes)
# File: neuralnets.py — Project: HKou/pybrain
    def runTraining(self, convergence=0, **kwargs):
        """Train the network on the stored dataset (``self.DS``).

        Runs the stored ``self.Trainer`` in increments of ``self.epoinc``
        epochs, up to ``self.maxepochs``. After each increment the training
        MSE is recorded; if a test set (``self.TDS``) is present, the test
        MSE is recorded too and the best-so-far weights are kept.

        :param convergence: if > 0, stop early once the test error has
            failed to improve for this many consecutive increments;
            if 0 (default), early stopping is disabled.
        :key kwargs: accepted for interface compatibility; unused here.

        Side effects: updates ``self.Graph`` (if set), may restore the
        best weights into ``self.Trainer.module.params`` when a
        validation set (``self.VDS``) is present, and stores the learning
        curves in ``self.trainCurve``.
        CAVEAT: No support for Sequential datasets!
        """
        assert isinstance(self.Trainer, Trainer)
        if self.Graph is not None:
            self.Graph.setLabels(x='epoch', y='normalized regression error')
            self.Graph.setLegend(['training','test'],loc='upper right')
        epoch = 0
        inc = self.epoinc
        best_error = Infinity
        best_epoch = 0
        # Only assigned once a test-set improvement is seen; the guard below
        # prevents a NameError when TDS is None but VDS is set.
        bestweights = None
        learncurve_x = [0]
        learncurve_y = [0.0]
        valcurve_y = [0.0]
        converged = False
        convtest = 0
        if convergence > 0:
            logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
        while epoch <= self.maxepochs and not converged:
            self.Trainer.trainEpochs(inc)
            epoch += inc
            learncurve_x.append(epoch)
            # calculate errors on TRAINING data
            err_trn = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.DS)
            learncurve_y.append(err_trn)
            if self.TDS is None:
                logging.info("epoch: %6d,  err_trn: %10g" % (epoch, err_trn))
            else:
                # calculate same errors on TEST data
                err_tst = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.TDS)
                valcurve_y.append(err_tst)
                if err_tst < best_error:
                    # store best error and parameters
                    best_epoch = epoch
                    best_error = err_tst
                    bestweights = self.Trainer.module.params.copy()
                    convtest = 0
                else:
                    convtest += 1
                logging.info("epoch: %6d,  err_trn: %10g,  err_tst: %10g,  best_tst: %10g" % (epoch, err_trn, err_tst, best_error))
                if self.Graph is not None:
                    self.Graph.addData(1, epoch, err_tst)

                # Check whether the convergence criterion is fulfilled (no
                # improvement after N increments). BUG FIX: the original
                # tested ``convtest >= convergence`` unconditionally, so the
                # default convergence=0 stopped training after the very
                # first increment (0 >= 0); the criterion must only apply
                # when convergence > 0, as the docstring states.
                if convergence > 0 and convtest >= convergence:
                    converged = True

            if self.Graph is not None:
                self.Graph.addData(0, epoch, err_trn)
                self.Graph.update()

        # training finished!
        logging.info("Best epoch: %6d, with error: %10g" % (best_epoch, best_error))
        if self.VDS is not None:
            # calculate same errors on VALIDATION data, using the best
            # weights found on the test set (if any improvement was recorded)
            if bestweights is not None:
                self.Trainer.module.params[:] = bestweights.copy()
            err_val = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.VDS)
            logging.info("Result on evaluation data: %10g" % err_val)
        # store training curve for saving into file
        self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
# Code example #3 (0 votes)
# File: neuralnets.py — Project: chenzhikuo1/OCR-Python
    def runTraining(self, convergence=0, **kwargs):
        """Train the network on the stored dataset (``self.DS``).

        Runs the stored ``self.Trainer`` in increments of ``self.epoinc``
        epochs, up to ``self.maxepochs``. After each increment the training
        MSE is recorded; if a test set (``self.TDS``) is present, the test
        MSE is recorded too and the best-so-far weights are kept.

        :param convergence: if > 0, stop early once the test error has
            failed to improve for this many consecutive increments;
            if 0 (default), early stopping is disabled.
        :key kwargs: accepted for interface compatibility; unused here.

        Side effects: updates ``self.Graph`` (if set), may restore the
        best weights into ``self.Trainer.module.params`` when a
        validation set (``self.VDS``) is present, and stores the learning
        curves in ``self.trainCurve``.
        CAVEAT: No support for Sequential datasets!
        """
        assert isinstance(self.Trainer, Trainer)
        if self.Graph is not None:
            self.Graph.setLabels(x='epoch', y='normalized regression error')
            self.Graph.setLegend(['training','test'],loc='upper right')
        epoch = 0
        inc = self.epoinc
        best_error = Infinity
        best_epoch = 0
        # Only assigned once a test-set improvement is seen; the guard below
        # prevents a NameError when TDS is None but VDS is set.
        bestweights = None
        learncurve_x = [0]
        learncurve_y = [0.0]
        valcurve_y = [0.0]
        converged = False
        convtest = 0
        if convergence > 0:
            logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
        while epoch <= self.maxepochs and not converged:
            self.Trainer.trainEpochs(inc)
            epoch += inc
            learncurve_x.append(epoch)
            # calculate errors on TRAINING data
            err_trn = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.DS)
            learncurve_y.append(err_trn)
            if self.TDS is None:
                logging.info("epoch: %6d,  err_trn: %10g" % (epoch, err_trn))
            else:
                # calculate same errors on TEST data
                err_tst = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.TDS)
                valcurve_y.append(err_tst)
                if err_tst < best_error:
                    # store best error and parameters
                    best_epoch = epoch
                    best_error = err_tst
                    bestweights = self.Trainer.module.params.copy()
                    convtest = 0
                else:
                    convtest += 1
                logging.info("epoch: %6d,  err_trn: %10g,  err_tst: %10g,  best_tst: %10g" % (epoch, err_trn, err_tst, best_error))
                if self.Graph is not None:
                    self.Graph.addData(1, epoch, err_tst)

                # Check whether the convergence criterion is fulfilled (no
                # improvement after N increments). BUG FIX: the original
                # tested ``convtest >= convergence`` unconditionally, so the
                # default convergence=0 stopped training after the very
                # first increment (0 >= 0); the criterion must only apply
                # when convergence > 0, as the docstring states.
                if convergence > 0 and convtest >= convergence:
                    converged = True

            if self.Graph is not None:
                self.Graph.addData(0, epoch, err_trn)
                self.Graph.update()

        # training finished!
        logging.info("Best epoch: %6d, with error: %10g" % (best_epoch, best_error))
        if self.VDS is not None:
            # calculate same errors on VALIDATION data, using the best
            # weights found on the test set (if any improvement was recorded)
            if bestweights is not None:
                self.Trainer.module.params[:] = bestweights.copy()
            err_val = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.VDS)
            logging.info("Result on evaluation data: %10g" % err_val)
        # store training curve for saving into file
        self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
# Code example #4 (0 votes)
# File: PyBrainANNs.py — Project: erdincay/ScoreGrass
 def score(self, x_data, y_datas):
     """Evaluate ``self.net`` on the given raw inputs and targets.

     The data is first converted via ``self._prepare_dataset`` and then
     scored with ``ModuleValidator.validate`` using ``regression_score``.
     """
     dataset = self._prepare_dataset(x_data, y_datas)
     return ModuleValidator.validate(regression_score, self.net, dataset)
# Code example #5 (0 votes)
def correct(output, target):
    """Score *output* against *target* with ``correctValFunc``.

    Thin wrapper around ``ModuleValidator.validate``; the exact types of
    *output*/*target* are not visible here — presumably a module and a
    dataset, TODO confirm with callers.
    """
    result = ModuleValidator.validate(correctValFunc, output, target)
    return result
# Code example #6 (0 votes)
def correct(output, target):
  """Delegate scoring of *output* vs. *target* to ``ModuleValidator``
  using the ``correctValFunc`` metric."""
  validate = ModuleValidator.validate
  return validate(correctValFunc, output, target)