Example #1
    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0.):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio by which parameters are changed in
        the direction of the gradient. The learning rate is multiplied by
        `lrdecay` after each training step, so it decays over time. The
        parameters are also adjusted with respect to `momentum`, which is
        the ratio by which the gradient of the last timestep is used.

        If `batchlearning` is set, the parameters are updated only at the end of
        each epoch. Default is False.

        `weightdecay` corresponds to the weight decay rate, where 0 means no
        weight decay at all.
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)
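
A minimal usage sketch of this BackpropTrainer with PyBrain's standard helpers; the XOR dataset and the 2-3-1 network are illustrative assumptions, not part of the snippet above:

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

# toy XOR dataset: 2 inputs, 1 target
ds = SupervisedDataSet(2, 1)
for sample, target in [((0, 0), (0,)), ((0, 1), (1,)),
                       ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(sample, target)

net = buildNetwork(2, 3, 1)  # feedforward net with one hidden layer of 3 units
trainer = BackpropTrainer(net, ds, learningrate=0.01, momentum=0.9)
for _ in range(100):
    err = trainer.train()  # one epoch of backprop; returns the average error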
Example #2
 def __init__(
     self,
     task_name,
     isdebug,
     threshold=0.7,
     gtformat='yxyx',
     gen_box_by_gt=False,
     ol_score=False,
     draw_gt=False,
     logs_path=LOGS_PATH,
     model_path=MODEL_PATH,
 ):
     Trainer.__init__(
         self,
         task_name=task_name,
         isdebug=isdebug,
         logs_path=logs_path,
         model_path=model_path,
     )
     self.cur_loss = 0.0
     self.threshold = threshold
     gtformat = gtformat.lower()
     # fall back to 'yxyx' when the ground-truth format is not recognised
     self.gtformat = gtformat if gtformat in ('yxyx', 'xywh', 'mask') else 'yxyx'
     self.gen_box_by_gt = gen_box_by_gt
     self.ol_score = ol_score
     self.draw_gt = bool(draw_gt)
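
The class that owns this __init__ is not shown in the snippet; assuming it is a detection-oriented Trainer subclass, construction would look roughly like the sketch below (DetectionTrainer and the argument values are placeholders):

# DetectionTrainer is a hypothetical name for the class defining the
# __init__ above; only the keyword names come from the snippet itself.
trainer = DetectionTrainer(
    task_name='demo_task',
    isdebug=False,
    threshold=0.7,
    gtformat='xywh',  # lower-cased on entry; unknown values fall back to 'yxyx'
    draw_gt=True,
)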
Example #3
 def __init__(self,
              task_name, isdebug,
              logs_path=LOGS_PATH,
              model_path=MODEL_PATH,
              log_ft=True,
              log_grad=True):
   Trainer.__init__(self, task_name=task_name, isdebug=isdebug,
                    logs_path=logs_path, model_path=model_path)
   self.cur_loss = 0
   self.log_ft = bool(log_ft)
   self.log_grad = bool(log_grad)
   self.log_img_sec = ['ScoreMask','EdgeMask','GTMask','Image',]
Example #4
 def __init__(self, module, ds_train=None, ds_val=None, gtol=1e-05, norm=inf,
              verbose=False, **kwargs):
     """
     Create a BFGSTrainer to train the specified `module` on the
     specified `ds_train` dataset; `ds_val` is held out for validation.
     """
     Trainer.__init__(self, module)
     self.setData(ds_train)
     self.ds_val = ds_val
     self.verbose = verbose
     self.epoch = 0
     self.totalepochs = 0
     self.train_errors = []
     self.test_errors = []
     self.optimal_params = None
     self.optimal_epoch = 0
     
     self.module = module
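
The train_errors/test_errors/optimal_params attributes suggest this trainer snapshots the best parameters found while validating on ds_val. A sketch of that bookkeeping pattern, assuming a per-epoch train() method and a hypothetical evaluate() helper (neither appears in the snippet):

for epoch in range(max_epochs):  # max_epochs assumed defined elsewhere
    train_err = trainer.train()  # assumed per-epoch training step
    val_err = evaluate(trainer.module, trainer.ds_val)  # hypothetical helper
    trainer.train_errors.append(train_err)
    trainer.test_errors.append(val_err)
    if val_err <= min(trainer.test_errors):  # best validation error so far
        trainer.optimal_params = trainer.module.params.copy()
        trainer.optimal_epoch = epoch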
Example #5
 def __init__(self, module, dataset, totalIterations=100,
              xPrecision=finfo(float).eps, fPrecision=finfo(float).eps,
              init_scg=True, **kwargs):
     """Create a SCGTrainer to train the specified `module` on the
     specified `dataset`.
     """
     Trainer.__init__(self, module)
     self.setData(dataset)
     self.input_sequences = self.ds.getField('input')
     self.epoch = 0
     self.totalepochs = 0
     self.module = module
     #self.tmp_module = module.copy()
     if init_scg:
         self.scg = SCG(self.module.params, self.f, self.df, self,
                        totalIterations, xPrecision, fPrecision,
                        evalFunc=lambda x: str(x / self.ds.getLength()))
     else:
         print "Warning: SCG trainer not initialized!"
Example #6
 def __init__(
     self,
     task_name,
     isdebug=False,
     logs_path=LOGS_PATH,
     model_path=MODEL_PATH,
 ):
     Trainer.__init__(
         self,
         task_name=task_name,
         isdebug=isdebug,
         logs_path=logs_path,
         model_path=model_path,
     )
     self.cur_rpn_cross_entropy = 0.0
     self.cur_rpn_loss_box = 0.0
     self.cur_cross_entropy = 0.0
     self.cur_loss_box = 0.0
     self.cur_loss = 0.0
     self.cur_gtbox_num = 0.0
Example #7
 def __init__(self,
              module,
              dataset=None,
              learningrate=0.01,
              lrdecay=1.0,
              momentum=0.,
              verbose=False,
              batchlearning=False,
              weightdecay=0.):
     """Create a BackpropTrainer to train the specified `module` on the 
     specified `dataset`.
     
     The learning rate gives the ratio by which parameters are changed in
     the direction of the gradient. The learning rate is multiplied by
     `lrdecay` after each training step, so it decays over time. The
     parameters are also adjusted with respect to `momentum`, which is
     the ratio by which the gradient of the last timestep is used.
     
     If `batchlearning` is set, the parameters are updated only at the end of
     each epoch. Default is False.
     
     `weightdecay` corresponds to the weight decay rate, where 0 means no
     weight decay at all; it acts as a regularisation parameter.
     """
     Trainer.__init__(self, module)
     self.setData(dataset)
     self.verbose = verbose
     self.batchlearning = batchlearning
     self.weightdecay = weightdecay
     self.epoch = 0
     self.totalepochs = 0
     # set up gradient descender
     self.descent = GradientDescent()
     self.descent.alpha = learningrate
     self.descent.momentum = momentum
     self.descent.alphadecay = lrdecay
     self.descent.init(module.params)
     self.bestepoch = 0  # xuewen: track the best epoch seen so far
Example #8
    def __init__(self, evolino_network, dataset, **kwargs):
        """
            :key subPopulationSize: Size of the subpopulations.
            :key nCombinations: Number of times each chromosome is built into an individual. default=1
            :key nParents: Number of individuals left in a subpopulation after selection.
            :key initialWeightRange: Range of the weights of the RNN after initialization. default=(-0.1,0.1)
            :key weightInitializer: Initializer object for the weights of the RNN. default=Randomization(...)
            :key mutationAlpha: The mutation's intensity. default=0.01
            :key mutationVariate: The variate used for mutation. default=CauchyVariate(...)
            :key wtRatio: The quotient: washout-time/training-time. Needed to
                            split the sequences into washout phase and training phase.
            :key nBurstMutationEpochs: Number of consecutive epochs without a fitness
                                         increase before burst mutation is applied. default=Infinity
            :key backprojectionFactor: Weight of the backprojection. Usually
                                         supplied through evolino_network.
            :key selection: Selection object for evolino
            :key reproduction: Reproduction object for evolino
            :key burstMutation: BurstMutation object for evolino
            :key evaluation: Evaluation object for evolino
            :key verbosity: verbosity level
        """
        Trainer.__init__(self, evolino_network)

        self.network = evolino_network
        self.setData(dataset)

        ap = KWArgsProcessor(self, kwargs)

        # misc
        ap.add('verbosity', default=0)

        # population
        ap.add('subPopulationSize', private=True, default=8)
        ap.add('nCombinations', private=True, default=4)
        ap.add('nParents', private=True, default=None)
        ap.add('initialWeightRange', private=True, default=(-0.1, 0.1))
        ap.add('weightInitializer', private=True, default=Randomization(self._initialWeightRange[0], self._initialWeightRange[1]))

        # mutation
        ap.add('mutationAlpha', private=True, default=0.01)
        ap.add('mutationVariate', private=True, default=CauchyVariate(0, self._mutationAlpha))

        # evaluation
        ap.add('wtRatio', private=True, default=(1, 3))

        # burst mutation
        ap.add('nBurstMutationEpochs', default=Infinity)

        # network
        ap.add('backprojectionFactor', private=True, default=float(evolino_network.backprojectionFactor))
        evolino_network.backprojectionFactor = self._backprojectionFactor

        # aggregated objects
        ap.add('selection', default=EvolinoSelection())
        ap.add('reproduction', default=EvolinoReproduction(mutationVariate=self.mutationVariate))
        ap.add('burstMutation', default=EvolinoBurstMutation())
        ap.add('evaluation', default=EvolinoEvaluation(evolino_network, self.ds, **kwargs))

        self.selection.nParents = self.nParents

        self._population = EvolinoPopulation(
            EvolinoSubIndividual(evolino_network.getGenome()),
            self._subPopulationSize,
            self._nCombinations,
            self._weightInitializer
            )

        # the per-epoch pipeline: evaluate fitness, select parents, reproduce
        self._filters = [self.evaluation, self.selection, self.reproduction]

        self.totalepochs = 0
        self._max_fitness = self.evaluation.max_fitness
        self._max_fitness_epoch = self.totalepochs
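
A usage sketch modeled on PyBrain's evolino example; the import paths, network shape, and dataset preparation are assumptions that may differ across PyBrain versions:

from pybrain.structure.modules.evolinonetwork import EvolinoNetwork
from pybrain.supervised.trainers.evolino import EvolinoTrainer

net = EvolinoNetwork(1, 40)  # 1 output dimension, 40 hidden units
trainer = EvolinoTrainer(
    net,
    dataset=trn_data,  # a sequential dataset, assumed prepared elsewhere
    subPopulationSize=20,
    nParents=8,
    nCombinations=1,
    initialWeightRange=(-0.01, 0.01),
    mutationAlpha=0.001,
    wtRatio=(1, 3),  # washout-time / training-time split
    verbosity=2,
)
trainer.trainEpochs(1)  # one generation of the evolino loop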