Code example #1
    def __init__(self):
        # standard parameters
        self.epsilon = 2.0  # Initial value of the sigmas
        self.baseline = 0.0  # Moving-average baseline, used for sigma adaptation
        self.best = -1000000.0  # TODO: replace with -inf
        self.symCount = 1.0  # Switch for symmetric sampling
        self.gd = GradientDescent()
        self.gdSig = GradientDescent()
        self.wDecay = 0.001  # Lasso weight decay (0 to deactivate)
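
Every snippet on this page revolves around PyBrain's GradientDescent helper. A minimal stand-alone sketch of how it is typically driven, assuming the standard interface (attribute-based configuration, init() with the initial parameter vector, and calling the instance with a gradient to get the updated parameters back); the vectors below are purely illustrative:

    from numpy import ones
    from pybrain.auxiliary import GradientDescent

    gd = GradientDescent()
    gd.alpha = 0.1             # learning rate
    gd.momentum = 0.9          # momentum term
    gd.init(ones(5) * 0.5)     # register the initial parameter vector
    gradient = ones(5) * 0.01  # illustrative gradient
    new_params = gd(gradient)  # returns the updated parameter vector
    print(new_params)
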
Code example #2
    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0.):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio by which parameters are changed in
        the direction of the gradient. After each training step the learning
        rate is multiplied by `lrdecay`. The parameters are also adjusted with
        respect to `momentum`, which is the ratio by which the gradient of the
        previous timestep is reused.

        If `batchlearning` is set, the parameters are updated only at the end of
        each epoch. Default is False.

        `weightdecay` is the weight-decay rate; a value of 0 disables weight
        decay entirely.
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)
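
A minimal end-to-end usage sketch for this trainer on a toy XOR dataset, assuming the standard PyBrain helpers buildNetwork and SupervisedDataSet; network size and hyperparameters are illustrative:

    from pybrain.tools.shortcuts import buildNetwork
    from pybrain.datasets import SupervisedDataSet
    from pybrain.supervised.trainers import BackpropTrainer

    # XOR-style toy dataset: 2 inputs, 1 target output
    ds = SupervisedDataSet(2, 1)
    ds.addSample((0, 0), (0,))
    ds.addSample((0, 1), (1,))
    ds.addSample((1, 0), (1,))
    ds.addSample((1, 1), (0,))

    net = buildNetwork(2, 3, 1)  # 2-3-1 feedforward network
    trainer = BackpropTrainer(net, ds, learningrate=0.01, momentum=0.9,
                              weightdecay=0.0, verbose=False)
    trainer.trainEpochs(100)  # 100 passes over the dataset
    print(trainer.train())    # one more epoch; returns the average error
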
Code example #3
File: spsa.py  Project: chenzhikuo1/OCR-Python
    def __init__(self):
        # standard parameters
        self.epsilon = 2.0  # Initial value of the exploration size
        self.baseline = 0.0  # Moving-average baseline, used only for visualisation
        self.best = -1000000.0  # TODO: replace with -inf
        self.symCount = 1.0  # Switch for symmetric sampling
        self.gd = GradientDescent()
        self.gamma = 0.9995  # Exploration decay factor
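
The epsilon/gamma pair suggests a geometrically shrinking exploration size. A small arithmetic sketch under the assumption that the learner multiplies epsilon by gamma once per learning step (the update itself is not part of this snippet):

    epsilon, gamma = 2.0, 0.9995
    for n in (0, 1000, 5000, 10000):
        # after n steps the exploration size would be epsilon * gamma**n
        print(n, epsilon * gamma ** n)  # 10000 steps -> roughly 0.013
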
Code example #4
    def _additionalInit(self):
        if self.sigmaLearningRate is None:
            self.sigmaLearningRate = self.learningRate
        self.gdSig = GradientDescent()
        self.gdSig.alpha = self.sigmaLearningRate
        self.gdSig.rprop = self.rprop
        self.sigList = ones(self.numParameters) * self.epsilon  # Stores the list of standard deviations (sigmas)
        self.gdSig.init(self.sigList)
        self.baseline = None
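
Here gdSig is initialised on the vector of sigmas rather than on the parameters. A stand-alone sketch of how such a second descender could adapt the exploration widths, assuming it is called with the gradient of the objective with respect to the sigmas; the gradient values and the positivity clamp are illustrative assumptions, not taken from the snippet:

    from numpy import ones, abs as np_abs
    from pybrain.auxiliary import GradientDescent

    num_parameters, epsilon = 4, 2.0

    gd_sig = GradientDescent()
    gd_sig.alpha = 0.1
    sig_list = ones(num_parameters) * epsilon
    gd_sig.init(sig_list)

    sigma_gradient = ones(num_parameters) * -0.05  # illustrative gradient w.r.t. the sigmas
    sig_list = gd_sig(sigma_gradient)              # descender returns the adapted sigma vector
    sig_list = np_abs(sig_list) + 1e-10            # keep exploration widths positive (assumption)
    print(sig_list)
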
Code example #5
File: fd.py  Project: veronikaKochugova/DropWeak
    def _setInitEvaluable(self, evaluable):
        ContinuousOptimizer._setInitEvaluable(self, evaluable)
        self.current = self._initEvaluable
        self.gd = GradientDescent()
        self.gd.alpha = self.learningRate
        if self.learningRateDecay is not None:
            self.gd.alphadecay = self.learningRateDecay
        self.gd.momentum = self.momentum
        self.gd.rprop = self.rprop
        self.gd.init(self._initEvaluable)
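
fd.py is where PyBrain keeps its finite-difference learners, and the descender configured above is later fed a gradient estimate. A self-contained sketch of that idea with a forward-difference estimator, assuming GradientDescent moves its stored values in the direction of the gradient it is called with; the objective f and the step size are illustrative:

    from numpy import array, zeros
    from pybrain.auxiliary import GradientDescent

    def f(x):
        # illustrative objective to maximise: negative squared distance to (1, 2)
        return -((x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2)

    current = array([0.0, 0.0])
    gd = GradientDescent()
    gd.alpha = 0.1
    gd.init(current)

    eps = 1e-4
    for _ in range(100):
        grad = zeros(len(current))
        for i in range(len(current)):
            shifted = current.copy()
            shifted[i] += eps
            grad[i] = (f(shifted) - f(current)) / eps  # forward-difference estimate
        current = gd(grad)                             # move along the estimated gradient

    print(current)  # approaches (1, 2)
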
Code example #6
    def __init__(self):
        # gradient descender
        self.gd = GradientDescent()

        # create default explorer
        self._explorer = None

        # log-likelihood (loglh) dataset
        self.loglh = None

        # network to tie module and explorer together
        self.network = None
Code example #7
    def __init__(self,
                 module,
                 dataset=None,
                 learningrate=0.01,
                 lrdecay=1.0,
                 momentum=0.,
                 verbose=False,
                 batchlearning=False,
                 weightdecay=0.,
                 errfun=None):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio by which parameters are changed in
        the direction of the gradient. After each training step the learning
        rate is multiplied by `lrdecay`. The parameters are also adjusted with
        respect to `momentum`, which is the ratio by which the gradient of the
        previous timestep is reused.

        If `batchlearning` is set, the parameters are updated only at the end
        of each epoch. Default is False.

        `weightdecay` is the weight-decay rate; a value of 0 disables weight
        decay entirely.

        Arguments:
            errfun (func): Function that takes 2 positional arguments,
                the target (true) and predicted (estimated) output vectors, and
                returns an estimate of the signed distance to the target (true)
                output. Default: `lambda targ, est: (targ - est)`.
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)
        self.errfun = errfun or abs_error
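
The only difference from code example #2 is the pluggable error function. A short usage sketch, reusing the net and ds objects from the sketch after code example #2 and assuming this variant of BackpropTrainer is the one imported; the signed-difference errfun below simply spells out the default described in the docstring:

    # custom signed error: positive when the prediction undershoots the target
    def signed_error(targ, est):
        return targ - est

    trainer = BackpropTrainer(net, ds, learningrate=0.01, momentum=0.9,
                              errfun=signed_error)
    trainer.trainEpochs(20)
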
Code example #8
    def __init__(self):
        self.gd = GradientDescent()
Code example #9
File: basic.py  Project: chenzhikuo1/OCR-Python
    def __init__(self):
        # standard parameters
        self.epsilon = 1.0
        self.gamma = 0.999
        self.gd = GradientDescent()