Example #1

    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0.):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio by which parameters are changed
        in the direction of the gradient. The learning rate is multiplied
        by `lrdecay` after each training step. The parameters are also
        adjusted with respect to `momentum`, which is the ratio by which
        the gradient of the last timestep is used.

        If `batchlearning` is set, the parameters are updated only at the end
        of each epoch. Default is False.

        `weightdecay` corresponds to the weight decay rate, where 0 means no
        weight decay at all.
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)
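For context, a minimal usage sketch of this constructor with the standard PyBrain helpers; it assumes a stock PyBrain install, and the XOR data is illustrative only:

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

# Illustrative XOR dataset: 2 inputs, 1 output.
ds = SupervisedDataSet(2, 1)
for inp, target in [((0, 0), (0,)), ((0, 1), (1,)),
                    ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(inp, target)

net = buildNetwork(2, 3, 1)   # 2-3-1 feedforward network
trainer = BackpropTrainer(net, ds, learningrate=0.01, lrdecay=1.0,
                          momentum=0.9, weightdecay=0.0)
err = trainer.train()         # one epoch over the dataset; returns the average error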
Example #2
    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0.):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio by which parameters are changed
        in the direction of the gradient. The learning rate is multiplied
        by `lrdecay` after each training step. The parameters are also
        adjusted with respect to `momentum`, which is the ratio by which
        the gradient of the last timestep is used.

        If `batchlearning` is set, the parameters are updated only at the end of
        each epoch. Default is False.

        `weightdecay` corresponds to the weight decay rate, where 0 means no
        weight decay at all.
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)
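The three `descent` attributes set above map onto the classic momentum update. The following standalone sketch (not the `GradientDescent` class itself, whose internals are not shown here) illustrates the rule they suggest; whether the step is added or subtracted depends on whether the caller passes the gradient or its negative:

# Standalone illustration of the update implied by alpha, momentum
# and alphadecay; not PyBrain's GradientDescent implementation.
def momentum_step(params, gradient, state, momentum=0.0, alphadecay=1.0):
    step = [state['alpha'] * g + momentum * s
            for g, s in zip(gradient, state['last_step'])]
    state['last_step'] = step
    state['alpha'] *= alphadecay      # per-step learning-rate decay
    return [p + s for p, s in zip(params, step)]

state = {'alpha': 0.01, 'last_step': [0.0, 0.0]}
new_params = momentum_step([1.0, -2.0], [0.5, -0.1], state, momentum=0.9)

Example #3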
    def __init__(self, model, dataset, **kwargs):
        Trainer.__init__(self, model)

        ap = KWArgsProcessor(self, kwargs)

        # misc
        ap.add('verbosity', default=0)

        # population
        ap.add('subPopulationSize', private=True, default=8)
        ap.add('nCombinations', private=True, default=4)
        ap.add('nParents', private=True, default=None)
        ap.add('initialWeightRange', private=True, default=(-0.1, 0.1))
        ap.add('weightInitializer', private=True, default=Randomization(self._initialWeightRange[0], self._initialWeightRange[1]))

        # mutation
        ap.add('mutationAlpha', private=True, default=0.01)
        ap.add('mutationVariate', private=True, default=CauchyVariate(0, self._mutationAlpha))

        # evaluation
        ap.add('wtRatio', private=True, default=(1, 3))

        # burst mutation
        ap.add('nBurstMutationEpochs', default=Infinity)

        # network
        ap.add('backprojectionFactor', private=True, default=float(model.backprojectionFactor))
        model.backprojectionFactor = self._backprojectionFactor

        # aggregated objects
        ap.add('selection', default=EvolinoSelection())
        ap.add('reproduction', default=EvolinoReproduction(mutationVariate=self.mutationVariate))
        ap.add('burstMutation', default=EvolinoBurstMutation())
        ap.add('evaluation', default=EvolinoEvaluation(model, dataset, **kwargs))

        self.model = model
        self.dataset = dataset
        genome = self.model.getGenome()
        self.population = EvolinoPopulation(
            EvolinoSubIndividual(genome),
            self._subPopulationSize,
            self._nCombinations,
            self._weightInitializer)

        filters = []
        filters.append(self.evaluation)
        filters.append(self.selection)
        filters.append(self.reproduction)

        self._filters = filters

        self.totalepochs = 0
        self._max_fitness = self.evaluation.max_fitness
        self._max_fitness_epoch = self.totalepochs
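The `_filters` list fixes the order of one generation: evaluation, then selection, then reproduction. A hedged sketch of the epoch loop, assuming each filter exposes an `apply(population)` method as in PyBrain's filter interface:

# Hedged sketch of one Evolino generation; assumes each filter
# implements apply(population).
def train_one_epoch(trainer):
    for f in trainer._filters:   # evaluation -> selection -> reproduction
        f.apply(trainer.population)
    trainer.totalepochs += 1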
Example #4
    def __init__(self, module, dataset, RHO=0.0001, SIG=0.5, reEvaluate=0.1,
                 extrapolate=3.0, maxEvaluate=20, maxSlope=100.0):
        """Initialization method. Unlike the normal BackpropTrainer, the
        training dataset must be set during initialization when using
        conjugate gradient descent, because the dataset is used inside the
        cost function and cannot be passed in explicitly later.
        """
        assert dataset is not None, "The training dataset cannot be empty."
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.epoch = 0
        self.descent = PRConjugateGradientDescent(
            RHO=RHO, SIG=SIG, reEvaluate=reEvaluate,
            extrapolate=extrapolate, maxEvaluate=maxEvaluate,
            maxSlope=maxSlope)
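Note the contrast with BackpropTrainer: here the dataset is required at construction time. A sketch of that contract, where `CGTrainer` stands in for whatever class this `__init__` belongs to (the name is hypothetical) and `net`/`ds` are as in the first sketch above:

# `CGTrainer` is a hypothetical name for the class this __init__ belongs to.
trainer = CGTrainer(net, ds, RHO=0.0001, SIG=0.5)   # fine: dataset supplied
try:
    CGTrainer(net, None)                            # tripped by the assert
except AssertionError as err:
    print(err)   # "The training dataset cannot be empty."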
Example #5
    def __init__(self,
                 module,
                 dataset=None,
                 learningrate=0.01,
                 lrdecay=1.0,
                 momentum=0.,
                 verbose=False,
                 batchlearning=False,
                 weightdecay=0.,
                 errfun=None):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio by which parameters are changed
        in the direction of the gradient. The learning rate is multiplied
        by `lrdecay` after each training step. The parameters are also
        adjusted with respect to `momentum`, which is the ratio by which
        the gradient of the last timestep is used.

        If `batchlearning` is set, the parameters are updated only at the end
        of each epoch. Default is False.

        `weightdecay` corresponds to the weight decay rate, where 0 means no
        weight decay at all.

        Arguments:
            errfun (func): Function that takes 2 positional arguments, the
                target (true) and predicted (estimated) output vectors, and
                returns an estimate of the signed distance to the target
                output. default = lambda targ, est: targ - est
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)
        self.errfun = errfun or abs_error
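This fork's constructor exposes `errfun` as a hook; a sketch passing the documented signed-difference default explicitly (reusing `net` and `ds` from the first sketch, and assuming numpy arrays so the subtraction is element-wise):

# errfun takes (target, estimate) and returns a signed error vector;
# the constructor above falls back to abs_error when errfun is None.
signed_error = lambda targ, est: targ - est
trainer = BackpropTrainer(net, ds, errfun=signed_error)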
Example #6
    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0., errfun=None):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio by which parameters are changed
        in the direction of the gradient. The learning rate is multiplied
        by `lrdecay` after each training step. The parameters are also
        adjusted with respect to `momentum`, which is the ratio by which
        the gradient of the last timestep is used.

        If `batchlearning` is set, the parameters are updated only at the end
        of each epoch. Default is False.

        `weightdecay` corresponds to the weight decay rate, where 0 means no
        weight decay at all.

        Arguments:
            errfun (func): Function that takes 2 positional arguments, the
                target (true) and predicted (estimated) output vectors, and
                returns an estimate of the signed distance to the target
                output. default = lambda targ, est: targ - est
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)
        self.errfun = errfun or abs_error
Example #7
    def __init__(self, evolino_network, dataset, **kwargs):
        """
            :key subPopulationSize: Size of the subpopulations.
            :key nCombinations: Number of times each chromosome is built into an individual. default=1
            :key nParents: Number of individuals left in a subpopulation after selection.
            :key initialWeightRange: Range of the weights of the RNN after initialization. default=(-0.1,0.1)
            :key weightInitializer: Initializer object for the weights of the RNN. default=Randomization(...)
            :key mutationAlpha: The mutation's intensity. default=0.01
            :key mutationVariate: The variate used for mutation. default=CauchyVariate(...)
            :key wtRatio: The washout-time/training-time ratio, used to
                            split each sequence into a washout phase and a training phase.
            :key nBurstMutationEpochs: Number of consecutive epochs without fitness
                                         improvement before burst mutation is applied. default=Infinity
            :key backprojectionFactor: Weight of the backprojection. Usually
                                         supplied through evolino_network.
            :key selection: Selection object for evolino
            :key reproduction: Reproduction object for evolino
            :key burstMutation: BurstMutation object for evolino
            :key evaluation: Evaluation object for evolino
            :key verbosity: verbosity level
        """
        Trainer.__init__(self, evolino_network)

        self.network = evolino_network
        self.setData(dataset)

        ap = KWArgsProcessor(self, kwargs)

        # misc
        ap.add('verbosity', default=0)

        # population
        ap.add('subPopulationSize', private=True, default=8)
        ap.add('nCombinations', private=True, default=4)
        ap.add('nParents', private=True, default=None)
        ap.add('initialWeightRange', private=True, default=(-0.1, 0.1))
        ap.add('weightInitializer', private=True, default=Randomization(self._initialWeightRange[0], self._initialWeightRange[1]))

        # mutation
        ap.add('mutationAlpha', private=True, default=0.01)
        ap.add('mutationVariate', private=True, default=CauchyVariate(0, self._mutationAlpha))

        # evaluation
        ap.add('wtRatio', private=True, default=(1, 3))

        # burst mutation
        ap.add('nBurstMutationEpochs', default=Infinity)

        # network
        ap.add('backprojectionFactor', private=True, default=float(evolino_network.backprojectionFactor))
        evolino_network.backprojectionFactor = self._backprojectionFactor

        # aggregated objects
        ap.add('selection', default=EvolinoSelection())
        ap.add('reproduction', default=EvolinoReproduction(mutationVariate=self.mutationVariate))
        ap.add('burstMutation', default=EvolinoBurstMutation())
        ap.add('evaluation', default=EvolinoEvaluation(evolino_network, self.ds, **kwargs))

        self.selection.nParents = self.nParents

        self._population = EvolinoPopulation(
            EvolinoSubIndividual(evolino_network.getGenome()),
            self._subPopulationSize,
            self._nCombinations,
            self._weightInitializer
            )

        filters = []
        filters.append(self.evaluation)
        filters.append(self.selection)
        filters.append(self.reproduction)

        self._filters = filters

        self.totalepochs = 0
        self._max_fitness = self.evaluation.max_fitness
        self._max_fitness_epoch = self.totalepochs
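A hedged construction sketch, loosely modeled on PyBrain's superimposed-sine Evolino example; the import paths and the toy sequence are assumptions to verify against your PyBrain checkout:

from pybrain.datasets import SequentialDataSet
from pybrain.structure.modules.evolinonetwork import EvolinoNetwork  # path may vary
from pybrain.supervised.trainers.evolino import EvolinoTrainer

ds = SequentialDataSet(0, 1)            # no input, 1 output target sequence
ds.newSequence()
for y in [0.0, 0.5, 1.0, 0.5, 0.0]:     # illustrative toy sequence
    ds.addSample([], [y])

net = EvolinoNetwork(ds.outdim, 10)     # 1 output, 10 hidden units
trainer = EvolinoTrainer(net, dataset=ds, subPopulationSize=20,
                         nCombinations=1, initialWeightRange=(-0.1, 0.1),
                         verbosity=1)
trainer.trainEpochs(3)                  # run three generations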
Example #8
    def __init__(self, evolino_network, dataset, **kwargs):
        """
            :key subPopulationSize: Size of the subpopulations.
            :key nCombinations: Number of times each chromosome is built into an individual. default=1
            :key nParents: Number of individuals left in a subpopulation after selection.
            :key initialWeightRange: Range of the weights of the RNN after initialization. default=(-0.1,0.1)
            :key weightInitializer: Initializer object for the weights of the RNN. default=Randomization(...)
            :key mutationAlpha: The mutation's intensity. default=0.01
            :key mutationVariate: The variate used for mutation. default=CauchyVariate(...)
            :key wtRatio: The washout-time/training-time ratio, used to
                            split each sequence into a washout phase and a training phase.
            :key nBurstMutationEpochs: Number of consecutive epochs without fitness
                                         improvement before burst mutation is applied. default=Infinity
            :key backprojectionFactor: Weight of the backprojection. Usually
                                         supplied through evolino_network.
            :key selection: Selection object for evolino
            :key reproduction: Reproduction object for evolino
            :key burstMutation: BurstMutation object for evolino
            :key evaluation: Evaluation object for evolino
            :key verbosity: verbosity level
        """
        Trainer.__init__(self, evolino_network)

        self.network = evolino_network
        self.setData(dataset)

        ap = KWArgsProcessor(self, kwargs)

        # misc
        ap.add('verbosity', default=0)

        # population
        ap.add('subPopulationSize', private=True, default=8)
        ap.add('nCombinations', private=True, default=4)
        ap.add('nParents', private=True, default=None)
        ap.add('initialWeightRange', private=True, default=(-0.1, 0.1))
        ap.add('weightInitializer',
               private=True,
               default=Randomization(self._initialWeightRange[0],
                                     self._initialWeightRange[1]))

        # mutation
        ap.add('mutationAlpha', private=True, default=0.01)
        ap.add('mutationVariate',
               private=True,
               default=CauchyVariate(0, self._mutationAlpha))

        # evaluation
        ap.add('wtRatio', private=True, default=(1, 3))

        # burst mutation
        ap.add('nBurstMutationEpochs', default=Infinity)

        # network
        ap.add('backprojectionFactor',
               private=True,
               default=float(evolino_network.backprojectionFactor))
        evolino_network.backprojectionFactor = self._backprojectionFactor

        # aggregated objects
        ap.add('selection', default=EvolinoSelection())
        ap.add(
            'reproduction',
            default=EvolinoReproduction(mutationVariate=self.mutationVariate))
        ap.add('burstMutation', default=EvolinoBurstMutation())
        ap.add('evaluation',
               default=EvolinoEvaluation(evolino_network, self.ds, **kwargs))

        self.selection.nParents = self.nParents

        self._population = EvolinoPopulation(
            EvolinoSubIndividual(evolino_network.getGenome()),
            self._subPopulationSize, self._nCombinations,
            self._weightInitializer)

        filters = []
        filters.append(self.evaluation)
        filters.append(self.selection)
        filters.append(self.reproduction)

        self._filters = filters

        self.totalepochs = 0
        self._max_fitness = self.evaluation.max_fitness
        self._max_fitness_epoch = self.totalepochs
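The default `wtRatio` of (1, 3) above means one part washout to three parts training. A standalone sketch of the split that ratio implies for a single sequence:

# Standalone arithmetic sketch of the washout/training split for wtRatio=(1, 3):
# the first 1/(1+3) of the sequence only warms up the network state.
sequence = list(range(100))              # illustrative 100-step sequence
w, t = 1, 3                              # wtRatio
n_washout = len(sequence) * w // (w + t)
washout, training = sequence[:n_washout], sequence[n_washout:]
print(len(washout), len(training))       # 25 75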