Example #1
0
    def train_momentum(self, input, target, eta=0.2, momentum=0.8,
                       maxiter=10000, disp=0):
        """
        Simple backpropagation training with momentum.

        :Parameters:
            input : 2-D array
                Array of input patterns
            target : 2-D array
                Array of network targets
            eta : float, optional
                Learning rate
            momentum : float, optional
                Momentum coefficient
            maxiter : integer, optional
                Maximum number of iterations
            disp : bool
                If True convergence method is displayed
        """
        # Normalize patterns/targets to the network's internal ranges.
        input, target = self._setnorm(input, target)
        if disp:
            err = netprop.sqerror(self.weights, self.conec, self.units,
                                  self.inno, self.outno, input, target)
            # Parenthesized single-argument print is valid in Python 2 and 3.
            print("Initial error --> 0.5*(sum of squared errors at output): %.15f" % err)
        # netprop.momentum returns the updated weight vector.
        self.weights = netprop.momentum(self.weights, self.conec, self.bconecno,
                                        self.units, self.inno, self.outno, input,
                                        target, eta, momentum, maxiter)
        if disp:
            err = netprop.sqerror(self.weights, self.conec, self.units,
                                  self.inno, self.outno, input, target)
            print("Final error   --> 0.5*(sum of squared errors at output): %.15f" % err)
Example #2
0
    def train_rprop(self, input, target,
                    a=1.2, b=0.5, mimin=0.000001, mimax=50.,
                    xmi=0.1, maxiter=10000, disp=0):
        """
        Rprop training algorithm.

        :Parameters:
            input : 2-D array
                Array of input patterns
            target : 2-D array
                Array of network targets
            a : float, optional
                Training step increasing parameter
            b : float, optional
                Training step decreasing parameter
            mimin : float, optional
                Minimum training step
            mimax : float, optional
                Maximum training step
            xmi : array (or float), optional
                Array containing initial training steps for weights.
                If *xmi* is a scalar then its value is set for all weights
            maxiter : integer, optional
                Maximum number of iterations
            disp : bool
                If True convergence method is displayed. Default is *False*

        :Returns:
            xmi : array
                Computed array of training steps to be used in eventual further
                training calls.
        """
        input, target = self._setnorm(input, target)
        # Broadcast a scalar initial step to every connection; isinstance
        # also accepts int/float subclasses, unlike the type-name check.
        if isinstance(xmi, (float, int)):
            xmi = [xmi] * len(self.conec)

        if disp:
            err = netprop.sqerror(self.weights, self.conec, self.units,
                                  self.inno, self.outno, input, target)
            # Parenthesized single-argument print is valid in Python 2 and 3.
            print("Initial error --> 0.5*(sum of squared errors at output): %.15f" % err)
        # netprop.rprop returns both the updated weights and training steps.
        self.weights, xmi = netprop.rprop(self.weights, self.conec, self.bconecno,
                                          self.units, self.inno, self.outno, input,
                                          target, a, b, mimin, mimax, xmi, maxiter)
        if disp:
            err = netprop.sqerror(self.weights, self.conec, self.units,
                                  self.inno, self.outno, input, target)
            print("Final error   --> 0.5*(sum of squared errors at output): %.15f" % err)
        return xmi
Example #3
0
    def sqerror(self, input, target):
        """
        Calculates sum of squared errors at network output.

        Error is calculated for **normalized** input and target arrays.

        :Parameters:
            input : 2-D array
                Array of input patterns
            target : 2-D array
                Array of network targets

        :Returns:
            err : float
                0.5*(sum of squared errors at network outputs)

        .. note::
            This function might be slow in frequent use, because data
            normalization is performed at each call. Usually there's no need
            to use this function, unless you need to adopt your own training
            strategy.
        """
        # Normalize first, then delegate the error sum to the backend.
        norm_input, norm_target = self._setnorm(input, target)
        return netprop.sqerror(self.weights, self.conec, self.units,
                               self.inno, self.outno, norm_input, norm_target)
Example #4
0
 def sqerror(self, input, target):
     """
     Return 0.5*(sum of squared errors at output), computed after the
     input and target arrays have been normalized.

     Normalization happens on every call, so frequent use may be slow.
     """
     norm_in, norm_tgt = self._setnorm(input, target)
     return netprop.sqerror(self.weights, self.conec, self.units,
                            self.inno, self.outno, norm_in, norm_tgt)
Example #5
0
 def sqerror(self, input, target):
     """
     Compute 0.5*(sum of squared errors at output) on the normalized
     forms of *input* and *target*.

     Because data normalization runs on each call, this can be slow
     when invoked frequently.
     """
     # Normalize, then hand off to the backend error routine.
     input, target = self._setnorm(input, target)
     result = netprop.sqerror(self.weights, self.conec, self.units,
                              self.inno, self.outno, input, target)
     return result
Example #6
0
 def train_momentum(self, input, target, eta=0.2, momentum=0.8,
                    maxiter=10000, disp=0):
     """
     Simple backpropagation training with momentum.

     Allowed parameters:
     eta             - descent scaling parameter (default is 0.2)
     momentum        - momentum coefficient (default is 0.8)
     maxiter         - the maximum number of iterations (default is 10000)
     disp            - print convergence message if non-zero (default is 0)
     """
     # Normalize patterns/targets to the network's internal ranges.
     input, target = self._setnorm(input, target)
     if disp:
         err = netprop.sqerror(self.weights, self.conec, self.units,
                               self.inno, self.outno, input, target)
         # Parenthesized single-argument print is valid in Python 2 and 3.
         print("Initial error --> 0.5*(sum of squared errors at output): %.15f" % err)
     # netprop.momentum returns the updated weight vector.
     self.weights = netprop.momentum(self.weights, self.conec, self.bconecno,
                                     self.units, self.inno, self.outno, input,
                                     target, eta, momentum, maxiter)
     if disp:
         err = netprop.sqerror(self.weights, self.conec, self.units,
                               self.inno, self.outno, input, target)
         print("Final error   --> 0.5*(sum of squared errors at output): %.15f" % err)
Example #7
0
 def train_momentum(self, input, target, eta=0.2, momentum=0.8,
                    maxiter=10000, disp=0):
     """
     Simple backpropagation training with momentum.

     Allowed parameters:
     eta             - descent scaling parameter (default is 0.2)
     momentum        - momentum coefficient (default is 0.8)
     maxiter         - the maximum number of iterations (default is 10000)
     disp            - print convergence message if non-zero (default is 0)
     """
     input, target = self._setnorm(input, target)
     if disp:
         err = netprop.sqerror(self.weights, self.conec, self.units,
                               self.inno, self.outno, input, target)
         # Parenthesized single-argument print works in Python 2 and 3.
         print("Initial error --> 0.5*(sum of squared errors at output): %.15f" % err)
     # netprop.momentum returns the updated weight vector.
     self.weights = netprop.momentum(self.weights, self.conec, self.bconecno,
                                     self.units, self.inno, self.outno, input,
                                     target, eta, momentum, maxiter)
     if disp:
         err = netprop.sqerror(self.weights, self.conec, self.units,
                               self.inno, self.outno, input, target)
         print("Final error   --> 0.5*(sum of squared errors at output): %.15f" % err)
Example #8
0
 def train_rprop(self, input, target,
                 a=1.2, b=0.5, mimin=0.000001, mimax=50.,
                 xmi=0.1, maxiter=10000, disp=0):
     """
     Rprop training algorithm.

     Allowed parameters:
     a               - training step increasing parameter (default is 1.2)
     b               - training step decreasing parameter (default is 0.5)
     mimin           - minimum training step (default is 0.000001)
     mimax           - maximum training step (default is 50.)
     xmi             - vector containing initial training steps for weights;
                       if 'xmi' is a scalar then its value is set for all
                       weights (default is 0.1)
     maxiter         - the maximum number of iterations (default is 10000)
     disp            - print convergence message if non-zero (default is 0)

     Method updates network weights and returns 'xmi' vector
     (after 'maxiter' iterations).
     """
     input, target = self._setnorm(input, target)
     # Broadcast a scalar initial step to every connection; isinstance
     # also accepts int/float subclasses, unlike the type-name check.
     if isinstance(xmi, (float, int)):
         xmi = [xmi] * len(self.conec)

     if disp:
         err = netprop.sqerror(self.weights, self.conec, self.units,
                               self.inno, self.outno, input, target)
         # Parenthesized single-argument print works in Python 2 and 3.
         print("Initial error --> 0.5*(sum of squared errors at output): %.15f" % err)
     # netprop.rprop returns both the updated weights and training steps.
     self.weights, xmi = netprop.rprop(self.weights, self.conec, self.bconecno,
                                       self.units, self.inno, self.outno, input,
                                       target, a, b, mimin, mimax, xmi, maxiter)
     if disp:
         err = netprop.sqerror(self.weights, self.conec, self.units,
                               self.inno, self.outno, input, target)
         print("Final error   --> 0.5*(sum of squared errors at output): %.15f" % err)
     return xmi
Example #9
0
    def train_rprop(self, input, target,
                    a=1.2, b=0.5, mimin=0.000001, mimax=50.,
                    xmi=0.1, maxiter=10000, disp=0):
        """
        Rprop training algorithm.

        Allowed parameters:
        a               - training step increasing parameter (default is 1.2)
        b               - training step decreasing parameter (default is 0.5)
        mimin           - minimum training step (default is 0.000001)
        mimax           - maximum training step (default is 50.)
        xmi             - vector containing initial training steps for weights;
                          if 'xmi' is a scalar then its value is set for all
                          weights (default is 0.1)
        maxiter         - the maximum number of iterations (default is 10000)
        disp            - print convergence message if non-zero (default is 0)

        Method updates network weights and returns 'xmi' vector
        (after 'maxiter' iterations).
        """
        input, target = self._setnorm(input, target)
        # Broadcast a scalar initial step to every connection; isinstance
        # also accepts int/float subclasses, unlike the type-name check.
        if isinstance(xmi, (float, int)):
            xmi = [xmi] * len(self.conec)

        if disp:
            err = netprop.sqerror(self.weights, self.conec, self.units,
                                  self.inno, self.outno, input, target)
            # Parenthesized single-argument print works in Python 2 and 3.
            print("Initial error --> 0.5*(sum of squared errors at output): %.15f" % err)
        # netprop.rprop returns both the updated weights and training steps.
        self.weights, xmi = netprop.rprop(self.weights, self.conec, self.bconecno,
                                          self.units, self.inno, self.outno, input,
                                          target, a, b, mimin, mimax, xmi, maxiter)
        if disp:
            err = netprop.sqerror(self.weights, self.conec, self.units,
                                  self.inno, self.outno, input, target)
            print("Final error   --> 0.5*(sum of squared errors at output): %.15f" % err)
        return xmi
Example #10
0
 def sqerror(self, input, target):
     """
     Return 0.5*(sum of squared errors at output), computed after the
     input and target arrays have been normalized.

     Normalization runs on every call, so frequent use may be slow.

     Warning:
     _setnorm should be called before sqerror - will be changed in future.
     """
     checked_in, checked_tgt = self._testdata(input, target)
     # Normalization data might be uninitialized here!
     norm_in = _normarray(checked_in, self.eni)
     norm_tgt = _normarray(checked_tgt, self.eno)
     return netprop.sqerror(self.weights, self.conec, self.units,
                            self.inno, self.outno, norm_in, norm_tgt)
Example #11
0
 def sqerror(self, input, target):
     """
     Compute 0.5*(sum of squared errors at output) on the normalized
     forms of *input* and *target*.

     Data normalization is performed on each call, so this can be slow
     when invoked frequently.

     Warning:
     _setnorm should be called before sqerror - will be changed in future.
     """
     input, target = self._testdata(input, target)
     # Normalization data might be uninitialized here!
     input = _normarray(input, self.eni)
     target = _normarray(target, self.eno)
     err = netprop.sqerror(self.weights, self.conec, self.units,
                           self.inno, self.outno, input, target)
     return err