Code example #1
	def check(self):
		"""
		Checks the optimization algorithm's settings and raises an exception if 
		something is wrong. 
		"""
		Optimizer.check(self)
		
		if self.expand<=1.0:
			raise Exception(DbgMsg("NMOPT", "Expansion coefficient should be greater than 1."))
		
		if self.reflect>self.expand:
			raise Exception(DbgMsg("NMOPT", "Reflection coefficient should be smaller than expansion coefficient."))
		
		if self.reflect<=0.0:
			raise Exception(DbgMsg("NMOPT", "Reflection coefficient should be greater than 0."))
		
		if (self.outerContract<=0.0) or (self.outerContract>=self.reflect):
			raise Exception(DbgMsg("NMOPT", "Outer contraction coefficient should be between 0 and reflection coefficient."))
			
		if (self.innerContract>=0.0) or (self.innerContract<=-1.0):
			raise Exception(DbgMsg("NMOPT", "Inner contraction coefficient must be from (-1,0)."))

		if (self.shrink<=0.0) or (self.shrink>=1.0):
			raise Exception(DbgMsg("NMOPT", "Shrink coefficient must be from (0,1)."))
			
		if self.reltol<0:
			raise Exception(DbgMsg("NMOPT", "Negative relative tolerance."))
		
		if self.ftol<0:
			raise Exception(DbgMsg("NMOPT", "Negative f tolerance."))
		
		if (self.xtol<0).any():
			raise Exception(DbgMsg("NMOPT", "Negative x tolerance."))
Code example #2
File: grnm.py Project: ustaros-ai/pyopus
	def reset(self, x0):
		"""
		Puts the optimizer in its initial state and sets the initial point to 
		be the 1-dimensional array or list *x0*. The length of the array 
		becomes the dimension of the optimization problem 
		(:attr:`ndim` member). 
		
		The initial simplex is built around *x0* by calling the 
		:meth:`buildSimplex` method with default values for the *rel* and *abs* 
		arguments. 
		
		If *x0* is a 2-dimensional array or list of size 
		(*ndim*+1) times *ndim* it specifies the initial simplex. 
		
		A corresponding grid is created by calling the :meth:`buildGrid` method. 
		
		The initial value of the natural logarithm of the determinant of the 
		simplex side vectors is calculated and stored. 
		"""
		# Debug message
		if self.debug:
			DbgMsgOut("GRNMOPT", "Resetting.")
		
		# Make it an array
		x0=array(x0)
		
		# Is x0 a point or a simplex?
		if x0.ndim==1:
			# Point
			# Set x now
			NelderMead.reset(self, x0)
			
			if self.debug:
				DbgMsgOut("GRNMOPT", "Generating initial simplex from initial point.")
				
			sim=self.buildSimplex(x0)
			self._setSimplex(sim)
			self.delta=self.buildGrid()
			self.z=x0
		else:
			# Simplex or error (handled in _setSimplex())
			self._setSimplex(x0)
			self.delta=self.buildGrid()
			self.z=x0[0,:]
			
			if self.debug:
				DbgMsgOut("GRNMOPT", "Using specified initial simplex.")
				
			# Set x to first point in simplex after it was checked in _setSimplex()
			Optimizer.reset(self, x0[0,:])
			
		# Reset point moves counter 
		self.simplexmoves=zeros(self.ndim+1)
		
		# Make x tolerance an array
		self.xtol=array(self.xtol)
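The :meth:`buildSimplex` call is not shown in this snippet. A common construction, and roughly what such a method does, is to add one vertex per coordinate by perturbing *x0* relatively, falling back to an absolute step where a component is zero. A standalone sketch under that assumption, not pyopus's actual implementation:

import numpy as np

def build_simplex(x0, rel=0.1, abs_step=0.1):
    # Returns an (ndim+1) x ndim simplex: the point x0 plus one vertex
    # per coordinate, offset by rel*|x0[i]| (or abs_step where x0[i] == 0).
    x0 = np.asarray(x0, dtype=float)
    n = x0.size
    sim = np.tile(x0, (n + 1, 1))
    for i in range(n):
        sim[i + 1, i] += rel * abs(x0[i]) if x0[i] != 0.0 else abs_step
    return sim

print(build_simplex([1.0, 0.0, -2.0]).shape)   # (4, 3)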
Code example #3
File: ga.py Project: gic888/gdblocks
import numpy as np

# ParamRange, Store, Model, GA_Prop, and Optimizer are defined elsewhere
# in this project (gic888/gdblocks)
def test():
    p = ParamRange(np.array([[-1, 1, 10], [0, 5, 10], [-2, 1.1, 10]]))
    s = Store('testga.giclog')
    pars = {'threads': 0, 'size': 20, 'time': 0.1}
    model = Model() # test implementation that returns the product of the params
    model.perfect = 10
    gapars = {'crossover': .9, 'mutation': .05, 'transposition': .001, 'minprop': 0}
    alg = GA_Prop(gapars, p)
    o = Optimizer(pars, model, alg, s)
    o.run()
Code example #4
    def __init__(self, f, x0, range=None, h=0.5, emax=1e-8, imax=1000):
        '''
        Initializes the optimizer.

        To create an optimizer of this type, instantiate the class with the
        parameters given below:

        :Parameters:
          f
            A function of a single variable to be optimized. The function
            should take only one parameter and return the function value.
          x0
            First estimate of the minimum. Since this is a one-dimensional
            method, this should be a ``float`` or ``int``.
          range
            A range of values might be passed to the algorithm, but it is not
            necessary. If supplied, this parameter should be a tuple of two
            values, ``(x0, x1)``, where ``x0`` is the start of the interval,
            and ``x1`` its end; ``x0`` should be smaller than ``x1``. When this
            parameter is present, the algorithm will not let the estimates
            fall outside the given interval.
          h
            The initial step of the search. Defaults to 0.5.
          emax
            Maximum allowed error. The algorithm stops as soon as the error is
            below this level. The error is absolute.
          imax
            Maximum number of iterations; the algorithm stops as soon as this
            number of iterations has been executed, no matter what the error
            is at that moment.
        '''
        Optimizer.__init__(self)
        self.__f = f
        self.__x = x0
        self.range = range
        '''Holds the range for the estimates. If this attribute is set, the
        algorithm will never let the estimates fall outside the given
        interval.'''
        self.__h = h
        self.__emax = float(emax)
        self.__imax = int(imax)
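This snippet only shows the initializer. As a rough illustration of how a one-dimensional step-based search of this family can proceed (step while the value improves, otherwise reverse and halve the step, clamped to the optional range), here is a generic sketch; it is not peach's actual step logic:

def direct_search_1d(f, x0, rng=None, h=0.5, emax=1e-8, imax=1000):
    # Minimal 1-D direct search: keep stepping while f improves,
    # otherwise reverse direction and halve the step size.
    x = float(x0)
    fx = f(x)
    for _ in range(imax):
        if abs(h) < emax:
            break                                 # step below the error bound
        xn = x + h
        if rng is not None:
            xn = min(max(xn, rng[0]), rng[1])     # clamp to the interval
        fn = f(xn)
        if fn < fx:
            x, fx = xn, fn
        else:
            h = -h / 2.0
    return x

print(direct_search_1d(lambda x: (x - 2.0) ** 2, x0=0.0))   # close to 2.0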
Code example #5
	def __init__(self, function, debug=0, fstop=None, maxiter=None, 
					reflect=1.0, expand=2.0, outerContract=0.5, innerContract=-0.5, shrink=0.5, 
					reltol=1e-15, ftol=1e-15, xtol=1e-9, simplex=None, looseContraction=False):
		Optimizer.__init__(self, function, debug, fstop, maxiter)
		
		# Coefficients
		self.reflect=reflect
		self.expand=expand
		self.outerContract=outerContract
		self.innerContract=innerContract
		self.shrink=shrink
		
		# Stopping condition
		self.reltol=reltol
		self.ftol=ftol
		self.xtol=xtol
		
		# Simplex
		self.simplex=simplex
		
		# Modifications
		self.looseContraction=looseContraction
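Assuming this is the pyopus Nelder-Mead optimizer seen in the other snippets (the module path and the run() call below are assumptions, not confirmed by this page), construction and use might look like:

from pyopus.optimizer.nm import NelderMead   # module path is an assumption

def banana(x):
    # Rosenbrock test function
    return (1.0 - x[0]) ** 2 + 100.0 * (x[1] - x[0] ** 2) ** 2

opt = NelderMead(banana, maxiter=10000, reflect=1.0, expand=2.0,
                 outerContract=0.5, innerContract=-0.5, shrink=0.5)
opt.check()             # validates the coefficients (see code example #1)
opt.reset([-1.0, 1.0])  # builds the initial simplex around this point
opt.run()               # run() is assumed from the usual Optimizer interface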
Code example #6
    def __init__(self, f, x0, emax=1e-8, imax=1000):
        '''
        Initializes the optimizer.

        To create an optimizer of this type, instantiate the class with the
        parameters given below:

        :Parameters:
          f
            A function of a single variable to be optimized. The function
            should take only one parameter and return the function value.
          x0
            First estimate of the minimum. The Fibonacci search needs two
            estimates to partition the interval. Thus, the first estimate must
            be a tuple ``(xl, xh)``, with the property that ``xl < xh``. Be
            aware, however, that no checking is done -- if the estimate doesn't
            satisfy this condition, an exception will be raised at some point.

            Notice that, given the nature of the estimate of the Fibonacci
            method, it is not necessary to have a specific parameter to restrict
            the range of acceptable values -- it is already embedded in the
            estimate. If you need to restrict your estimate to an interval,
            just use its limits as ``xl`` and ``xh`` in the estimate.
          emax
            Maximum allowed error. The algorithm stops as soon as the error is
            below this level. The error is absolute.
          imax
            Maximum number of iterations; the algorithm stops as soon as this
            number of iterations has been executed, no matter what the error
            is at that moment.
        '''
        Optimizer.__init__(self)
        self.__f = f
        self.__x = x0
        self.__k1 = 1.
        self.__k2 = 1.
        self.__emax = float(emax)
        self.__imax = int(imax)
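For reference, a compact standalone sketch of a Fibonacci section search like the one this initializer sets up (the ``__k1``/``__k2`` members above presumably hold consecutive Fibonacci numbers); this is the textbook idea, not peach's exact implementation:

def fibonacci_search(f, xl, xh, emax=1e-8, imax=1000):
    # Shrinks [xl, xh] using ratios of consecutive Fibonacci numbers,
    # which approach the golden ratio ~0.618.
    k1, k2 = 1.0, 1.0
    for _ in range(imax):
        if xh - xl < emax:
            break
        k1, k2 = k2, k1 + k2
        r = k1 / k2
        x1 = xh - r * (xh - xl)
        x2 = xl + r * (xh - xl)
        if f(x1) < f(x2):
            xh = x2          # minimum lies in [xl, x2]
        else:
            xl = x1          # minimum lies in [x1, xh]
    return 0.5 * (xl + xh)

print(fibonacci_search(lambda x: (x - 2.0) ** 2, 0.0, 5.0))   # close to 2.0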
Code example #7
File: quasinewton.py Project: NewsJAM/Summarization
    def __init__(self, f, x0, ranges=None, df=None, h=0.1, emax=1e-5, imax=1000):
        '''
        Initializes the optimizer.

        To create an optimizer of this type, instantiate the class with the
        parameters given below:

        :Parameters:
          f
            A multivariable function to be optimized. The function should have
            only one parameter, a multidimensional line-vector, and return the
            function value, a scalar.

          x0
            First estimate of the minimum. Estimates can be given in any format,
            but internally they are converted to a one-dimensional vector, where
            each component corresponds to the estimate of that particular
            variable. The vector is computed by flattening the array.

          ranges
            A range of values might be passed to the algorithm, but it is not
            necessary. If supplied, this parameter should be a list of ranges
            for each variable of the objective function. It is specified as a
            list of tuples of two values, ``(x0, x1)``, where ``x0`` is the
            start of the interval, and ``x1`` its end. Obviously, ``x0`` should
            be smaller than ``x1``. It can also be given as a list with a simple
            tuple in the same format. In that case, the same range will be
            applied for every variable in the optimization.

          df
            A function to calculate the gradient vector of the cost function
            ``f``. Defaults to ``None``; if no gradient is supplied, it is
            estimated from the cost function using Euler equations.

          h
            Convergence step. This method does not take into consideration the
            possibility of varying the convergence step, to avoid Stiefel cages.

          emax
            Maximum allowed error. The algorithm stops as soon as the error is
            below this level. The error is absolute.

          imax
            Maximum number of iterations; the algorithm stops as soon as this
            number of iterations has been executed, no matter what the error
            is at that moment.
        '''
        Optimizer.__init__(self)
        self.__f = f
        self.__x = array(x0).ravel()
        if df is None:
            self.__df = gradient(f)
        else:
            self.__df = df
        self.__B = inv(hessian(self.__f)(self.x))
        self.__h = h

        # Determine ranges of the variables
        if ranges is not None:
            ranges = list(ranges)
            if len(ranges) == 1:
                ranges = array(ranges * len(x0[0]))
            else:
                ranges = array(ranges)
        self.ranges = ranges
        '''Holds the ranges for every variable. Although it is a writable
        property, care should be taken in changing parameters before ending the
        convergence.'''

        self.__emax = float(emax)
        self.__imax = int(imax)
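The ``gradient(f)`` fallback above builds a numerical estimate of the gradient when none is supplied. A minimal sketch of such an estimator using central differences (peach's own helper may differ in details):

import numpy as np

def numerical_gradient(f, h=1e-6):
    # Returns a function that estimates grad f at x by central differences.
    def df(x):
        x = np.asarray(x, dtype=float)
        g = np.zeros_like(x)
        for i in range(x.size):
            e = np.zeros_like(x)
            e[i] = h
            g[i] = (f(x + e) - f(x - e)) / (2.0 * h)
        return g
    return df

df = numerical_gradient(lambda x: (x ** 2).sum())
print(df(np.array([1.0, -2.0])))   # approximately [ 2., -4.]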
Code example #8
	def reset(self, x0):
		"""
		Puts the optimizer in its initial state and sets the initial point to 
		be the 1-dimensional array *x0*. The length of the array becomes the 
		dimension of the optimization problem (:attr:`ndim` member). 
		
		The initial simplex is built around *x0* by calling the 
		:meth:`buildSimplex` method with default values for the *rel* and 
		*abs* arguments. 
		
		If *x0* is a 2-dimensional array of size (*ndim*+1) times *ndim* it 
		specifies the initial simplex. 
		"""
		# Debug message
		if self.debug:
			DbgMsgOut("NM", "Resetting.")
			
		# Make it an array
		x0=array(x0)

		# Is x0 a point or a simplex?
		if x0.ndim==1:
			# Point
			# Set x now
			Optimizer.reset(self, x0)
			
			if self.debug:
				DbgMsgOut("NM", "Generating initial simplex from initial point.")
				
			sim=self.buildSimplex(x0)
			self._setSimplex(sim)
		else:
			# Simplex or error (handled in _setSimplex())
			self._setSimplex(x0)
			
			if self.debug:
				DbgMsgOut("NM", "Using specified initial simplex.")
				
			# Set x to first point in simplex after it was checked in _setSimplex()
			Optimizer.reset(self, x0[0,:])
		
		# Reset point moves counter 
		self.simplexmoves=zeros(self.ndim+1)
		
		# Make x tolerance an array
		self.xtol=array(self.xtol)
		
		# Reset step counters: reflections, expansions, outer and inner 
		# contractions, and shrink steps
		self.nr=0
		self.ne=0
		self.noc=0
		self.nic=0
		self.ns=0

		# Counters of successful steps of each kind
		self.nrok=0
		self.neok=0
		self.nocok=0
		self.nicok=0
		
		# Inner and outer contraction convergence counters
		self.icconv=0
		self.occonv=0
Code example #9
File: quasinewton.py Project: wal613/peach
    def __init__(self,
                 f,
                 x0,
                 ranges=None,
                 df=None,
                 h=0.1,
                 emax=1e-5,
                 imax=1000):
        '''
        Initializes the optimizer.

        To create an optimizer of this type, instantiate the class with the
        parameters given below:

        :Parameters:
          f
            A multivariable function to be optimized. The function should have
            only one parameter, a multidimensional line-vector, and return the
            function value, a scalar.

          x0
            First estimate of the minimum. Estimates can be given in any format,
            but internally they are converted to a one-dimensional vector, where
            each component corresponds to the estimate of that particular
            variable. The vector is computed by flattening the array.

          ranges
            A range of values might be passed to the algorithm, but it is not
            necessary. If supplied, this parameter should be a list of ranges
            for each variable of the objective function. It is specified as a
            list of tuples of two values, ``(x0, x1)``, where ``x0`` is the
            start of the interval, and ``x1`` its end. Obviously, ``x0`` should
            be smaller than ``x1``. It can also be given as a list with a simple
            tuple in the same format. In that case, the same range will be
            applied for every variable in the optimization.

          df
            A function to calculate the gradient vector of the cost function
            ``f``. Defaults to ``None``; if no gradient is supplied, it is
            estimated from the cost function using Euler equations.

          h
            Convergence step. This method does not take into consideration the
            possibility of varying the convergence step, to avoid Stiefel cages.

          emax
            Maximum allowed error. The algorithm stops as soon as the error is
            below this level. The error is absolute.

          imax
            Maximum number of iterations; the algorithm stops as soon as this
            number of iterations has been executed, no matter what the error
            is at that moment.
        '''
        Optimizer.__init__(self)
        self.__f = f
        self.__x = array(x0).ravel()
        if df is None:
            self.__df = gradient(f)
        else:
            self.__df = df
        self.__B = inv(hessian(self.__f)(self.x))
        self.__h = h

        # Determine ranges of the variables
        if ranges is not None:
            ranges = list(ranges)
            if len(ranges) == 1:
                ranges = array(ranges * len(x0[0]))
            else:
                ranges = array(ranges)
        self.ranges = ranges
        '''Holds the ranges for every variable. Although it is a writable
        property, care should be taken in changing parameters before ending the
        convergence.'''

        self.__emax = float(emax)
        self.__imax = int(imax)
Code example #10
File: sdnm.py Project: xanderhsia/pyopus
	def reset(self, x0):
		"""
		Puts the optimizer in its initial state and sets the initial point to 
		be the 1-dimensional array or list *x0*. The length of the array 
		becomes the dimension of the optimization problem 
		(:attr:`ndim` member). 
		
		The initial simplex is built around *x0* by calling the 
		:meth:`buildSimplex` method with default values for the *rel* and *abs* 
		arguments. 
		
		If *x0* is a 2-dimensional array or list of size 
		(*ndim*+1) times *ndim* it specifies the initial simplex. 
		
		The initial value of the natural logarithm of the determinant of the 
		simplex side vectors is calculated and stored. This value gets updated 
		at every step of the simplex algorithm. The only time it needs to be 
		reevaluated is at reshape, which is simple because the reshaped 
		simplex is orthogonal. The only place where a full determinant needs 
		to be calculated is here. 
		"""
		# Debug message
		if self.debug:
			DbgMsgOut("SDNMOPT", "Resetting.")
		
		# Make it an array
		x0=array(x0)
		
		# Is x0 a point or a simplex?
		if x0.ndim==1:
			# Point
			# Set x now
			NelderMead.reset(self, x0)
			
			if self.debug:
				DbgMsgOut("SDNMOPT", "Generating initial simplex from initial point.")
				
			sim=self.buildSimplex(x0)
			self._setSimplex(sim)
		else:
			# Simplex or error (handled in _setSimplex())
			self._setSimplex(x0)
			
			if self.debug:
				DbgMsgOut("SDNMOPT", "Using specified initial simplex.")
				
			# Set x to first point in simplex after it was checked in _setSimplex()
			Optimizer.reset(self, x0[0,:])
		
		# Reset point moves counter 
		self.simplexmoves=zeros(self.ndim+1)
		
		# Calculate log(n! det([v])) where [v] are the n side vectors
		# arranged as columns of a matrix
		(v, l)=self.sortedSideVectors()
		self.logDet=log(abs(det(v)))
		
		# Initial h 
		self.h=1.0
		
		# Make x tolerance an array
		self.xtol=array(self.xtol)
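The ``logDet`` computation above can be reproduced directly with numpy. Given an (ndim+1) x ndim simplex, the side vectors are the differences between the last ndim vertices and the first one (``sortedSideVectors`` itself is not shown here; note that |det| is the same whether the vectors are arranged as rows or columns):

import numpy as np

sim = np.array([[0.0, 0.0],
                [1.0, 0.0],
                [0.0, 1.0]])      # a 2-D simplex, shape (ndim+1, ndim)

v = sim[1:] - sim[0]              # side vectors, one per row
logDet = np.log(abs(np.linalg.det(v)))
print(logDet)                     # det = 1 here, so logDet = 0.0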
Code example #11
File: multivar.py Project: NewsJAM/Summarization
    def __init__(self, f, x0, ranges=None, h=0.5, emax=1e-8, imax=1000):
        '''
        Initializes the optimizer.

        To create an optimizer of this type, instantiate the class with the
        parameters given below:

        :Parameters:
          f
            A multivariable function to be optimized. The function should have
            only one parameter, a multidimensional line-vector, and return the
            function value, a scalar.

          x0
            First estimate of the minimum. Estimates can be given in any format,
            but internally they are converted to a one-dimensional vector, where
            each component corresponds to the estimate of that particular
            variable. The vector is computed by flattening the array.

          ranges
            A range of values might be passed to the algorithm, but it is not
            necessary. If supplied, this parameter should be a list of ranges
            for each variable of the objective function. It is specified as a
            list of tuples of two values, ``(x0, x1)``, where ``x0`` is the
            start of the interval, and ``x1`` its end. Obviously, ``x0`` should
            be smaller than ``x1``. It can also be given as a list with a simple
            tuple in the same format. In that case, the same range will be
            applied for every variable in the optimization.

          h
            The initial step of the search. Defaults to 0.5.

          emax
            Maximum allowed error. The algorithm stops as soon as the error is
            below this level. The error is absolute.

          imax
            Maximum number of iterations; the algorithm stops as soon as this
            number of iterations has been executed, no matter what the error
            is at that moment.
        '''
        Optimizer.__init__(self)
        self.__f = f
        self.__x = array(x0).ravel()
        n = self.__x.size
        self.__h = ones((n, ))
        self.__h[0] = -0.5
        self.__dx = h * eye(n, 1).reshape(self.__x.shape)

        # Determine ranges of the variables
        if ranges is not None:
            ranges = list(ranges)
            if len(ranges) == 1:
                ranges = array(ranges * len(x0[0]))
            else:
                ranges = array(ranges)
        self.ranges = ranges
        '''Holds the ranges for every variable. Although it is a writable
        property, care should be taken in changing parameters before ending the
        convergence.'''

        self.__emax = float(emax)
        self.__imax = int(imax)
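A note on the step setup above: ``eye(n, 1)`` is an n x 1 array whose only nonzero entry is a 1 in the first position, so after reshaping to the shape of ``__x``, ``__dx`` is a step of length ``h`` along the first coordinate only. A quick check of that construction:

import numpy as np

n, h = 3, 0.5
dx = h * np.eye(n, 1).reshape((n, ))
print(dx)   # [0.5 0.  0. ] -- the initial step points along the first axis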
Code example #12
File: sdnm.py Project: ustaros-ai/pyopus
    def reset(self, x0):
        """
		Puts the optimizer in its initial state and sets the initial point to 
		be the 1-dimensional array or list *x0*. The length of the array 
		becomes the dimension of the optimization problem 
		(:attr:`ndim` member). 
		
		The initial simplex is built around *x0* by calling the 
		:meth:`buildSimplex` method with default values for the *rel* and *abs* 
		arguments. 
		
		If *x0* is a 2-dimensional array or list of size 
		(*ndim*+1) times *ndim* it specifies the initial simplex. 
		
		The initial value of the natural logarithm of the simplex side vectors 
		determinant is calculated and stored. This value gets updated at every 
		simplex algorithm step. The only time it needs to be reevaluated is at 
		reshape. But that is also quite simple because the reshaped simplex 
		is orthogonal. The only place where a full determinant needs to be 
		calculated is here. 
		"""
        # Debug message
        if self.debug:
            DbgMsgOut("SDNMOPT", "Resetting.")

        # Make it an array
        x0 = array(x0)

        # Is x0 a point or a simplex?
        if x0.ndim == 1:
            # Point
            # Set x now
            NelderMead.reset(self, x0)

            if self.debug:
                DbgMsgOut("SDNMOPT",
                          "Generating initial simplex from initial point.")

            sim = self.buildSimplex(x0)
            self._setSimplex(sim)
        else:
            # Simplex or error (handled in _setSimplex())
            self._setSimplex(x0)

            if self.debug:
                DbgMsgOut("SDNMOPT", "Using specified initial simplex.")

            # Set x to first point in simplex after it was checked in _setSimplex()
            Optimizer.reset(self, x0[0, :])

        # Reset point moves counter
        self.simplexmoves = zeros(self.ndim + 1)

        # Calculate log(n! det([v])) where [v] are the n side vectors
        # arranged as columns of a matrix
        (v, l) = self.sortedSideVectors()
        self.logDet = log(abs(det(v)))

        # Initial h
        self.h = 1.0

        # Make x tolerance an array
        self.xtol = array(self.xtol)
Code example #13
File: multivar.py Project: wal613/peach
    def __init__(self, f, x0, ranges=None, h=0.5, emax=1e-8, imax=1000):
        '''
        Initializes the optimizer.

        To create an optimizer of this type, instantiate the class with the
        parameters given below:

        :Parameters:
          f
            A multivariable function to be optimized. The function should have
            only one parameter, a multidimensional line-vector, and return the
            function value, a scalar.

          x0
            First estimate of the minimum. Estimates can be given in any format,
            but internally they are converted to a one-dimensional vector, where
            each component corresponds to the estimate of that particular
            variable. The vector is computed by flattening the array.

          ranges
            A range of values might be passed to the algorithm, but it is not
            necessary. If supplied, this parameter should be a list of ranges
            for each variable of the objective function. It is specified as a
            list of tuples of two values, ``(x0, x1)``, where ``x0`` is the
            start of the interval, and ``x1`` its end. Obviously, ``x0`` should
            be smaller than ``x1``. It can also be given as a list with a simple
            tuple in the same format. In that case, the same range will be
            applied for every variable in the optimization.

          h
            The initial step of the search. Defaults to 0.5.

          emax
            Maximum allowed error. The algorithm stops as soon as the error is
            below this level. The error is absolute.

          imax
            Maximum number of iterations; the algorithm stops as soon as this
            number of iterations has been executed, no matter what the error
            is at that moment.
        '''
        Optimizer.__init__(self)
        self.__f = f
        self.__x = array(x0).ravel()
        n = self.__x.size
        self.__h = ones((n, ))
        self.__h[0] = -0.5
        self.__dx = h * eye(n, 1).reshape(self.__x.shape)

        # Determine ranges of the variables
        if ranges is not None:
            ranges = list(ranges)
            if len(ranges) == 1:
                ranges = array(ranges * len(x0[0]))
            else:
                ranges = array(ranges)
        self.ranges = ranges
        '''Holds the ranges for every variable. Although it is a writable
        property, care should be taken in changing parameters before ending the
        convergence.'''

        self.__emax = float(emax)
        self.__imax = int(imax)