Example #1
    def __init__(self, func, xpar, xmin, xmax, npop, sfactor, xprob, step,
                 seed):
        Opt.__init__(self, func, xmin, xmax)
        self.ncores_nm = ncoresNelderMead()
        self.key2 = Key2()
        self.npop = min(npop, 4096)
        self.seed = seed
        # build one instance of each differential-evolution strategy
        self.strategies = tuple(
            strategy(self.func, self.npar, npop, sfactor, xprob)
            for strategy in (Strategy0, Strategy1, Strategy2, Strategy3,
                             Strategy4, Strategy5, Strategy6, Strategy7,
                             Strategy8, Strategy9))

        xpar = numpy.asarray(xpar)
        if step is None:
            step = xpar * 1.2 + 1.2
        factor = 10
        self.polytope = SimplexRandom(func, npop, xpar, xmin, xmax, step,
                                      seed, factor)
        self.local_opt = self.ncores_nm.algo
        return
Example #2
    # Inner helper of montecarlo (compare Examples #3 and #4); it reads
    # numcores, verbose and stat_cb0 from the enclosing scope.
    def myopt(myfcn, xxx, ftol, maxfev, seed, pop, xprob,
              weight, factor=4.0, debug=False):

        x, xmin, xmax = xxx
        maxfev_per_iter = 512 * x.size

        def random_start(xmin, xmax):
            # uniform random starting point within the bounds
            return numpy.asarray([random.uniform(lo, hi)
                                  for lo, hi in zip(xmin, xmax)])

        ############################# NelderMead #############################
        mymaxfev = min(maxfev_per_iter, maxfev)
        if all(x == 0.0):
            # all parameters are zero, so offset the step away from the origin
            mystep = [1.2 + val for val in x]
        else:
            mystep = [1.2 * val for val in x]
        if numcores == 1:
            result = neldermead(myfcn, x, xmin, xmax, maxfev=mymaxfev,
                                ftol=ftol, finalsimplex=9, step=mystep)
            x = numpy.asarray(result[1], numpy.float64)
            nfval = result[2]
            nfev = result[4].get('nfev')
        else:
            ncores_nm = ncoresNelderMead()
            nfev, nfval, x = \
                ncores_nm(stat_cb0, x, xmin, xmax, ftol, mymaxfev, numcores)
        if verbose or debug:
            print('f_nm%s=%.14e in %d nfev' % (x, nfval, nfev))
        ############################# NelderMead #############################

        ############################## nmDifEvo #############################
        xmin, xmax = _narrow_limits(4 * factor, [x, xmin, xmax], debug=False)
        mymaxfev = min(maxfev_per_iter, maxfev - nfev)
        if numcores == 1:
            result = difevo_nm(myfcn, x, xmin, xmax, ftol, mymaxfev, verbose,
                               seed, pop, xprob, weight)
            nfev += result[4].get('nfev')
            x = numpy.asarray(result[1], numpy.float64)
            nfval = result[2]
        else:
            ncores_de = ncoresDifEvo()
            mystep = None
            tmp_nfev, tmp_fmin, tmp_par = \
                ncores_de(stat_cb0, x, xmin, xmax, ftol, mymaxfev, mystep,
                          numcores, pop, seed, weight, xprob, verbose)
            nfev += tmp_nfev
            if tmp_fmin < nfval:
                nfval = tmp_fmin
                x = tmp_par
        if verbose or debug:
            print('f_de_nm%s=%.14e in %d nfev' % (x, nfval, nfev))
        ############################## nmDifEvo #############################

        ofval = FUNC_MAX
        while nfev < maxfev:

            xmin, xmax = _narrow_limits(factor, [x, xmin, xmax], debug=False)

            ############################ nmDifEvo #############################
            y = random_start(xmin, xmax)
            mymaxfev = min(maxfev_per_iter, maxfev - nfev)
            if numcores == 1:
                result = difevo_nm(myfcn, y, xmin, xmax, ftol, mymaxfev,
                                   verbose, seed, pop, xprob, weight)
                nfev += result[4].get('nfev')
                if result[2] < nfval:
                    nfval = result[2]
                    x = numpy.asarray(result[1], numpy.float64)
                if verbose or debug:
                    print('f_de_nm%s=%.14e in %d nfev' %
                          (x, result[2], result[4].get('nfev')))
            ############################ nmDifEvo #############################

            if debug:
                print('ofval=%.14e\tnfval=%.14e\n' % (ofval, nfval))

            if sao_fcmp(ofval, nfval, ftol) <= 0:
                return x, nfval, nfev
            ofval = nfval
            factor *= 2

        return x, nfval, nfev
Example #3
def montecarlo(fcn,
               x0,
               xmin,
               xmax,
               ftol=EPSILON,
               maxfev=None,
               verbose=0,
               seed=74815,
               population_size=None,
               xprob=0.9,
               weighting_factor=0.8,
               numcores=1):
    def stat_cb0(pars):
        return fcn(pars)[0]

    x, xmin, xmax = _check_args(x0, xmin, xmax)

    # make sure that the cross over prob is within [0.1,1.0]
    xprob = max(0.1, xprob)
    xprob = min(xprob, 1.0)

    # make sure that weighting_factor is within [0.1,1.0]
    weighting_factor = max(0.1, weighting_factor)
    weighting_factor = min(weighting_factor, 1.0)

    random.seed(seed)
    if seed is None:
        seed = random.randint(0, 2147483648)  # 2**31
    if population_size is None:
        population_size = 12 * x.size

    if maxfev is None:
        maxfev = 8192 * population_size

    def myopt(myfcn, xxx, ftol, maxfev, seed, pop, xprob,
              weight, factor=4.0, debug=False):

        x, xmin, xmax = xxx
        maxfev_per_iter = 512 * x.size

        def random_start(xmin, xmax):
            # uniform random starting point within the bounds
            return numpy.asarray([random.uniform(lo, hi)
                                  for lo, hi in zip(xmin, xmax)])

        ############################# NelderMead #############################
        mymaxfev = min(maxfev_per_iter, maxfev)
        if all(x == 0.0):
            # all parameters are zero, so offset the step away from the origin
            mystep = [1.2 + val for val in x]
        else:
            mystep = [1.2 * val for val in x]
        if numcores == 1:
            result = neldermead(myfcn, x, xmin, xmax, maxfev=mymaxfev,
                                ftol=ftol, finalsimplex=9, step=mystep)
            x = numpy.asarray(result[1], numpy.float64)
            nfval = result[2]
            nfev = result[4].get('nfev')
        else:
            ncores_nm = ncoresNelderMead()
            nfev, nfval, x = \
                ncores_nm(stat_cb0, x, xmin, xmax, ftol, mymaxfev, numcores)
        if verbose or debug:
            print('f_nm%s=%.14e in %d nfev' % (x, nfval, nfev))
        ############################# NelderMead #############################

        ############################## nmDifEvo #############################
        xmin, xmax = _narrow_limits(4 * factor, [x, xmin, xmax], debug=False)
        mymaxfev = min(maxfev_per_iter, maxfev - nfev)
        if numcores == 1:
            result = difevo_nm(myfcn, x, xmin, xmax, ftol, mymaxfev, verbose,
                               seed, pop, xprob, weight)
            nfev += result[4].get('nfev')
            x = numpy.asarray(result[1], numpy.float64)
            nfval = result[2]
        else:
            ncores_de = ncoresDifEvo()
            mystep = None
            tmp_nfev, tmp_fmin, tmp_par = \
                ncores_de(stat_cb0, x, xmin, xmax, ftol, mymaxfev, mystep,
                          numcores, pop, seed, weight, xprob, verbose)
            nfev += tmp_nfev
            if tmp_fmin < nfval:
                nfval = tmp_fmin
                x = tmp_par
        if verbose or debug:
            print('f_de_nm%s=%.14e in %d nfev' % (x, nfval, nfev))
        ############################## nmDifEvo #############################

        ofval = FUNC_MAX
        while nfev < maxfev:

            xmin, xmax = _narrow_limits(factor, [x, xmin, xmax], debug=False)

            ############################ nmDifEvo #############################
            y = random_start(xmin, xmax)
            mymaxfev = min(maxfev_per_iter, maxfev - nfev)
            if numcores == 1:
                result = difevo_nm(myfcn, y, xmin, xmax, ftol, mymaxfev,
                                   verbose, seed, pop, xprob, weight)
                nfev += result[4].get('nfev')
                if result[2] < nfval:
                    nfval = result[2]
                    x = numpy.asarray(result[1], numpy.float64)
                if verbose or debug:
                    print('f_de_nm%s=%.14e in %d nfev' %
                          (x, result[2], result[4].get('nfev')))
            ############################ nmDifEvo #############################

            if debug:
                print('ofval=%.14e\tnfval=%.14e\n' % (ofval, nfval))

            if sao_fcmp(ofval, nfval, ftol) <= 0:
                return x, nfval, nfev
            ofval = nfval
            factor *= 2

        return x, nfval, nfev

    x, fval, nfev = myopt(fcn, [x, xmin, xmax], numpy.sqrt(ftol), maxfev,
                          seed, population_size, xprob, weighting_factor,
                          factor=2.0, debug=False)

    if nfev < maxfev:
        if all(x == 0.0):
            mystep = [1.2 + val for val in x]
        else:
            mystep = [1.2 * val for val in x]
        if numcores == 1:
            result = neldermead(fcn, x, xmin, xmax,
                                maxfev=min(512 * len(x), maxfev - nfev),
                                ftol=ftol, finalsimplex=9, step=mystep)

            x = numpy.asarray(result[1], numpy.float64)
            fval = result[2]
            nfev += result[4].get('nfev')
        else:
            ncores_nm = ncoresNelderMead()
            tmp_nfev, tmp_fmin, tmp_par = \
                ncores_nm(stat_cb0, x, xmin, xmax, ftol, maxfev - nfev, numcores)
            nfev += tmp_nfev
            # There is a bug here somewhere using broyden_tridiagonal
            if tmp_fmin < fval:
                fval = tmp_fmin
                x = tmp_par
    ierr = 0
    if nfev >= maxfev:
        ierr = 3
    status, msg = _get_saofit_msg(maxfev, ierr)

    rv = (status, x, fval, msg, {'info': status, 'nfev': nfev})
    return rv
Example #4
def montecarlo(fcn, x0, xmin, xmax, ftol=EPSILON, maxfev=None, verbose=0,
               seed=74815, population_size=None, xprob=0.9,
               weighting_factor=0.8, numcores=1):
    """Monte Carlo optimization method.

    This is an implementation of the differential-evolution algorithm
    from Storn and Price (1997) [1]_. A population of fixed size,
    containing n-dimensional vectors (where n is the number of free
    parameters), is randomly initialized. At each iteration a new
    n-dimensional trial vector is generated by combining vectors from
    the population, and the trial vector is accepted only if it lowers
    the objective function.

    Parameters
    ----------
    fcn : function reference
       Returns the current statistic and per-bin statistic value when
       given the model parameters.
    x0, xmin, xmax : sequence of number
       The starting point, minimum, and maximum values for each
       parameter.
    ftol : number
       The function tolerance to terminate the search for the minimum;
       the default is EPSILON ~ 1.19209289551e-07, the single-precision
       machine epsilon (the smallest single-precision x such that
       ``1.0 != 1.0 + x``).
    maxfev : int or `None`
       The maximum number of function evaluations; the default value
       of `None` means to use ``8192 * population_size``.
    verbose : int
       The amount of information to print during the fit. The default
       is `0`, which means no output.
    seed : int
       The seed for the random number generator.
    population_size : int or `None`
       The population of potential solutions is allowed to evolve to
       search for the minimum of the fit statistics. The trial
       solution is randomly chosen from a combination of the current
       population, and it is only accepted if it lowers the
       statistics.  A value of `None` means to use a value ``12 * n``,
       where `n` is the number of free parameters.
    xprob : num
       The crossover probability should be within the range [0.5,1.0];
       default value is 0.9. A high value for the crossover
       probability should result in a faster convergence rate;
       conversely, a lower value should make the differential
       evolution method more robust.
    weighting_factor : num
       The weighting factor should be within the range [0.5, 1.0];
       default is 0.8. Differential evolution is more sensitive to the
       weighting_factor than to the xprob parameter. A lower value for
       the weighting_factor, coupled with an increase in the
       population_size, gives a more robust search at the cost of
       efficiency.
    numcores : int
       The number of CPU cores to use. The default is `1`.

    References
    ----------

    .. [1] Storn, R. and Price, K. "Differential Evolution: A Simple
           and Efficient Adaptive Scheme for Global Optimization over
           Continuous Spaces." J. Global Optimization 11, 341-359,
           1997.
           http://www.icsi.berkeley.edu/~storn/code.html

    """

    def stat_cb0(pars):
        return fcn(pars)[0]

    x, xmin, xmax = _check_args(x0, xmin, xmax)

    # make sure that the cross over prob is within [0.1,1.0]
    xprob = max(0.1, xprob)
    xprob = min(xprob, 1.0)

    # make sure that weighting_factor is within [0.1,1.0]
    weighting_factor = max(0.1, weighting_factor)
    weighting_factor = min(weighting_factor, 1.0)

    random.seed(seed)
    if seed is None:
        seed = random.randint(0, 2147483648)  # 2**31
    if population_size is None:
        population_size = 12 * x.size

    if maxfev is None:
        maxfev = 8192 * population_size

    def myopt(myfcn, xxx, ftol, maxfev, seed, pop, xprob,
              weight, factor=4.0, debug=False):

        x, xmin, xmax = xxx
        maxfev_per_iter = 512 * x.size

        def random_start(xmin, xmax):
            # uniform random starting point within the bounds
            return numpy.asarray([random.uniform(lo, hi)
                                  for lo, hi in zip(xmin, xmax)])

        ############################# NelderMead #############################
        mymaxfev = min(maxfev_per_iter, maxfev)
        if all(x == 0.0):
            # all parameters are zero, so offset the step away from the origin
            mystep = [1.2 + val for val in x]
        else:
            mystep = [1.2 * val for val in x]
        if numcores == 1:
            result = neldermead(myfcn, x, xmin, xmax, maxfev=mymaxfev,
                                ftol=ftol, finalsimplex=9, step=mystep)
            x = numpy.asarray(result[1], numpy.float64)
            nfval = result[2]
            nfev = result[4].get('nfev')
        else:
            ncores_nm = ncoresNelderMead()
            nfev, nfval, x = \
                ncores_nm(stat_cb0, x, xmin, xmax, ftol, mymaxfev, numcores)

        if verbose or debug:
            print('f_nm%s=%.14e in %d nfev' % (x, nfval, nfev))
        ############################# NelderMead #############################

        ############################## nmDifEvo #############################
        xmin, xmax = _narrow_limits(4 * factor, [x, xmin, xmax], debug=False)
        mymaxfev = min(maxfev_per_iter, maxfev - nfev)
        if numcores == 1:
            result = difevo_nm(myfcn, x, xmin, xmax, ftol, mymaxfev, verbose,
                               seed, pop, xprob, weight)
            nfev += result[4].get('nfev')
            x = numpy.asarray(result[1], numpy.float64)
            nfval = result[2]
        else:
            ncores_de = ncoresDifEvo()
            mystep = None
            tmp_nfev, tmp_fmin, tmp_par = \
                ncores_de(stat_cb0, x, xmin, xmax, ftol, mymaxfev, mystep,
                          numcores, pop, seed, weight, xprob, verbose)
            nfev += tmp_nfev
            if tmp_fmin < nfval:
                nfval = tmp_fmin
                x = tmp_par

        if verbose or debug:
            print('f_de_nm%s=%.14e in %d nfev' % (x, nfval, nfev))
        ############################## nmDifEvo #############################

        ofval = FUNC_MAX
        while nfev < maxfev:

            xmin, xmax = _narrow_limits(factor, [x, xmin, xmax], debug=False)

            ############################ nmDifEvo #############################
            y = random_start(xmin, xmax)
            mymaxfev = min(maxfev_per_iter, maxfev - nfev)
            if numcores == 1:
                result = difevo_nm(myfcn, y, xmin, xmax, ftol, mymaxfev,
                                   verbose, seed, pop, xprob, weight)
                nfev += result[4].get('nfev')
                if result[2] < nfval:
                    nfval = result[2]
                    x = numpy.asarray(result[1], numpy.float64)
                if verbose or debug:
                    print('f_de_nm%s=%.14e in %d nfev' %
                          (x, result[2], result[4].get('nfev')))
            ############################ nmDifEvo #############################

            if debug:
                print('ofval=%.14e\tnfval=%.14e\n' % (ofval, nfval))

            if sao_fcmp(ofval, nfval, ftol) <= 0:
                return x, nfval, nfev
            ofval = nfval
            factor *= 2

        return x, nfval, nfev

    x, fval, nfev = myopt(fcn, [x, xmin, xmax], numpy.sqrt(ftol), maxfev,
                          seed, population_size, xprob, weighting_factor,
                          factor=2.0, debug=False)

    if nfev < maxfev:
        if all(x == 0.0):
            mystep = [1.2 + val for val in x]
        else:
            mystep = [1.2 * val for val in x]
        if numcores == 1:
            result = neldermead(fcn, x, xmin, xmax,
                                maxfev=min(512 * len(x), maxfev - nfev),
                                ftol=ftol, finalsimplex=9, step=mystep)

            x = numpy.asarray(result[1], numpy.float64)
            fval = result[2]
            nfev += result[4].get('nfev')
        else:
            ncores_nm = ncoresNelderMead()
            tmp_nfev, tmp_fmin, tmp_par = \
                ncores_nm(stat_cb0, x, xmin, xmax, ftol, maxfev - nfev,
                          numcores)
            nfev += tmp_nfev
            # There is a bug here somewhere using broyden_tridiagonal
            if tmp_fmin < fval:
                fval = tmp_fmin
                x = tmp_par
    ierr = 0
    if nfev >= maxfev:
        ierr = 3
    status, msg = _get_saofit_msg(maxfev, ierr)

    rv = (status, x, fval, msg, {'info': status, 'nfev': nfev})
    return rv
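
For context, a minimal usage sketch of montecarlo as documented above. The rosenbrock callable, starting point, and bounds are illustrative stand-ins: montecarlo only requires that fcn return the statistic as the first element of a tuple (which is what stat_cb0 extracts), and the unpacking follows the (status, par, fval, msg, info) tuple built at the end of the function.

import numpy

def rosenbrock(pars):
    # montecarlo expects fcn(pars) to return (statistic, per-bin values);
    # the per-bin part is unused here, so None stands in for it
    fval = numpy.sum(100.0 * (pars[1:] - pars[:-1] ** 2) ** 2
                     + (1.0 - pars[:-1]) ** 2)
    return fval, None

x0 = numpy.array([-1.2, 1.0])
xmin = numpy.array([-10.0, -10.0])
xmax = numpy.array([10.0, 10.0])

status, par, fval, msg, info = montecarlo(rosenbrock, x0, xmin, xmax)
print(msg, par, fval, info['nfev'])

The trial-vector construction described in the docstring corresponds to the classic DE/rand/1/bin step; a self-contained sketch of one such step follows. This is illustrative only, not the exact strategy set implemented by the Strategy0..Strategy9 classes or ncoresDifEvo.

import numpy

rng = numpy.random.default_rng(74815)

def de_trial(pop, i, weighting_factor=0.8, xprob=0.9):
    # mutate three distinct donor vectors, then apply binomial crossover
    # against the current population member pop[i]
    npop, npar = pop.shape
    r1, r2, r3 = rng.choice([k for k in range(npop) if k != i], size=3,
                            replace=False)
    mutant = pop[r1] + weighting_factor * (pop[r2] - pop[r3])
    cross = rng.random(npar) < xprob
    cross[rng.integers(npar)] = True   # ensure at least one mutated gene
    return numpy.where(cross, mutant, pop[i])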
Example #5
from sherpa.utils import _ncpus
from sherpa.optmethods.ncoresnm import ncoresNelderMead
from sherpa.optmethods.ncoresde import ncoresDifEvo
import sherpa.optmethods.opt as tstopt

import pytest

NUMPAR = 10
NCORES_NM = ncoresNelderMead()
NCORES_DE = ncoresDifEvo()


def print_result(name, f, x, nfev):
    print('%s(%s) = %g in %d nfev' % (name, x, f, nfev))


# Mapping from SherpaTestCase.assertEqualWithinTol to pytest.approx,
# noting that the tolerance in SherpaTestCase is an absolute
# tolerance, not a relative one.
#
def tst_opt(opt, fcn, x0, xmin, xmax, fmin, tol=1e-2):
    def func(arg):
        return fcn(arg)[0]
    # The listing is truncated here. A plausible completion, given the
    # comment above and the (nfev, fmin, par) return convention the
    # ncores optimizers follow in these examples:
    nfev, fval, par = opt(func, x0, xmin, xmax)
    print_result(opt.__class__.__name__, fval, par, nfev)
    assert fval == pytest.approx(fmin, abs=tol)
Example #6
    def __init__(self):
        self.ncores_nm = ncoresNelderMead()
        return
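
For completeness, a minimal usage sketch of ncoresNelderMead following the calling convention visible in the examples above: the instance is called with the objective, starting point, bounds, tolerance, evaluation budget, and core count, and returns (nfev, fmin, par). The quadratic objective and the specific argument values are illustrative stand-ins.

import numpy
from sherpa.optmethods.ncoresnm import ncoresNelderMead

def objective(pars):
    # simple convex test function with its minimum at (1, ..., 1)
    return numpy.sum((pars - 1.0) ** 2)

x0 = numpy.array([-1.0, 2.0, 0.5])
xmin = numpy.full(3, -10.0)
xmax = numpy.full(3, 10.0)

ncores_nm = ncoresNelderMead()
nfev, fmin, par = ncores_nm(objective, x0, xmin, xmax, 1.0e-7, 4096, 2)
print('f%s=%g in %d nfev' % (par, fmin, nfev))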