Example #1
    def _decorate_objective(self, cost, ExtraArgs=None):
        """decorate the cost function with bounds, penalties, monitors, etc

input::
    - cost is the objective function, of the form y = cost(x, *ExtraArgs),
      where x is a candidate solution, and ExtraArgs is the tuple of positional
      arguments required to evaluate the objective."""
        #print("@%r %r %r" % (cost, ExtraArgs, max))
        evalmon = self._evalmon
        raw = cost
        if ExtraArgs is None: ExtraArgs = ()
        self._fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
        if self._useStrictRange:
            indx = list(self.popEnergy).index(self.bestEnergy)
            ngen = self.generations  #XXX: no random if generations=0 ?
            for i in range(self.nPop):
                self.population[i] = self._clipGuessWithinRangeBoundary(
                    self.population[i], (not ngen) or (i == indx))
            cost = wrap_bounds(cost, self._strictMin,
                               self._strictMax)  #XXX: remove?
            from mystic.constraints import and_
            constraints = and_(self._constraints,
                               self._strictbounds,
                               onfail=self._strictbounds)
        else:
            constraints = self._constraints
        cost = wrap_penalty(cost, self._penalty)
        cost = wrap_nested(cost, constraints)
        if self._reducer:
            #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
            cost = reduced(self._reducer, arraylike=True)(cost)
        # hold on to the 'wrapped' and 'raw' cost function
        self._cost = (cost, raw, ExtraArgs)
        self._live = True
        return cost
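
The wrappers composed above (wrap_function, wrap_bounds, wrap_penalty, wrap_nested, reduced) are called unqualified in the method. The short sketch below is not part of the original example; it applies a few of the wrappers directly, assuming they are importable from mystic.tools.

# minimal sketch (assumption: these wrappers live in mystic.tools)
from mystic.monitors import Monitor
from mystic.tools import wrap_function, wrap_bounds, wrap_penalty

def cost(x):
    return sum(xi**2 for xi in x)

evalmon = Monitor()
fcalls, wrapped = wrap_function(cost, (), evalmon)        # count calls, log to the monitor
bounded = wrap_bounds(wrapped, [-1.0, -1.0], [1.0, 1.0])  # reject points outside the box
penalized = wrap_penalty(bounded, lambda x: 0.0)          # no-op penalty, just for shape
print(penalized([0.5, 0.5]), fcalls[0])                   # 0.5, and one logged evaluation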
Example #2
    def _decorate_objective(self, cost, ExtraArgs=None):
        """decorate the cost function with bounds, penalties, monitors, etc

input::
    - cost is the objective function, of the form y = cost(x, *ExtraArgs),
      where x is a candidate solution, and ExtraArgs is the tuple of positional
      arguments required to evaluate the objective."""
        #print("@%r %r %r" % (cost, ExtraArgs, max))
        evalmon = self._evalmon
        raw = cost
        if ExtraArgs is None: ExtraArgs = ()
        self._fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
        if self._useStrictRange:
            if self.generations:
                #NOTE: pop[0] was best, may not be after resetting simplex
                for i, j in enumerate(self._setSimplexWithinRangeBoundary()):
                    self.population[i + 1] = self.population[0].copy()
                    self.population[i + 1][i] = j
            else:
                self.population[0] = self._clipGuessWithinRangeBoundary(
                    self.population[0])
            cost = wrap_bounds(cost, self._strictMin,
                               self._strictMax)  #XXX: remove?
            from mystic.constraints import and_
            constraints = and_(self._constraints,
                               self._strictbounds,
                               onfail=self._strictbounds)
        else:
            constraints = self._constraints
        cost = wrap_penalty(cost, self._penalty)
        cost = wrap_nested(cost, constraints)
        if self._reducer:
            #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
            cost = reduced(self._reducer, arraylike=True)(cost)
        # hold on to the 'wrapped' and 'raw' cost function
        self._cost = (cost, raw, ExtraArgs)
        self._live = True
        return cost
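
This variant re-seats the simplex within the strict range before wrapping the cost. A minimal usage sketch (not part of the original example, and using only mystic's documented solver interface) of the kind of call that triggers this decoration, here with the Nelder-Mead solver:

# minimal usage sketch (assumes mystic's public solver API)
from mystic.solvers import NelderMeadSimplexSolver
from mystic.monitors import Monitor
from mystic.termination import CandidateRelativeTolerance as CRT

def rosen(x):  # 2-D Rosenbrock
    return 100.0*(x[1] - x[0]**2)**2 + (1.0 - x[0])**2

evalmon = Monitor()
solver = NelderMeadSimplexSolver(2)
solver.SetInitialPoints([1.5, 1.5])
solver.SetStrictRanges([-2.0, -2.0], [2.0, 2.0])  # exercises the bounds handling above
solver.SetEvaluationMonitor(evalmon)              # receives every cost evaluation
solver.Solve(rosen, CRT())                        # the objective is decorated internally
print(solver.bestSolution, solver.bestEnergy)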
Example #3
    def _Step(self, cost=None, ExtraArgs=None, **kwds):
        """perform a single optimization iteration

input::
    - cost is the objective function, of the form y = cost(x, *ExtraArgs),
      where x is a candidate solution, and ExtraArgs is the tuple of positional
      arguments required to evaluate the objective.

note::
    ExtraArgs needs to be a *tuple* of extra arguments.

    This method accepts additional args that are specific for the current
    solver, as detailed in the `_process_inputs` method.
        """
        # process and activate input settings
        settings = self._process_inputs(kwds)
        #(hardwired: due to python3.x exec'ing to locals())
        callback = settings['callback'] if 'callback' in settings else None
        disp = settings['disp'] if 'disp' in settings else False
        xtol = settings['xtol'] if 'xtol' in settings else self.xtol
        imax = settings['imax'] if 'imax' in settings else self.imax

        # HACK to enable not explicitly calling _decorate_objective
        cost = self._bootstrap_objective(cost, ExtraArgs)

        if self._useStrictRange:  #XXX: necessary? or handled by wrap_nested?
            from mystic.constraints import and_
            constraints = and_(self._constraints,
                               self._strictbounds,
                               onfail=self._strictbounds)
        else:
            constraints = self._constraints

        direc = self._direc  #XXX: throws Error if direc=None after generation=0
        x = self.population[0]  # bestSolution
        fval = self.popEnergy[0]  # bestEnergy
        x1, fx, bigind, delta = self.__internals
        init = False  # flag to do 0th iteration 'post-initialization'

        if not len(self._stepmon):  # do generation = 0
            init = True
            x = asfarray(x).flatten()
            x = asfarray(constraints(x))
            N = len(x)  #XXX: this should be equal to self.nDim
            rank = len(x.shape)
            if not -1 < rank < 2:
                raise ValueError(
                    "Initial guess must be a scalar or rank-1 sequence.")

            if direc is None:
                direc = eye(N, dtype=float)
            else:
                direc = asarray(direc, dtype=float)
            fval = squeeze(cost(x))
            if self._maxiter != 0:
                self._stepmon(x, fval, self.id)  # get initial values
                # if savefrequency matches, then save state
                self._AbstractSolver__save_state()

        elif not self.generations:  # do generations = 1
            ilist = range(len(x))
            x1 = x.copy()
            # do initial "second half" of solver step
            fx = fval
            bigind = 0
            delta = 0.0
            for i in ilist:
                direc1 = self._direc[i]
                fx2 = fval
                fval, x, direc1 = _linesearch_powell(cost,
                                                     x,
                                                     direc1,
                                                     tol=xtol * 100,
                                                     maxiter=imax)
                isnan = numpy.isinf(fx2) & numpy.isinf(fval)
                if not isnan and (fx2 - fval) > delta:
                    delta = fx2 - fval
                    bigind = i

                # apply constraints
                x = asfarray(constraints(x))  #XXX: use self._map?
            # decouple from 'best' energy
            self.energy_history = self.energy_history + [fval]

        else:  # do generations > 1
            # Construct the extrapolated point
            direc1 = x - x1
            x2 = 2 * x - x1
            x1 = x.copy()
            fx2 = squeeze(cost(x2))

            if (fx > fx2):
                t = 2.0 * (fx + fx2 - 2.0 * fval)
                temp = (fx - fval - delta)
                t *= temp * temp
                temp = fx - fx2
                t -= delta * temp * temp
                if t < 0.0:
                    fval, x, direc1 = _linesearch_powell(cost,
                                                         x,
                                                         direc1,
                                                         tol=xtol * 100,
                                                         maxiter=imax)
                    direc[bigind] = direc[-1]
                    direc[-1] = direc1

            #x = asfarray(constraints(x))

            self._direc = direc
            self.population[0] = x  # bestSolution
            self.popEnergy[0] = fval  # bestEnergy
            self.energy_history = None  # resync with 'best' energy
            self._stepmon(x, fval, self.id)  # get ith values
            # if savefrequency matches, then save state
            self._AbstractSolver__save_state()

            fx = fval
            bigind = 0
            delta = 0.0
            ilist = range(len(x))
            for i in ilist:
                direc1 = direc[i]
                fx2 = fval
                fval, x, direc1 = _linesearch_powell(cost,
                                                     x,
                                                     direc1,
                                                     tol=xtol * 100,
                                                     maxiter=imax)
                isnan = numpy.isinf(fx2) & numpy.isinf(fval)
                if not isnan and (fx2 - fval) > delta:
                    delta = fx2 - fval
                    bigind = i

                # apply constraints
                x = asfarray(constraints(x))  #XXX: use self._map?

            # decouple from 'best' energy
            self.energy_history = self.energy_history + [fval]

        self.__internals = [x1, fx, bigind, delta]
        self._direc = direc
        self.population[0] = x  # bestSolution
        self.popEnergy[0] = fval  # bestEnergy

        # do callback
        if callback is not None: callback(self.bestSolution)
        # initialize termination conditions, if needed
        if init: self._termination(self)  #XXX: at generation 0 or always?
        return  #XXX: call Terminated ?
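
This Powell-style directional step is normally driven one iteration at a time through the solver's public Step/Terminated interface. A minimal sketch, not part of the original example:

# minimal usage sketch (assumes mystic's public Step/Terminated interface)
from mystic.solvers import PowellDirectionalSolver
from mystic.termination import NormalizedChangeOverGeneration as NCOG

def sphere(x):
    return sum(xi**2 for xi in x)

solver = PowellDirectionalSolver(3)
solver.SetInitialPoints([1.0, -0.5, 2.0])
solver.SetTermination(NCOG(1e-6))
while not solver.Terminated():
    solver.Step(sphere)            # each call runs one _Step iteration
print(solver.bestSolution, solver.bestEnergy)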
Example #4
    def _Step(self, cost=None, ExtraArgs=None, **kwds):
        """perform a single optimization iteration

input::
    - cost is the objective function, of the form y = cost(x, *ExtraArgs),
      where x is a candidate solution, and ExtraArgs is the tuple of positional
      arguments required to evaluate the objective.

note::
    ExtraArgs needs to be a *tuple* of extra arguments.

    This method accepts additional args that are specific for the current
    solver, as detailed in the `_process_inputs` method.
        """
        # process and activate input settings
        settings = self._process_inputs(kwds)
        #(hardwired: due to python3.x exec'ing to locals())
        callback = settings['callback'] if 'callback' in settings else None
        disp = settings['disp'] if 'disp' in settings else False
        radius = settings['radius'] if 'radius' in settings else self.radius
        adaptive = settings['adaptive'] if 'adaptive' in settings else self.adaptive

        # HACK to enable not explicitly calling _decorate_objective
        cost = self._bootstrap_objective(cost, ExtraArgs)

        if self._useStrictRange:  #XXX: necessary? or handled by wrap_nested?
            from mystic.constraints import and_
            constraints = and_(self._constraints,
                               self._strictbounds,
                               onfail=self._strictbounds)
        else:
            constraints = self._constraints

        if adaptive:
            dim = float(len(self.population[0]))  # dimensionality of x0
            rho = 1
            chi = 1 + 2 / dim
            psi = 0.75 - 1 / (2 * dim)
            sigma = 1 - 1 / dim
        else:
            rho = 1
            chi = 2
            psi = 0.5
            sigma = 0.5
        init = False  # flag to do 0th iteration 'post-initialization'

        if not len(self._stepmon):  # do generation = 0
            init = True
            x0 = self.population[0]
            x0 = asfarray(x0).flatten()
            x0 = asfarray(constraints(x0))
            #####XXX: this blows away __init__, so replace __init__ with this?
            N = len(x0)
            rank = len(x0.shape)
            if not -1 < rank < 2:
                raise ValueError(
                    "Initial guess must be a scalar or rank-1 sequence.")
            if rank == 0:
                sim = numpy.zeros((N + 1, ), dtype=x0.dtype)
            else:
                sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
            fsim = numpy.ones((N + 1, ), float) * self._init_popEnergy
            ####################################################
            sim[0] = x0
            fsim[0] = cost(x0)

        elif not self.generations:  # do generations = 1
            #--- ensure initial simplex is within bounds ---
            val = self._setSimplexWithinRangeBoundary(radius)
            #--- end bounds code ---
            sim = self.population
            fsim = self.popEnergy
            x0 = sim[0]
            N = len(x0)
            # populate the simplex
            for k in range(0, N):
                y = numpy.array(x0, copy=True)
                y[k] = val[k]
                sim[k + 1] = y
                f = cost(y)  #XXX: use self._map?
                fsim[k + 1] = f

        else:  # do generations > 1
            sim = self.population
            fsim = self.popEnergy
            N = len(sim[0])
            one2np1 = range(1, N + 1)

            # apply constraints  #XXX: is this the only appropriate place???
            sim[0] = asfarray(constraints(sim[0]))

            xbar = numpy.add.reduce(sim[:-1], 0) / N
            xr = (1 + rho) * xbar - rho * sim[-1]
            fxr = cost(xr)
            doshrink = 0

            if fxr < fsim[0]:
                xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
                fxe = cost(xe)

                if fxe < fxr:
                    sim[-1] = xe
                    fsim[-1] = fxe
                else:
                    sim[-1] = xr
                    fsim[-1] = fxr
            else:  # fsim[0] <= fxr
                if fxr < fsim[-2]:
                    sim[-1] = xr
                    fsim[-1] = fxr
                else:  # fxr >= fsim[-2]
                    # Perform contraction
                    if fxr < fsim[-1]:
                        xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
                        fxc = cost(xc)

                        if fxc <= fxr:
                            sim[-1] = xc
                            fsim[-1] = fxc
                        else:
                            doshrink = 1
                    else:
                        # Perform an inside contraction
                        xcc = (1 - psi) * xbar + psi * sim[-1]
                        fxcc = cost(xcc)

                        if fxcc < fsim[-1]:
                            sim[-1] = xcc
                            fsim[-1] = fxcc
                        else:
                            doshrink = 1

                    if doshrink:
                        for j in one2np1:
                            sim[j] = sim[0] + sigma * (sim[j] - sim[0])
                            fsim[j] = cost(sim[j])  #XXX: use self._map?

        if len(self._stepmon):
            # sort so sim[0,:] has the lowest function value
            ind = numpy.argsort(fsim)
            sim = numpy.take(sim, ind, 0)
            fsim = numpy.take(fsim, ind, 0)
        self.population = sim  # bestSolution = sim[0]
        self.popEnergy = fsim  # bestEnergy = fsim[0]
        self._stepmon(sim[0], fsim[0], self.id)  # sim = all; "best" is sim[0]
        # if savefrequency matches, then save state
        self._AbstractSolver__save_state()

        # do callback
        if callback is not None: callback(self.bestSolution)
        # initialize termination conditions, if needed
        if init: self._termination(self)  #XXX: at generation 0 or always?
        return  #XXX: call Terminated ?
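
The rho/chi/psi/sigma values above are the standard Nelder-Mead reflection, expansion, contraction, and shrink coefficients (the 'adaptive' branch rescales them by dimension). A small standalone sketch, not part of the original example, writes the corresponding simplex updates out with plain numpy:

# standalone sketch of the simplex updates used above (plain numpy, no mystic)
import numpy as np

rho, chi, psi, sigma = 1.0, 2.0, 0.5, 0.5             # the non-adaptive defaults
sim = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # vertices, best first
xbar = sim[:-1].mean(axis=0)                          # centroid excluding the worst vertex
xr  = (1 + rho) * xbar - rho * sim[-1]                # reflection
xe  = (1 + rho * chi) * xbar - rho * chi * sim[-1]    # expansion
xc  = (1 + psi * rho) * xbar - psi * rho * sim[-1]    # outside contraction
xcc = (1 - psi) * xbar + psi * sim[-1]                # inside contraction
shrunk = sim[0] + sigma * (sim[1:] - sim[0])          # shrink toward the best vertex
print(xr, xe, xc, xcc, shrunk, sep="\n")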
Example #5
from mystic.bounds import MeasureBounds
from mystic.constraints import and_  # assumed import: `and_` is used below but not imported in this snippet
# NOTE: xlb, xub, npts, wlb, wub, model, o_ave, o_ave_err, and the moment/coupling
# helpers (normalize_moments, constrain_expected, constrained_out, flatten, outer,
# check, integer_indices) are assumed to be defined earlier in the originating script.

bnd = MeasureBounds(xlb, xub, n=npts, wlb=wlb, wub=wub)

## moment-based constraints ##
normcon = normalize_moments()
###momcons = constrain_moments(a_ave, a_var, a_ave_err, a_var_err)
###is_cons = constrained(a_ave, a_var, a_ave_err, a_var_err)
#momcon0 = constrain_moments(a_ave, a_var, a_ave_err, a_var_err, idx=0)
#momcon1 = constrain_moments(b_ave, b_var, b_ave_err, b_var_err, idx=1)
#is_con0 = constrained(a_ave, a_var, a_ave_err, a_var_err, idx=0)
#is_con1 = constrained(b_ave, b_var, b_ave_err, b_var_err, idx=1)
#is_cons = lambda c: bool(additive(is_con0)(is_con1)(c))
momcons = constrain_expected(model, o_ave, o_ave_err, (bnd.lower, bnd.upper))
is_cons = constrained_out(model, o_ave, o_ave_err)

## index-based constraints ##
# impose constraints sequentially (faster, but assumes constraints are decoupled)
#scons = outer(integer_indices)(flatten(npts)(outer(momcons)(normcon)))
scons = flatten(npts)(outer(momcons)(normcon))
#scons = flatten(npts)(outer(momcon1)(outer(momcon0)(normcon)))

# impose constraints concurrently (slower, but safer)
#ccons = and_(flatten(npts)(normcon), flatten(npts)(momcons), integer_indices)
ccons = and_(flatten(npts)(normcon), flatten(npts)(momcons))
#ccons = and_(flatten(npts)(normcon), flatten(npts)(momcon0), flatten(npts)(momcon1))

# check parameters (instead of measures)
iscon = check(npts)(is_cons)
#rvcon = constrained_integers(index)
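
MeasureBounds collects the position bounds (xlb, xub) and weight bounds (wlb, wub) for npts support points. The snippet below is not from the original example; it fills in hypothetical placeholder values just to show the call shape, since the real xlb, xub, npts, wlb, wub come from the surrounding script.

# sketch with hypothetical placeholder values (the originals are defined elsewhere)
from mystic.bounds import MeasureBounds

xlb, xub = [0.0, 1.0], [1.0, 10.0]   # hypothetical position bounds per axis
wlb, wub = [0.0, 0.0], [1.0, 1.0]    # hypothetical weight bounds per axis
npts = (2, 2)                        # hypothetical support points per axis
bnd = MeasureBounds(xlb, xub, n=npts, wlb=wlb, wub=wub)
print(bnd.lower)
print(bnd.upper)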
Example #6
from mystic.solvers import diffev2
from mystic.monitors import VerboseMonitor
from mystic.constraints import impose_sum, discrete, and_
import numpy as np

N = 10
b = 5
bounds = [(0, 1)] * N


def objective(x, w):
    s = 0
    for i in range(len(x) - 1):
        for j in range(i, len(x)):
            s += w[i, j] * x[i] * x[j]
    return s


w = np.ones((N, N))  #XXX: replace with actual values of wij

cost = lambda x: -objective(x, w)

c = and_(lambda x: impose_sum(b, x), discrete([0, 1])(lambda x: x))

mon = VerboseMonitor(10)
solution = diffev2(cost,
                   bounds,
                   constraints=c,
                   bounds=bounds,
                   itermon=mon,
                   gtol=50,
                   maxiter=5000,
                   maxfun=50000,
                   npop=10)
print(solution)
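
The constraint c above chains impose_sum with a discrete mapping. A small sketch, not part of the original example, shows what each piece does on its own (using the same mystic.constraints imports as Example #8):

# sketch of the individual constraint pieces (same imports as Example #8)
from mystic.constraints import impose_sum, discrete

binary = discrete([0, 1])(lambda x: x)     # map each entry onto the set {0, 1}
print(binary([0.2, 0.9, 0.4, 0.7]))        # expect a 0/1 vector
print(impose_sum(5, [1.0]*10))             # adjust the entries so they total 5 (expect ten 0.5s)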
Example #7
    def _Step(self, cost=None, ExtraArgs=None, **kwds):
        """perform a single optimization iteration

input::
    - cost is the objective function, of the form y = cost(x, *ExtraArgs),
      where x is a candidate solution, and ExtraArgs is the tuple of positional
      arguments required to evaluate the objective.

note::
    ExtraArgs needs to be a *tuple* of extra arguments.

    This method accepts additional args that are specific for the current
    solver, as detailed in the `_process_inputs` method.
        """
        # process and activate input settings
        settings = self._process_inputs(kwds)
        #(hardwired: due to python3.x exec'ing to locals())
        callback = settings['callback'] if 'callback' in settings else None
        disp = settings['disp'] if 'disp' in settings else False
        strategy = settings['strategy'] if 'strategy' in settings else self.strategy

        # HACK to enable not explicitly calling _decorate_objective
        cost = self._bootstrap_objective(cost, ExtraArgs)

        init = False  # flag to do 0th iteration 'post-initialization'

        if not len(self._stepmon):  # do generation = 0
            init = True
            strategy = None
            self.population[0] = asfarray(self.population[0])
            # decouple bestSolution from population and bestEnergy from popEnergy
            bs = self.population[0]
            self.bestSolution = bs.copy() if hasattr(bs, 'copy') else bs[:]
            self.bestEnergy = self.popEnergy[0]
            del bs

        if self._useStrictRange:
            from mystic.constraints import and_
            constraints = and_(self._constraints,
                               self._strictbounds,
                               onfail=self._strictbounds)
        else:
            constraints = self._constraints

        for candidate in range(self.nPop):
            if not len(self._stepmon):
                # generate trialSolution (within valid range)
                self.trialSolution[candidate][:] = self.population[candidate]
            if strategy:
                # generate trialSolution (within valid range)
                strategy(self, candidate)
            # apply constraints
            self.trialSolution[candidate][:] = constraints(
                self.trialSolution[candidate])
        # bind constraints to cost #XXX: apparently imposes constraints poorly
        #concost = wrap_nested(cost, constraints)

        # apply penalty
        #trialEnergy = map(self._penalty, self.trialSolution)#,**self._mapconfig)
        # calculate cost
        trialEnergy = self._map(cost, self.trialSolution, **self._mapconfig)
        self._fcalls[0] += len(self.trialSolution)  #FIXME: manually increment

        # each trialEnergy should be a scalar
        if isiterable(trialEnergy[0]) and len(trialEnergy[0]) == 1:
            trialEnergy = ravel(trialEnergy)
            # for len(trialEnergy) > 1, will throw ValueError below

        for candidate in range(self.nPop):
            if trialEnergy[candidate] < self.popEnergy[candidate]:
                # New low for this candidate
                self.popEnergy[candidate] = trialEnergy[candidate]
                self.population[candidate][:] = self.trialSolution[candidate]
                self.UpdateGenealogyRecords(candidate,
                                            self.trialSolution[candidate][:])

                # Check if all-time low
                if trialEnergy[candidate] < self.bestEnergy:
                    self.bestEnergy = trialEnergy[candidate]
                    self.bestSolution[:] = self.trialSolution[candidate]

        # log bestSolution and bestEnergy (includes penalty)
        #FIXME: StepMonitor works for 'pp'?
        self._stepmon(self.bestSolution[:], self.bestEnergy, self.id)
        # if savefrequency matches, then save state
        self._AbstractSolver__save_state()

        # do callback
        if callback is not None: callback(self.bestSolution)
        # initialize termination conditions, if needed
        if init: self._termination(self)  #XXX: at generation 0 or always?
        return  #XXX: call Terminated ?
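
A minimal sketch (not part of the original example) of running the differential evolution solver whose per-generation update is implemented by the _Step above, using only mystic's documented interface:

# minimal usage sketch (assumes mystic's public DE solver API)
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration as COG
from mystic.strategy import Best1Bin

def sphere(x):
    return sum(xi**2 for xi in x)

solver = DifferentialEvolutionSolver(3, 20)          # dim=3, npop=20
solver.SetRandomInitialPoints([-5.0]*3, [5.0]*3)
solver.SetStrictRanges([-5.0]*3, [5.0]*3)
solver.Solve(sphere, COG(1e-8), strategy=Best1Bin)   # each generation runs _Step
print(solver.bestSolution, solver.bestEnergy)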
Example #8
"""
Maximization with a boolean variable and constraints.
"""
from mystic.solvers import diffev2
from mystic.monitors import VerboseMonitor
from mystic.constraints import impose_sum, discrete, and_
import numpy as np

N = 10
b = 5
bounds = [(0,1)] * N

def objective(x, w):
    s = 0
    for i in range(len(x)-1):
        for j in range(i, len(x)):
            s += w[i,j] * x[i] * x[j]
    return s


w = np.ones((N,N)) #XXX: replace with actual values of wij

cost = lambda x: -objective(x, w)

c = and_(lambda x: impose_sum(b, x), discrete([0,1])(lambda x:x))

mon = VerboseMonitor(10)
solution = diffev2(cost, bounds, constraints=c, bounds=bounds, itermon=mon,
                   gtol=50, maxiter=5000, maxfun=50000, npop=10)
print(solution)
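
A quick follow-up check, not in the original script, that the reported solution respects the imposed constraints:

# sanity check (not in the original script)
print(sum(solution))                                  # expect b, i.e. 5
print(sorted(set(round(xi) for xi in solution)))      # expect only 0s and 1s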