Example #1
def fmin_particle_swarm(f, x0, err_crit, iter_max, popsize=100, c1=2, c2=2):
    """
    A simple implementation of the Particle Swarm Optimization Algorithm.
    Pradeep Gowda 2009-03-16

    Parameters
    ----------
    f : function
        The function to minimize.

    x0 : numpy array
        The starting point (argument to f).

    err_crit : float
        Critical error (i.e. tolerance).  Intended to stop the loop when
        error < err_crit; the stopping condition is currently left as a TODO
        below, so this argument is not yet used.

    iter_max : int
        Maximum iterations.

    popsize : int, optional
        Population size.  Larger populations are better at finding the global
        optimum but make the algorithm take longer to run.

    c1 : float, optional
        Coefficient describing a particle's affinity for its own best (local)
        point found so far.

    c2 : float, optional
        Coefficient describing a particle's affinity for the best point any
        particle has seen (the current global best).

    Returns
    -------
    scipy.optimize.Result object
        Includes members 'x', 'fun', 'success', and 'message'.
    """
    dimensions = len(x0)
    LARGE = 1e10

    class Particle:
        pass

    #initialize the particles
    particles = []
    for i in range(popsize):
        p = Particle()
        p.params = x0 + 2 * (_np.random.random(dimensions) - 0.5)
        p.best = p.params[:]
        p.fitness = LARGE  # large == bad fitness
        p.v = _np.zeros(dimensions)
        particles.append(p)

    # let the first particle be the global best
    gbest = particles[0]
    ibest = 0
    # bDoLocalFitnessOpt = False

    #DEBUG
    #if False:
    #    import pickle as _pickle
    #    bestGaugeMx = _pickle.load(open("bestGaugeMx.debug"))
    #    lbfgsbGaugeMx = _pickle.load(open("lbfgsbGaugeMx.debug"))
    #    cgGaugeMx = _pickle.load(open("cgGaugeMx.debug"))
    #    initialGaugeMx = x0.reshape( (4,4) )
    #
    #    #DEBUG: dump line cut to plot
    #    nPts = 100
    #    print "DEBUG: best offsets = \n", bestGaugeMx - initialGaugeMx
    #    print "DEBUG: lbfgs offsets = \n", lbfgsbGaugeMx - initialGaugeMx
    #    print "DEBUG: cg offsets = \n", cgGaugeMx - initialGaugeMx
    #
    #    print "# DEBUG plot"
    #    #fDebug = open("x0ToBest.dat","w")
    #    #fDebug = open("x0ToLBFGS.dat","w")
    #    fDebug = open("x0ToCG.dat","w")
    #    #fDebug = open("LBFGSToBest.dat","w")
    #    #fDebug = open("CGToBest.dat","w")
    #    #fDebug = open("CGToLBFGS.dat","w")
    #
    #    for i in range(nPts+1):
    #        alpha = float(i) / nPts
    #        #matM = (1.0-alpha) * initialGaugeMx + alpha*bestGaugeMx
    #        #matM = (1.0-alpha) * initialGaugeMx + alpha*lbfgsbGaugeMx
    #        matM = (1.0-alpha) * initialGaugeMx + alpha*cgGaugeMx
    #        #matM = (1.0-alpha) * lbfgsbGaugeMx + alpha*bestGaugeMx
    #        #matM = (1.0-alpha) * cgGaugeMx + alpha*bestGaugeMx
    #        #matM = (1.0-alpha) * cgGaugeMx + alpha*lbfgsbGaugeMx
    #        print >> fDebug, "%g %g" % (alpha, f(matM.flatten()))
    #    exit()
    #
    #
    #    fDebug = open("lineDataFromX0.dat","w")
    #    min_offset = -1; max_offset = 1
    #    for i in range(nPts+1):
    #        offset = min_offset + float(i)/nPts * (max_offset-min_offset)
    #        print >> fDebug, "%g" % offset,
    #
    #        for k in range(len(x0)):
    #            x = x0.copy(); x[k] += offset
    #            try:
    #                print >> fDebug, " %g" % f(x),
    #            except:
    #                print >> fDebug, " nan",
    #        print >> fDebug, ""
    #
    #    print >> fDebug, "#END DEBUG plot"
    #    exit()
    #END DEBUG

    #err = 1e10
    for iter_num in range(iter_max):
        w = 1.0  #- i/iter_max

        #bDoLocalFitnessOpt = bool(iter_num > 20 and abs(lastBest-gbest.fitness) < 0.001 and iter_num % 10 == 0)
        # lastBest = gbest.fitness
        # minDistToBest = 1e10; minV = 1e10; maxV = 0 #DEBUG

        for (ip, p) in enumerate(particles):
            fitness = f(p.params)

            #if bDoLocalFitnessOpt:
            #    opts = {'maxiter': 100, 'maxfev': 100, 'disp': False }
            #    local_soln = _spo.minimize(f,p.params,options=opts, method='L-BFGS-B',callback=None, tol=1e-2)
            #    p.params = local_soln.x
            #    fitness = local_soln.fun

            if fitness < p.fitness:  #low 'fitness' is good b/c we're minimizing
                p.fitness = fitness
                p.best = p.params

            if fitness < gbest.fitness:
                gbest = p
                ibest = ip

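            # Standard PSO velocity update: an inertia (w) term plus random attractions
            # toward the particle's own best position (c1) and the swarm's global best (c2).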
            v = w*p.v + c1 * _np.random.random() * (p.best - p.params) \
                    + c2 * _np.random.random() * (gbest.params - p.params)
            p.params = p.params + v
            for (i, pv) in enumerate(p.params):
                p.params[i] = (
                    (pv + 1) % 2) - 1  #periodic b/c on box between -1 and 1

            #from .. import tools as tools_
            #matM = p.params.reshape( (4,4) )  #DEBUG
            #minDistToBest = min(minDistToBest, _tools.frobeniusdist(
            #                                    bestGaugeMx,matM)) #DEBUG
            #minV = min( _np.linalg.norm(v), minV)
            #maxV = max( _np.linalg.norm(v), maxV)

        #print "DB: min diff from best = ", minDistToBest #DEBUG
        #print "DB: min,max v = ", (minV,maxV)

        #if False: #bDoLocalFitnessOpt:
        #    opts = {'maxiter': 100, 'maxfev': 100, 'disp': False }
        #    print "initial fun = ",gbest.fitness,
        #    local_soln = _spo.minimize(f,gbest.params,options=opts, method='L-BFGS-B',callback=None, tol=1e-5)
        #    gbest.params = local_soln.x
        #    gbest.fitness = local_soln.fun
        #    print "  final fun = ",gbest.fitness

        print("Iter %d: global best = %g (index %d)" %
              (iter_num, gbest.fitness, ibest))

        #if err < err_crit:  break  #TODO: stopping condition

    ## Uncomment to print particles
    #for p in particles:
    #    print 'params: %s, fitness: %s, best: %s' % (p.params, p.fitness, p.best)

    solution = _optResult()
    solution.x = gbest.params
    solution.fun = gbest.fitness
    solution.success = True
    #    if iter_num < maxiter:
    #        solution.success = True
    #    else:
    #        solution.success = False
    #        solution.message = "Maximum iterations exceeded"
    return solution
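A minimal usage sketch (the quadratic test function and the call below are illustrative assumptions; the routine itself relies on the module's private numpy import `_np` and its `_optResult` container):

import numpy as np

def sphere(x):
    # Simple convex test function whose minimum (value 0) is at the origin,
    # inside the [-1, 1] box that the particles are wrapped onto.
    return float(np.dot(x, x))

# Hypothetical call; err_crit is accepted but not yet used (see the TODO above).
result = fmin_particle_swarm(sphere, np.zeros(3), err_crit=1e-6, iter_max=50, popsize=50)
print(result.x, result.fun, result.success)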
Example #2
def fmin_evolutionary(f, x0, num_generations, num_individuals):
    """
    Minimize a function using an evolutionary algorithm.

    Uses the third-party deap package to perform an evolutionary
    algorithm that searches for the function's global minimum.

    Parameters
    ----------
    f : function
        The function to minimize.

    x0 : numpy array
        The starting point (argument to f).  Only its length is used: the
        initial population is drawn uniformly from [0, 1) in each coordinate.

    num_generations : int
        The number of generations to carry out (similar to the number
        of iterations).

    num_individuals : int
        The number of individuals in each generation.  More individuals
        make finding the global optimum more likely, but take longer
        to run.


    Returns
    -------
    scipy.optimize.Result object
        Includes members 'x', 'fun', 'success', and 'message'.
    """

    import deap as _deap
    import deap.creator as _creator
    import deap.base as _base
    import deap.tools as _tools
    numParams = len(x0)

    # Create the individual class
    _creator.create("FitnessMin", _base.Fitness, weights=(-1.0, ))
    _creator.create("Individual", list, fitness=_creator.FitnessMin)

    # Create initialization functions
    toolbox = _base.Toolbox()
    toolbox.register("random", _np.random.random)
    toolbox.register(
        "individual",
        _tools.initRepeat,
        _creator.Individual,
        toolbox.random,
        n=numParams
    )  # fn to init an individual from a list of numParams random numbers
    toolbox.register("population", _tools.initRepeat, list, toolbox.individual
                     )  # fn to create a population (still need to specify n)

    # Create operation functions
    def evaluate(individual):
        return f(_np.array(individual)),  #note: must return a tuple

    toolbox.register("mate", _tools.cxTwoPoint)
    toolbox.register("mutate", _tools.mutGaussian, mu=0, sigma=0.5, indpb=0.1)
    toolbox.register("select", _tools.selTournament, tournsize=3)
    toolbox.register("evaluate", evaluate)

    # Create the population
    pop = toolbox.population(n=num_individuals)

    # Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    PROB_TO_CROSS = 0.5
    PROB_TO_MUTATE = 0.2

    # Initialize statistics
    stats = _tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", _np.mean)
    stats.register("std", _np.std)
    stats.register("min", _np.min)
    stats.register("max", _np.max)
    logbook = _tools.Logbook()

    #Run algorithm
    for g in range(num_generations):
        record = stats.compile(pop)
        logbook.record(gen=g, **record)
        print("Gen %d: %s" % (g, record))

        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if _np.random.random() < PROB_TO_CROSS:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        # Apply mutation on the offspring
        for mutant in offspring:
            if _np.random.random() < PROB_TO_MUTATE:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # The population is entirely replaced by the offspring
        pop[:] = offspring

    #get best individual and return params
    indx_min_fitness = _np.argmin([ind.fitness.values[0] for ind in pop])
    best_params = _np.array(pop[indx_min_fitness])

    solution = _optResult()
    solution.x = best_params
    solution.fun = pop[indx_min_fitness].fitness.values[0]
    solution.success = True
    return solution
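A usage sketch under stated assumptions (the deap package must be installed; the multimodal test function is illustrative, not part of the module):

import numpy as np

def bumpy(x):
    # Illustrative multimodal test function with its global minimum (value 0) at the origin.
    return float(np.sum(x**2 - 0.3 * np.cos(3 * np.pi * x)) + 0.3 * len(x))

# Hypothetical call: 20 generations of 100 individuals each.
result = fmin_evolutionary(bumpy, np.zeros(4), num_generations=20, num_individuals=100)
print(result.x, result.fun)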
Example #3
def fmin_supersimplex(fn, x0, outer_tol, inner_tol, max_outer_iter,
                      min_inner_maxiter, max_inner_maxiter):
    """
    Minimize a function using repeated applications of the simplex algorithm.

    By varying the maximum number of iterations and repeatedly calling scipy's
    Nelder-Mead simplex optimization, this function performs as a robust (but
    slow) minimization.

    Parameters
    ----------
    fn : function
        The function to minimize.

    x0 : numpy array
        The starting point (argument to fn).

    outer_tol : float
        Tolerance of outer loop

    inner_tol : float
        Tolerance of inner loop

    max_outer_iter : int
        Maximum number of outer-loop iterations

    min_inner_maxiter : int
        Minimum number of inner-loop iterations

    max_inner_maxiter : int
        Maximum number of inner-loop iterations

    Returns
    -------
    scipy.optimize.Result object
        Includes members 'x', 'fun', 'success', and 'message'.
    """
    f_init = fn(x0)
    f_final = f_init - 10 * outer_tol  #prime the loop
    x_start = x0

    i = 1
    cnt_at_same_maxiter = 1
    inner_maxiter = min_inner_maxiter

    while (f_init - f_final > outer_tol
           or inner_maxiter < max_inner_maxiter) and i < max_outer_iter:
        if f_init - f_final <= outer_tol and inner_maxiter < max_inner_maxiter:
            inner_maxiter *= 10
            cnt_at_same_maxiter = 1
        if cnt_at_same_maxiter > 10 and inner_maxiter > min_inner_maxiter:
            inner_maxiter //= 10  # integer division keeps the 'maxiter' option an int
            cnt_at_same_maxiter = 1
        f_init = f_final

        print(
            ">>> fmin_supersimplex: outer iteration %d (inner_maxiter = %d)" %
            (i, inner_maxiter))
        i += 1
        cnt_at_same_maxiter += 1

        opts = {
            'maxiter': inner_maxiter,
            'maxfev': inner_maxiter,
            'disp': False
        }
        inner_solution = _spo.minimize(fn,
                                       x_start,
                                       options=opts,
                                       method='Nelder-Mead',
                                       callback=None,
                                       tol=inner_tol)

        if not inner_solution.success:
            print("WARNING: fmin_supersimplex inner loop failed (tol=%g, maxiter=%d): %s" \
                % (inner_tol,inner_maxiter,inner_solution.message))

        f_final = inner_solution.fun
        x_start = inner_solution.x
        print(">>> fmin_supersimplex: outer iteration %d gives min = %f" %
              (i, f_final))

    solution = _optResult()
    solution.x = inner_solution.x
    solution.fun = inner_solution.fun
    if i < max_outer_iter:
        solution.success = True
    else:
        solution.success = False
        solution.message = "Maximum iterations exceeded"
    return solution
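A usage sketch (the Rosenbrock test function and the tolerances below are illustrative assumptions; scipy.optimize is imported privately as `_spo` by the surrounding module):

import numpy as np
from scipy.optimize import rosen  # standard Rosenbrock test function

# Hypothetical call: the inner Nelder-Mead budget grows from 100 up to 10000 evaluations.
result = fmin_supersimplex(rosen, np.zeros(4), outer_tol=1e-6, inner_tol=1e-8,
                           max_outer_iter=20, min_inner_maxiter=100,
                           max_inner_maxiter=10000)
print(result.x, result.fun, result.success)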
Example #4
def fmin_simplex(fn, x0, slide=1.0, tol=1e-8, maxiter=1000):
    """
    Minimizes a function using a custom simplex implementation.

    This was used primarily to check scipy's Nelder-Mead method
    and runs much slower, so there's not much reason for using
    this method.

    Parameters
    ----------
    fn : function
        The function to minimize.

    x0 : numpy array
        The starting point (argument to fn).

    slide : float, optional
        Affects initial simplex point locations

    tol : float, optional
        Relative tolerance as a convergence criterion.

    maxiter : int, optional
        Maximum iterations.

    Returns
    -------
    scipy.optimize.Result object
        Includes members 'x', 'fun', 'success', and 'message'.
    """

    # Set up initial values
    n = len(x0)
    f = _np.zeros(n + 1)
    x = _np.zeros((n + 1, n))

    x[0] = x0

    # Set up the initial simplex vertices
    for i in range(1, n + 1):
        x[i] = x0
        x[i, i - 1] = x0[i - 1] + slide

    # Evaluate the function at each initial vertex
    for i in range(n + 1):
        f[i] = fn(x[i])

    # Main loop: runs until the convergence or maximum-iteration break condition
    counter = 0
    while True:
        low = _np.argmin(f)
        high = _np.argmax(f)
        counter += 1

        # Compute the move direction: d points from the worst (highest) vertex
        # toward the centroid of the remaining vertices
        d = (-(n + 1) * x[high] + sum(x)) / n

        # Break if converged (simplex displacement below tol) or out of iterations
        if _np.sqrt(_np.dot(d, d) / n) < tol or counter == maxiter:
            solution = _optResult()
            solution.x = x[low]
            solution.fun = f[low]
            if counter < maxiter:
                solution.success = True
            else:
                solution.success = False
                solution.message = "Maximum iterations exceeded"
            return solution

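        # Reflect the worst vertex through the centroid of the remaining vertices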
        newX = x[high] + 2.0 * d
        newF = fn(newX)

        if newF <= f[low]:
            # Reflected point beats the current best => accept it and try expanding further
            x[high] = newX
            f[high] = newF
            newX = x[high] + d
            newF = fn(newX)

            # Keep the expanded point only if it also improves on the best
            if newF <= f[low]:
                x[high] = newX
                f[high] = newF

        else:
            # Reflected point does not beat the current best

            # Accept the reflection if it at least improves on the worst point
            if newF <= f[high]:
                x[high] = newX
                f[high] = newF
            else:
                # Contraction
                newX = x[high] + 0.5 * d
                newF = fn(newX)
                if newF <= f[high]:
                    x[high] = newX
                    f[high] = newF
                else:
                    # Shrink all vertices toward the best (lowest) vertex
                    for i in range(len(x)):
                        if i != low:
                            x[i] = x[low] + 0.5 * (x[i] - x[low])
                            f[i] = fn(x[i])
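A usage sketch (the separable quadratic below is an illustrative assumption, not part of the module):

import numpy as np

def quadratic(x):
    # Separable quadratic with its minimum (value 0) at (1, 2, 3).
    return float(np.sum((x - np.array([1.0, 2.0, 3.0]))**2))

result = fmin_simplex(quadratic, np.zeros(3), slide=1.0, tol=1e-8, maxiter=1000)
print(result.x, result.fun)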
Example #5
def _fmin_supersimplex(fn, x0, abs_outer_tol, rel_outer_tol, inner_tol, max_outer_iter,
                       min_inner_maxiter, max_inner_maxiter, callback, printer):
    """
    Minimize a function using repeated applications of the simplex algorithm.

    By varying the maximum number of iterations and repeatedly calling scipy's
    Nelder-Mead simplex optimization, this function performs as a robust (but
    slow) minimization.

    Parameters
    ----------
    fn : function
        The function to minimize.

    x0 : numpy array
        The starting point (argument to fn).

    abs_outer_tol : float
        Absolute tolerance of outer loop

    rel_outer_tol : float
        Relative tolerance of outer loop

    inner_tol : float
        Tolerance of inner loop

    max_outer_iter : int
        Maximum number of outer-loop iterations

    min_inner_maxiter : int
        Minimum number of inner-loop iterations

    max_inner_maxiter : int
        Maximum number of inner-loop iterations

    callback : function
        Callback function passed through to scipy's Nelder-Mead minimizer
        (may be None).

    printer : VerbosityPrinter
        Printer for displaying output status messages.

    Returns
    -------
    scipy.optimize.Result object
        Includes members 'x', 'fun', 'success', and 'message'.
    """
    f_init = fn(x0)
    f_final = f_init - 10 * abs_outer_tol  # prime the loop
    x_start = x0

    i = 1
    cnt_at_same_maxiter = 1
    inner_maxiter = min_inner_maxiter

    def check_convergence(fi, ff):  # absolute and relative tolerance exit condition
        return ((fi - ff) < abs_outer_tol) or abs(fi - ff) / max(abs(ff), abs_outer_tol) < rel_outer_tol

    def check_convergence_str(fi, ff):  # for printing status messages
        return "abs=%g, rel=%g" % (fi - ff, abs(fi - ff) / max(abs(ff), abs_outer_tol))

    converged = False
    for i in range(max_outer_iter):
        # increase inner_maxiter if seems to be converging
        if check_convergence(f_init, f_final) and inner_maxiter < max_inner_maxiter:
            inner_maxiter *= 10; cnt_at_same_maxiter = 1

        # reduce inner_maxiter if things aren't progressing (hail mary)
        if cnt_at_same_maxiter > 10 and inner_maxiter > min_inner_maxiter:
            inner_maxiter //= 10; cnt_at_same_maxiter = 1  # integer division keeps maxiter an int

        printer.log("Supersimplex: outer iteration %d (inner_maxiter = %d)" % (i, inner_maxiter))
        f_init = f_final
        cnt_at_same_maxiter += 1

        opts = {'maxiter': inner_maxiter, 'maxfev': inner_maxiter, 'disp': False}
        inner_solution = _spo.minimize(fn, x_start, options=opts, method='Nelder-Mead',
                                       callback=callback, tol=inner_tol)

        if not inner_solution.success:
            printer.log("WARNING: Supersimplex inner loop failed (tol=%g, maxiter=%d): %s"
                        % (inner_tol, inner_maxiter, inner_solution.message), 2)

        f_final = inner_solution.fun
        x_start = inner_solution.x
        printer.log("Supersimplex: outer iteration %d gives min = %f (%s)" % (i, f_final,
                                                                              check_convergence_str(f_init, f_final)))
        if inner_maxiter >= max_inner_maxiter:  # to exit, we must have been using our *maximum* inner_maxiter
            if check_convergence(f_init, f_final):
                converged = True; break  # converged!

    solution = _optResult()
    solution.x = inner_solution.x
    solution.fun = inner_solution.fun
    if converged:  # the loop exited via the 'break' above
        solution.success = True
    else:
        solution.success = False
        solution.message = "Maximum iterations exceeded"
    return solution
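A usage sketch; since this variant expects a printer object, the stub below is a hypothetical stand-in for pyGSTi's VerbosityPrinter (only the `.log(message, level)` calls used above are provided), and the test function and tolerances are illustrative:

import numpy as np
from scipy.optimize import rosen

class _StubPrinter(object):
    # Minimal stand-in exposing the .log(message, level) interface used above.
    def log(self, message, level=1):
        print(message)

# Hypothetical call with illustrative tolerances and iteration budgets.
result = _fmin_supersimplex(rosen, np.zeros(4),
                            abs_outer_tol=1e-6, rel_outer_tol=1e-6, inner_tol=1e-8,
                            max_outer_iter=20, min_inner_maxiter=100,
                            max_inner_maxiter=10000, callback=None,
                            printer=_StubPrinter())
print(result.x, result.fun, result.success)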
Example #6
def fmax_cg(f, x0, maxiters=100, tol=1e-8, dfdx_and_bdflag=None, xopt=None):
    """
    Custom conjugate-gradient (CG) routine for maximizing a function.

    This function runs slower than scipy.optimize's 'CG' method, but doesn't
    give up or get stuck as easily, and so sometimes can be a better option.

    Parameters
    ----------
    f : function
        The function to maximize.

    x0 : numpy array
        The starting point (argument to f).

    maxiters : int, optional
        Maximum iterations.

    tol : float, optional
        Tolerance for convergence (compared to the absolute difference in f).

    dfdx_and_bdflag : function, optional
        Function to compute jacobian of f as well as a boundary-flag.

    xopt : numpy array, optional
        Used for debugging: when given, debug output is printed relating the
        current point to xopt, assumed to be a known good optimum.

    Returns
    -------
    scipy.optimize.Result object
        Includes members 'x', 'fun', 'success', and 'message'.  **Note:** returns
        the negated maximum in 'fun' in order to conform to the return value of
        other minimization routines.
    """

    MIN_STEPSIZE = 1e-8
    FINITE_DIFF_STEP = 1e-4
    RESET = 5
    stepsize = 1e-6

    # if no dfdx specified, use finite differences
    if dfdx_and_bdflag is None:

        def dfdx_and_bdflag(x):
            return _finite_diff_dfdx_and_bdflag(f, x, FINITE_DIFF_STEP)

    step = 0
    x = x0
    last_fx = f(x0)
    last_x = x0
    lastchange = 0
    lastgradnorm = 0.0  # Safer than relying on uninitialized variables
    lastgrad = 0.0
    if last_fx is None: raise ValueError("fmax_cg was started out of bounds!")
    while (step < maxiters and ((stepsize > MIN_STEPSIZE) or
                                (step % RESET != 1 and RESET > 1))):

        grad, boundaryFlag = dfdx_and_bdflag(x)
        gradnorm = _np.dot(grad, grad)
        if step % RESET == 0:  # reset change == gradient
            change = grad[:]
        else:  # add gradient to change (conjugate gradient)
            #beta = gradnorm / lastgradnorm # Fletcher-Reeves
            beta = (gradnorm -
                    _np.dot(grad, lastgrad)) / lastgradnorm  # Polak-Ribiere
            #beta = (gradnorm - _np.dot(grad,lastgrad))/(_np.dot(lastchange,grad-lastgrad)) #Hestenes-Stiefel
            change = grad + beta * lastchange

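        # Zero out any component of the search direction that would push x out of
        # bounds (the sign of the boundary flag marks the blocked direction).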
        for i in range(len(change)):
            if boundaryFlag[i] * change[i] > 0:
                change[i] = 0
                print("DEBUG: fmax Preventing motion along dim %s" % i)

        if max(abs(change)) == 0:
            print("Warning: Completely Boxed in!")
            fx = last_fx
            x = last_x
            assert (abs(last_fx - f(last_x)) < 1e-6)
            break
            #i = list(abs(grad)).index(min(abs(grad)))
            #change[i] = -boundaryFlag[i] * 1.0 # could pick a random direction to move in?
            #gradnorm = 1.0  # punt...

        lastgrad = grad
        lastgradnorm = gradnorm

        multiplier = 1.0 / max(abs(change))
        change *= multiplier

        # Now "change" has largest element 1.  Time to do a linear search to find optimal stepsize.
        # If the last step had crazy short length, reset stepsize
        if stepsize < MIN_STEPSIZE: stepsize = MIN_STEPSIZE

        def g(s):
            return f(
                x + s * change
            )  # f along a line given by changedir.  Argument to function is stepsize.

        stepsize = _maximize1D(
            g, 0, abs(stepsize),
            last_fx)  # find optimal stepsize along change direction

        predicted_difference = stepsize * _np.dot(grad, change)
        if xopt is not None:
            xopt_dot = _np.dot(change, xopt - x) / \
                (_np.linalg.norm(change) * _np.linalg.norm(xopt - x))
        x += stepsize * change
        fx = f(x)
        difference = fx - last_fx
        print(
            "DEBUG: Max iter ", step, ": f=", fx, ", dexpct=",
            predicted_difference - difference, ", step=", stepsize,
            ", xopt_dot=", xopt_dot if xopt is not None else "--",
            ", chg_dot=",
            _np.dot(change, lastchange) /
            (_np.linalg.norm(change) * _np.linalg.norm(lastchange) + 1e-6))

        if abs(difference) < tol: break  # Convergence condition

        lastchange = change
        last_fx = fx
        last_x = x.copy()
        step += 1

    print("Finished Custom Constrained Newton CG Method")
    print(" iterations = %d" % step)
    print(" maximum f = %g" % fx)

    solution = _optResult()
    # negate maximum to conform to other minimization routines
    solution.x = x
    solution.fun = -fx if fx is not None else None
    if step < maxiters:
        solution.success = True
    else:
        solution.success = False
        solution.message = "Maximum iterations exceeded"
    return solution
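A usage sketch (the concave test function is an illustrative assumption; note that 'fun' holds the negated maximum, and omitting dfdx_and_bdflag makes the routine fall back to the module's finite-difference helper):

import numpy as np

def concave(x):
    # Concave test function with its maximum (value 0) at (1, -1).
    return -float(np.sum((x - np.array([1.0, -1.0]))**2))

result = fmax_cg(concave, np.zeros(2), maxiters=100, tol=1e-10)
print(result.x, -result.fun)  # un-negate to recover the maximum value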
Example #7
def fmin_evolutionary(f, x0, num_generations, num_individuals):
    """
    Minimize a function using an evolutionary algorithm.

    Uses the third-party deap package to perform an evolutionary
    algorithm that searches for the function's global minimum.

    Parameters
    ----------
    f : function
        The function to minimize.

    x0 : numpy array
        The starting point (argument to f).  Only its length is used: the
        initial population is drawn uniformly from [0, 1) in each coordinate.

    num_generations : int
        The number of generations to carry out (similar to the number
        of iterations).

    num_individuals : int
        The number of individuals in each generation.  More individuals
        make finding the global optimum more likely, but take longer
        to run.


    Returns
    -------
    scipy.optimize.Result object
        Includes members 'x', 'fun', 'success', and 'message'.
    """
    
    import deap as _deap
    import deap.creator as _creator
    import deap.base as _base
    import deap.tools as _tools
    numParams = len(x0)

    # Create the individual class
    _creator.create("FitnessMin", _base.Fitness, weights=(-1.0,))
    _creator.create("Individual", list, fitness=_creator.FitnessMin)

    # Create initialization functions
    toolbox = _base.Toolbox()
    toolbox.register("random", _np.random.random)
    toolbox.register("individual", _tools.initRepeat, _creator.Individual,
                     toolbox.random, n=numParams) # fn to init an individual from a list of numParams random numbers
    toolbox.register("population", _tools.initRepeat, list, toolbox.individual) # fn to create a population (still need to specify n)

    # Create operation functions
    def evaluate(individual):
        return f( _np.array(individual) ),  #note: must return a tuple

    toolbox.register("mate", _tools.cxTwoPoint)
    toolbox.register("mutate", _tools.mutGaussian, mu=0, sigma=0.5, indpb=0.1)
    toolbox.register("select", _tools.selTournament, tournsize=3)
    toolbox.register("evaluate", evaluate)

    # Create the population
    pop = toolbox.population(n=num_individuals) 

    # Evaluate the entire population
    fitnesses = map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    PROB_TO_CROSS = 0.5
    PROB_TO_MUTATE = 0.2

    # Initialize statistics
    stats = _tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", _np.mean)
    stats.register("std", _np.std)
    stats.register("min", _np.min)
    stats.register("max", _np.max)
    logbook = _tools.Logbook()

    #Run algorithm
    for g in range(num_generations):
        record = stats.compile(pop)
        logbook.record(gen=g, **record)
        print("Gen %d: %s" % (g, record))

        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))  # list() so it can be sliced and reused below

        # Apply crossover on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if _np.random.random() < PROB_TO_CROSS:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        # Apply mutation on the offspring
        for mutant in offspring:
            if _np.random.random() < PROB_TO_MUTATE:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # The population is entirely replaced by the offspring
        pop[:] = offspring
        
    #get best individual and return params
    indx_min_fitness = _np.argmin( [ ind.fitness.values[0] for ind in pop ] )
    best_params = _np.array(pop[indx_min_fitness])

    solution = _optResult()
    solution.x = best_params; solution.fun = pop[indx_min_fitness].fitness.values[0]
    solution.success = True
    return solution
Example #8
def fmin_particle_swarm(f, x0, err_crit, iter_max, popsize=100, c1=2, c2=2):
    """
    A simple implementation of the Particle Swarm Optimization Algorithm.
    Pradeep Gowda 2009-03-16

    Parameters
    ----------
    f : function
        The function to minimize.

    x0 : numpy array
        The starting point (argument to f).

    err_crit : float
        Critical error (i.e. tolerance).  Intended to stop the loop when
        error < err_crit; the stopping condition is currently left as a TODO
        below, so this argument is not yet used.
        
    iter_max : int
        Maximum iterations.

    popsize : int, optional
        Population size.  Larger populations are better at finding the global
        optimum but make the algorithm take longer to run.

    c1 : float, optional
        Coefficient describing a particle's affinity for its own best (local)
        point found so far.

    c2 : float, optional
        Coefficient describing a particle's affinity for the best point any
        particle has seen (the current global best).

    Returns
    -------
    scipy.optimize.Result object
        Includes members 'x', 'fun', 'success', and 'message'.
    """
    dimensions = len(x0)
    LARGE = 1e10

    class Particle:
        pass

    #initialize the particles
    particles = []
    for i in range(popsize):
        p = Particle()
        p.params = x0 + 2 * (_np.random.random(dimensions) - 0.5)
        p.best = p.params[:]
        p.fitness = LARGE # large == bad fitness
        p.v = _np.zeros(dimensions)
        particles.append(p)

    # let the first particle be the global best
    gbest = particles[0]; ibest = 0
    bDoLocalFitnessOpt = False

    #DEBUG
    #if False:
    #    import pickle as _pickle
    #    bestGaugeMx = _pickle.load(open("bestGaugeMx.debug"))
    #    lbfgsbGaugeMx = _pickle.load(open("lbfgsbGaugeMx.debug"))
    #    cgGaugeMx = _pickle.load(open("cgGaugeMx.debug"))
    #    initialGaugeMx = x0.reshape( (4,4) )
    #
    #    #DEBUG: dump line cut to plot
    #    nPts = 100
    #    print "DEBUG: best offsets = \n", bestGaugeMx - initialGaugeMx
    #    print "DEBUG: lbfgs offsets = \n", lbfgsbGaugeMx - initialGaugeMx
    #    print "DEBUG: cg offsets = \n", cgGaugeMx - initialGaugeMx
    #
    #    print "# DEBUG plot"
    #    #fDebug = open("x0ToBest.dat","w")
    #    #fDebug = open("x0ToLBFGS.dat","w")
    #    fDebug = open("x0ToCG.dat","w")
    #    #fDebug = open("LBFGSToBest.dat","w")
    #    #fDebug = open("CGToBest.dat","w")
    #    #fDebug = open("CGToLBFGS.dat","w")
    #
    #    for i in range(nPts+1):
    #        alpha = float(i) / nPts
    #        #matM = (1.0-alpha) * initialGaugeMx + alpha*bestGaugeMx 
    #        #matM = (1.0-alpha) * initialGaugeMx + alpha*lbfgsbGaugeMx
    #        matM = (1.0-alpha) * initialGaugeMx + alpha*cgGaugeMx
    #        #matM = (1.0-alpha) * lbfgsbGaugeMx + alpha*bestGaugeMx 
    #        #matM = (1.0-alpha) * cgGaugeMx + alpha*bestGaugeMx 
    #        #matM = (1.0-alpha) * cgGaugeMx + alpha*lbfgsbGaugeMx
    #        print >> fDebug, "%g %g" % (alpha, f(matM.flatten()))
    #    exit()
    #
    #
    #    fDebug = open("lineDataFromX0.dat","w")
    #    min_offset = -1; max_offset = 1
    #    for i in range(nPts+1):
    #        offset = min_offset + float(i)/nPts * (max_offset-min_offset)
    #        print >> fDebug, "%g" % offset,
    #
    #        for k in range(len(x0)):
    #            x = x0.copy(); x[k] += offset
    #            try:
    #                print >> fDebug, " %g" % f(x),
    #            except:
    #                print >> fDebug, " nan",
    #        print >> fDebug, ""
    #
    #    print >> fDebug, "#END DEBUG plot"
    #    exit()
    #END DEBUG


    #err = 1e10
    for iter_num in range(iter_max):
        w = 1.0 #- i/iter_max

        #bDoLocalFitnessOpt = bool(iter_num > 20 and abs(lastBest-gbest.fitness) < 0.001 and iter_num % 10 == 0)
        lastBest = gbest.fitness
        minDistToBest = 1e10; minV = 1e10; maxV = 0 #DEBUG

        for (ip,p) in enumerate(particles):
            fitness = f(p.params)

            #if bDoLocalFitnessOpt:
            #    opts = {'maxiter': 100, 'maxfev': 100, 'disp': False }
            #    local_soln = _spo.minimize(f,p.params,options=opts, method='L-BFGS-B',callback=None, tol=1e-2)
            #    p.params = local_soln.x
            #    fitness = local_soln.fun

            if fitness < p.fitness:  #low 'fitness' is good b/c we're minimizing
                p.fitness = fitness
                p.best = p.params

            if fitness < gbest.fitness:
                gbest = p; ibest = ip

            v = w*p.v + c1 * _np.random.random() * (p.best - p.params) \
                    + c2 * _np.random.random() * (gbest.params - p.params)
            p.params = p.params + v
            for (i,pv) in enumerate(p.params):
                p.params[i] = ((pv+1) % 2) - 1 #periodic b/c on box between -1 and 1

            #from .. import tools as tools_
            #matM = p.params.reshape( (4,4) )  #DEBUG
            #minDistToBest = min(minDistToBest, _tools.frobeniusdist(
            #                                    bestGaugeMx,matM)) #DEBUG
            #minV = min( _np.linalg.norm(v), minV)
            #maxV = max( _np.linalg.norm(v), maxV)

        #print "DB: min diff from best = ", minDistToBest #DEBUG
        #print "DB: min,max v = ", (minV,maxV)


        #if False: #bDoLocalFitnessOpt:
        #    opts = {'maxiter': 100, 'maxfev': 100, 'disp': False }
        #    print "initial fun = ",gbest.fitness,
        #    local_soln = _spo.minimize(f,gbest.params,options=opts, method='L-BFGS-B',callback=None, tol=1e-5)
        #    gbest.params = local_soln.x
        #    gbest.fitness = local_soln.fun
        #    print "  final fun = ",gbest.fitness
            
        print("Iter %d: global best = %g (index %d)" % (iter_num, gbest.fitness, ibest))
          
        #if err < err_crit:  break  #TODO: stopping condition

    ## Uncomment to print particles
    #for p in particles:
    #    print 'params: %s, fitness: %s, best: %s' % (p.params, p.fitness, p.best)

    solution = _optResult()
    solution.x = gbest.params; solution.fun = gbest.fitness
    solution.success = True
#    if iter_num < maxiter:
#        solution.success = True
#    else: 
#        solution.success = False
#        solution.message = "Maximum iterations exceeded"
    return solution
Example #9
def fmin_simplex(fn, x0, slide=1.0, tol=1e-8, maxiter=1000):
    """
    Minimizes a function using a custom simplex implementation.

    This was used primarily to check scipy's Nelder-Mead method
    and runs much slower, so there's not much reason for using
    this method.

    Parameters
    ----------
    fn : function
        The function to minimize.

    x0 : numpy array
        The starting point (argument to fn).

    slide : float, optional
        Affects initial simplex point locations

    tol : float, optional
        Relative tolerance as a convergence criterion.

    maxiter : int, optional
        Maximum iterations.

    Returns
    -------
    scipy.optimize.Result object
        Includes members 'x', 'fun', 'success', and 'message'.
    """

    # Set up initial values
    n = len(x0)
    f = _np.zeros(n+1)
    x = _np.zeros((n+1,n))

    x[0] = x0

    # Set up the initial simplex vertices
    for i in range(1,n+1):
        x[i] = x0
        x[i,i-1] = x0[i-1] + slide

    # Evaluate the function at each initial vertex
    for i in range(n+1):
        f[i] = fn(x[i])

    # Main loop: runs until the convergence or maximum-iteration break condition
    counter = 0
    while True:
        low = _np.argmin(f)
        high = _np.argmax(f)
        counter += 1

        # Compute the move direction: d points from the worst (highest) vertex
        # toward the centroid of the remaining vertices
        d = (-(n+1)*x[high]+sum(x))/n

        # Break if converged (simplex displacement below tol) or out of iterations
        if _np.sqrt(_np.dot(d,d)/n)<tol or counter == maxiter:
            solution = _optResult()
            solution.x = x[low]; solution.fun = f[low]
            if counter < maxiter:
                solution.success = True
            else: 
                solution.success = False
                solution.message = "Maximum iterations exceeded"
            return solution

        newX = x[high] + 2.0*d
        newF = fn(newX)

        if newF <= f[low]:
            # Reflected point beats the current best => accept it and try expanding further
            x[high] = newX
            f[high] = newF
            newX = x[high] + d
            newF = fn(newX)

            # Keep the expanded point only if it also improves on the best
            if newF <= f[low]:
                x[high] = newX
                f[high] = newF

        else:
            # Reflected point does not beat the current best

            # Accept the reflection if it at least improves on the worst point
            if newF <= f[high]:
                x[high] = newX
                f[high] = newF
            else:
                # Contraction
                newX = x[high] + 0.5*d
                newF = fn(newX)
                if newF <= f[high]:
                    x[high] = newX
                    f[high] = newF
                else:
                    # Shrink all vertices toward the best (lowest) vertex
                    for i in range(len(x)):
                        if i != low:
                            x[i] = x[low] + 0.5 * (x[i] - x[low])
                            f[i] = fn(x[i])
Example #10
def fmin_supersimplex(fn, x0, outer_tol, inner_tol, max_outer_iter, min_inner_maxiter, max_inner_maxiter):
    """
    Minimize a function using repeated applications of the simplex algorithm.

    By varying the maximum number of iterations and repeatedly calling scipy's
    Nelder-Mead simplex optimization, this function performs as a robust (but
    slow) minimization.

    Parameters
    ----------
    fn : function
        The function to minimize.

    x0 : numpy array
        The starting point (argument to fn).

    outer_tol : float
        Tolerance of outer loop
       
    inner_tol : float
        Tolerance of inner loop

    max_outer_iter : int
        Maximum number of outer-loop iterations
    
    min_inner_maxiter : int
        Minimum number of inner-loop iterations

    max_inner_maxiter : int
        Maximum number of inner-loop iterations

    Returns
    -------
    scipy.optimize.Result object
        Includes members 'x', 'fun', 'success', and 'message'.
    """
    f_init = fn(x0)
    f_final = f_init - 10*outer_tol #prime the loop
    x_start = x0

    i = 1
    cnt_at_same_maxiter = 1
    inner_maxiter = min_inner_maxiter
    
    while ( f_init-f_final > outer_tol or inner_maxiter < max_inner_maxiter) and i < max_outer_iter:
        if f_init-f_final <= outer_tol and inner_maxiter < max_inner_maxiter: 
            inner_maxiter *= 10; cnt_at_same_maxiter = 1
        if cnt_at_same_maxiter > 10 and inner_maxiter > min_inner_maxiter:
            inner_maxiter //= 10; cnt_at_same_maxiter = 1  # integer division keeps maxiter an int
        f_init = f_final
        
        print(">>> fmin_supersimplex: outer iteration %d (inner_maxiter = %d)" % (i, inner_maxiter))
        i += 1; cnt_at_same_maxiter += 1

        opts = {'maxiter': inner_maxiter, 'maxfev': inner_maxiter, 'disp': False }
        inner_solution = _spo.minimize(fn,x_start,options=opts, method='Nelder-Mead',callback=None, tol=inner_tol)

        if not inner_solution.success:
            print("WARNING: fmin_supersimplex inner loop failed (tol=%g, maxiter=%d): %s"
                  % (inner_tol, inner_maxiter, inner_solution.message))

        f_final = inner_solution.fun
        x_start = inner_solution.x
        print(">>> fmin_supersimplex: outer iteration %d gives min = %f" % (i, f_final))

    solution = _optResult()
    solution.x = inner_solution.x
    solution.fun = inner_solution.fun
    if i < max_outer_iter:
        solution.success = True
    else: 
        solution.success = False
        solution.message = "Maximum iterations exceeded"
    return solution
Example #11
def fmax_cg(f, x0, maxiters=100, tol=1e-8, dfdx_and_bdflag = None, xopt=None):
    """
    Custom conjugate-gradient (CG) routine for maximizing a function.

    This function runs slower than scipy.optimize's 'CG' method, but doesn't
    give up or get stuck as easily, and so sometimes can be a better option.

    Parameters
    ----------
    f : function
        The function to maximize.

    x0 : numpy array
        The starting point (argument to f).

    maxiters : int, optional
        Maximum iterations.

    tol : float, optional
        Tolerance for convergence (compared to the absolute difference in f).
     
    dfdx_and_bdflag : function, optional
        Function to compute jacobian of f as well as a boundary-flag.

    xopt : numpy array, optional
        Used for debugging: when given, debug output is printed relating the
        current point to xopt, assumed to be a known good optimum.

    Returns
    -------
    scipy.optimize.Result object
        Includes members 'x', 'fun', 'success', and 'message'.  **Note:** returns
        the negated maximum in 'fun' in order to conform to the return value of
        other minimization routines.
    """

    MIN_STEPSIZE = 1e-8
    FINITE_DIFF_STEP = 1e-4
    RESET = 5
    stepsize = 1e-6

    # if no dfdx specified, use finite differences
    if dfdx_and_bdflag is None:
        dfdx_and_bdflag = lambda x: _finite_diff_dfdx_and_bdflag(f,x,FINITE_DIFF_STEP)

    step = 0
    x = x0; last_fx = f(x0); last_x = x0
    lastchange = 0
    if last_fx is None: raise ValueError("fmax_cg was started out of bounds!")
    while( step < maxiters and ((stepsize>MIN_STEPSIZE) or (step%RESET !=1 and RESET>1))):

        grad,boundaryFlag = dfdx_and_bdflag(x)
        gradnorm = _np.dot(grad,grad)
        if step%RESET == 0: #reset change == gradient
            change = grad[:]
        else: # add gradient to change (conjugate gradient)
            #beta = gradnorm / lastgradnorm # Fletcher-Reeves
            beta = (gradnorm - _np.dot(grad,lastgrad))/lastgradnorm # Polak-Ribiere
            #beta = (gradnorm - _np.dot(grad,lastgrad))/(_np.dot(lastchange,grad-lastgrad)) #Hestenes-Stiefel
            change = grad + beta * lastchange

        for i in range(len(change)):
            if boundaryFlag[i]*change[i] > 0: 
                change[i] = 0
                print("DEBUG: fmax Preventing motion along dim %s" % i)
        
        if max(abs(change)) == 0:
            print("Warning: Completely Boxed in!")
            fx = last_fx; x = last_x
            assert( abs(last_fx - f(last_x)) < 1e-6)
            break
            #i = list(abs(grad)).index(min(abs(grad)))
            #change[i] = -boundaryFlag[i] * 1.0 # could pick a random direction to move in?
            #gradnorm = 1.0  # punt...

        lastgrad = grad
        lastgradnorm = gradnorm
        
        multiplier = 1.0/max(abs(change))
        change *= multiplier
        
        # Now "change" has largest element 1.  Time to do a linear search to find optimal stepsize.
        # If the last step had crazy short length, reset stepsize
        if stepsize < MIN_STEPSIZE: stepsize = MIN_STEPSIZE
        g = lambda s: f(x+s*change) # f along a line given by changedir.  Argument to function is stepsize.
        stepsize = _maximize1D(g,0,abs(stepsize),last_fx)  #find optimal stepsize along change direction
		
        predicted_difference = stepsize * _np.dot(grad,change)
        if xopt is not None: xopt_dot = _np.dot(change, xopt-x) / (_np.linalg.norm(change) * _np.linalg.norm(xopt-x))
        x += stepsize * change; fx = f(x)
        difference = fx - last_fx
        print("DEBUG: Max iter ", step, ": f=", fx, ", dexpct=", predicted_difference - difference,
              ", step=", stepsize, ", xopt_dot=", xopt_dot if xopt is not None else "--",
              ", chg_dot=", _np.dot(change, lastchange)
              / (_np.linalg.norm(change) * _np.linalg.norm(lastchange) + 1e-6))

        if abs(difference) < tol: break #Convergence condition

        lastchange = change
        last_fx = fx
        last_x = x.copy()
        step += 1

    print("Finished Custom Constrained Newton CG Method")
    print(" iterations = %d" % step)
    print(" maximum f = %g" % fx)

    solution = _optResult()
    solution.x = x; solution.fun = -fx if fx is not None else None  # negate maximum to conform to other minimization routines
    if step < maxiters:
        solution.success = True
    else: 
        solution.success = False
        solution.message = "Maximum iterations exceeded"
    return solution