Example #1
    def __init__(self, dim, nbins=8):
        """
Takes two initial inputs: 
    dim   -- dimensionality of the problem
    nbins -- tuple of number of bins in each dimension

All important class members are inherited from AbstractEnsembleSolver.
        """
        super(LatticeSolver, self).__init__(dim, nbins=nbins)
        from mystic.termination import NormalizedChangeOverGeneration
        convergence_tol = 1e-4
        self._termination = NormalizedChangeOverGeneration(convergence_tol)
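The constructor above only wires up the default stop test; a rough sketch of class-based use follows. It is not part of the original listing, and the rosen objective from mystic.models is an assumed stand-in; the solver methods are the ones used elsewhere on this page.

# Sketch: direct use of LatticeSolver (untested illustration).
from mystic.solvers import LatticeSolver, NelderMeadSimplexSolver
from mystic.termination import VTR
from mystic.models import rosen   # assumed test objective; not part of this excerpt

solver = LatticeSolver(2, nbins=(2, 2))          # 2-D problem, 2 bins per dimension
solver.SetNestedSolver(NelderMeadSimplexSolver)  # same nested solver the wrappers use
solver.SetStrictRanges([-5., -5.], [5., 5.])     # bounds the lattice is laid out in
solver.Solve(rosen, termination=VTR(1e-6))       # overrides the default NCOG(1e-4)
print(solver.bestSolution, solver.bestEnergy)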
Example #2
    def __init__(self, dim, npts=8):
        """
Takes two initial inputs: 
    dim   -- dimensionality of the problem
    npts  -- number of parallel solver instances

All important class members are inherited from AbstractEnsembleSolver.
        """
        super(BuckshotSolver, self).__init__(dim, npts=npts)
        from mystic.termination import NormalizedChangeOverGeneration
        convergence_tol = 1e-4
        self._termination = NormalizedChangeOverGeneration(convergence_tol)
Example #3
    def __init__(self, dim, npts=8, rtol=None):
        """
Takes three initial inputs: 
    dim   -- dimensionality of the problem
    npts  -- number of parallel solver instances
    rtol  -- size of radial tolerance for sparsity

All important class members are inherited from AbstractEnsembleSolver.
        """
        super(SparsitySolver, self).__init__(dim, npts=npts, rtol=rtol)
        from mystic.termination import NormalizedChangeOverGeneration
        convergence_tol = 1e-4
        self._termination = NormalizedChangeOverGeneration(convergence_tol)
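All three constructors above install the same default stop test, NormalizedChangeOverGeneration(1e-4). A rough sketch of swapping it out at solve time is below; it is not from the original source, rosen is an assumed objective, and the same pattern applies to BuckshotSolver and LatticeSolver.

# Sketch: overriding the default NCOG(1e-4) termination via Solve (untested).
from mystic.solvers import SparsitySolver
from mystic.termination import ChangeOverGeneration
from mystic.models import rosen   # assumed test objective; not part of this excerpt

solver = SparsitySolver(2, npts=4)               # 4 solver instances on a 2-D problem
solver.SetStrictRanges([-5., -5.], [5., 5.])     # region the sparse samples are drawn from
solver.Solve(rosen, termination=ChangeOverGeneration(1e-10, 30))
print(solver.Terminated(info=True))              # report which condition ended the run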
Example #4
def buckshot(cost,ndim,npts=8,args=(),bounds=None,ftol=1e-4,maxiter=None, \
             maxfun=None,full_output=0,disp=1,retall=0,callback=None,**kwds):
    """Minimize a function using the buckshot ensemble solver.
    
Uses a buckshot ensemble algorithm to find the minimum of a function of one or
more variables. Mimics the ``scipy.optimize.fmin`` interface. Starts *npts*
solver instances at random points in parameter space. 

Args:
    cost (func): the function or method to be minimized: ``y = cost(x)``.
    ndim (int): dimensionality of the problem.
    npts (int, default=8): number of solver instances.
    args (tuple, default=()): extra arguments for cost.
    bounds (list(tuple), default=None): list of pairs of bounds (min,max),
        one for each parameter.
    ftol (float, default=1e-4): acceptable relative error in ``cost(xopt)``
        for convergence.
    gtol (int, default=10): maximum number of iterations to run without
        improvement.
    maxiter (int, default=None): the maximum number of iterations to perform.
    maxfun (int, default=None): the maximum number of function evaluations.
    full_output (bool, default=False): True if fval and warnflag are desired.
    disp (bool, default=True): if True, print convergence messages.
    retall (bool, default=False): if True, return list of solutions at each
        iteration.
    callback (func, default=None): function to call after each iteration. The
        interface is ``callback(xk)``, with xk the current parameter vector.
    solver (solver, default=None): override the default nested Solver instance.
    handler (bool, default=False): if True, enable handling interrupt signals.
    itermon (monitor, default=None): override the default GenerationMonitor.
    evalmon (monitor, default=None): override the default EvaluationMonitor.
    constraints (func, default=None): a function ``xk' = constraints(xk)``,
        where xk is the current parameter vector, and xk' is a parameter
        vector that satisfies the encoded constraints.
    penalty (func, default=None): a function ``y' = penalty(xk)``, where xk is
        the current parameter vector, and ``y' == 0`` when the encoded
        constraints are satisfied (and ``y' > 0`` otherwise).
    dist (mystic.math.Distribution, default=None): generate randomness in
        ensemble starting position using the given distribution.

Returns:
    ``(xopt, {fopt, iter, funcalls, warnflag, allfuncalls}, {allvecs})``

Notes:
    - xopt (*ndarray*): the minimizer of the cost function
    - fopt (*float*): value of cost function at minimum: ``fopt = cost(xopt)``
    - iter (*int*): number of iterations
    - funcalls (*int*): number of function calls
    - warnflag (*int*): warning flag:
        - ``1 : Maximum number of function evaluations``
        - ``2 : Maximum number of iterations``
    - allfuncalls (*int*): total function calls (for all solver instances)
    - allvecs (*list*): a list of solutions at each iteration
    """
    handler = kwds['handler'] if 'handler' in kwds else False
    from mystic.solvers import NelderMeadSimplexSolver as _solver
    if 'solver' in kwds: _solver = kwds['solver']

    from mystic.monitors import Monitor
    stepmon = kwds['itermon'] if 'itermon' in kwds else Monitor()
    evalmon = kwds['evalmon'] if 'evalmon' in kwds else Monitor()

    gtol = 10  # termination generations (scipy: 2, default: 10)
    if 'gtol' in kwds: gtol = kwds['gtol']
    if gtol:  #if number of generations is provided, use NCOG
        from mystic.termination import NormalizedChangeOverGeneration
        termination = NormalizedChangeOverGeneration(ftol, gtol)
    else:
        from mystic.termination import VTRChangeOverGeneration
        termination = VTRChangeOverGeneration(ftol)

    solver = BuckshotSolver(ndim, npts)
    solver.SetNestedSolver(_solver)  #XXX: skip settings for configured solver?
    solver.SetEvaluationLimits(maxiter, maxfun)
    solver.SetEvaluationMonitor(evalmon)
    solver.SetGenerationMonitor(stepmon)
    if 'dist' in kwds:
        solver.SetDistribution(kwds['dist'])
    if 'penalty' in kwds:
        solver.SetPenalty(kwds['penalty'])
    if 'constraints' in kwds:
        solver.SetConstraints(kwds['constraints'])
    if bounds is not None:
        from mystic.tools import unpair  # not imported in this excerpt
        minb, maxb = unpair(bounds)
        solver.SetStrictRanges(minb, maxb)

    if handler: solver.enable_signal_handler()
    solver.Solve(cost,termination=termination,disp=disp, \
                 ExtraArgs=args,callback=callback)
    solution = solver.Solution()

    # code below here pushes output to scipy.optimize.fmin interface
    msg = solver.Terminated(disp=False, info=True)

    x = solver.bestSolution
    fval = solver.bestEnergy
    warnflag = 0
    fcalls = solver.evaluations
    all_fcalls = solver._total_evals
    iterations = solver.generations
    allvecs = solver._stepmon.x

    if fcalls >= solver._maxfun:  #XXX: check against total or individual?
        warnflag = 1
    elif iterations >= solver._maxiter:  #XXX: check against total or individual?
        warnflag = 2
    else:
        pass

    if full_output:
        retlist = x, fval, iterations, fcalls, warnflag, all_fcalls
        if retall:
            retlist += (allvecs, )
    else:
        retlist = x
        if retall:
            retlist = (x, allvecs)

    return retlist
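A rough usage sketch of the wrapper above follows; it mirrors the fmin-like call described in the docstring. The rosen objective from mystic.models is an assumption and not part of this excerpt.

# Sketch: calling buckshot the way scipy.optimize.fmin is called (untested).
from mystic.solvers import buckshot
from mystic.models import rosen   # assumed test objective

bounds = [(-5., 5.)] * 3          # one (min, max) pair per parameter
xopt = buckshot(rosen, 3, npts=4, bounds=bounds, ftol=1e-6, disp=0)
print(xopt)                       # should approach the rosen minimum at [1, 1, 1]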
Example #5
if __name__ == '__main__':
    info = 'self'  #False #True
    # And, Or, When are used by the aliases below but are not imported in this
    # excerpt; they come from mystic.termination.
    from mystic.termination import And, Or, When
    _all = And  #__and
    _any = Or  #__or
    _one = When  #__when

    from mystic.solvers import DifferentialEvolutionSolver
    s = DifferentialEvolutionSolver(4, 4)

    from mystic.termination import VTR
    from mystic.termination import ChangeOverGeneration
    from mystic.termination import NormalizedChangeOverGeneration
    v = VTR()
    c = ChangeOverGeneration()
    n = NormalizedChangeOverGeneration()

    print "define conditions..."
    _v = _one(v)
    _c = _one(c)
    _n = _one(n)
    vAc = _all(v, c)
    vAn = _all(v, n)
    vOc = _any(v, c)
    vOn = _any(v, n)
    vAc_On = _any(vAc, _n)
    vAc_Oc = _any(vAc, _c)
    vAn_On = _any(vAn, _n)
    cAv = _all(c, v)
    cOv = _any(c, v)
    c_OcAv = _any(_c, cAv)
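    # (Sketch, not part of the original listing.) Each compound condition built
    # above is callable: calling it with the solver reports whether it is met,
    # and info='self' additionally lists the sub-conditions that were satisfied
    # (Example #7 below exercises this in detail).
    print("evaluate conditions on a fresh solver...")
    print(vAc(s, info))   # And(VTR, COG): falsy, since no sub-condition holds yet
    print(vOc(s, info))   # Or(VTR, COG): also falsy on an unsolved solver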
Example #6
def buckshot(cost,ndim,npts=8,args=(),bounds=None,ftol=1e-4,maxiter=None, \
             maxfun=None,full_output=0,disp=1,retall=0,callback=None,**kwds):
    """Minimize a function using the buckshot ensemble solver.
    
Description:

    Uses a buckshot ensemble algorithm to find the minimum of
    a function of one or more variables. Mimics the scipy.optimize.fmin
    interface. Starts 'npts' solver instances at random points in parameter
    space. 

Inputs:

    cost -- the Python function or method to be minimized.
    ndim -- dimensionality of the problem.
    npts -- number of solver instances.

Additional Inputs:

    args -- extra arguments for cost.
    bounds -- list - n pairs of bounds (min,max), one pair for each parameter.
    ftol -- number - acceptable relative error in cost(xopt) for convergence.
    gtol -- number - maximum number of iterations to run without improvement.
    maxiter -- number - the maximum number of iterations to perform.
    maxfun -- number - the maximum number of function evaluations.
    full_output -- number - non-zero if fval and warnflag outputs are desired.
    disp -- number - non-zero to print convergence messages.
    retall -- number - non-zero to return list of solutions at each iteration.
    callback -- an optional user-supplied function to call after each
        iteration.  It is called as callback(xk), where xk is the
        current parameter vector.
    solver -- solver - override the default nested Solver instance.
    handler -- boolean - enable/disable handling of interrupt signal.
    itermon -- monitor - override the default GenerationMonitor.
    evalmon -- monitor - override the default EvaluationMonitor.
    constraints -- an optional user-supplied function.  It is called as
        constraints(xk), where xk is the current parameter vector.
        This function must return xk', a parameter vector that satisfies
        the encoded constraints.
    penalty -- an optional user-supplied function.  It is called as
        penalty(xk), where xk is the current parameter vector.
        This function should return y', with y' == 0 when the encoded
        constraints are satisfied, and y' > 0 otherwise.
    dist -- an optional mystic.math.Distribution instance.  If provided,
        this distribution generates randomness in ensemble starting position.

Returns: (xopt, {fopt, iter, funcalls, warnflag, allfuncalls}, {allvecs})

    xopt -- ndarray - minimizer of function
    fopt -- number - value of function at minimum: fopt = cost(xopt)
    iter -- number - number of iterations
    funcalls -- number - number of function calls
    warnflag -- number - Integer warning flag:
        1 : 'Maximum number of function evaluations.'
        2 : 'Maximum number of iterations.'
    allfuncalls -- number - total function calls (for all solver instances)
    allvecs -- list - a list of solutions at each iteration

    """
    handler = kwds['handler'] if 'handler' in kwds else False
    from mystic.solvers import NelderMeadSimplexSolver as _solver
    if 'solver' in kwds: _solver = kwds['solver']

    from mystic.monitors import Monitor
    stepmon = kwds['itermon'] if 'itermon' in kwds else Monitor()
    evalmon = kwds['evalmon'] if 'evalmon' in kwds else Monitor()

    gtol = 10 # termination generations (scipy: 2, default: 10)
    if 'gtol' in kwds: gtol = kwds['gtol']
    if gtol: #if number of generations is provided, use NCOG
        from mystic.termination import NormalizedChangeOverGeneration
        termination = NormalizedChangeOverGeneration(ftol,gtol)
    else:
        from mystic.termination import VTRChangeOverGeneration
        termination = VTRChangeOverGeneration(ftol)

    solver = BuckshotSolver(ndim,npts)
    solver.SetNestedSolver(_solver) #XXX: skip settings for configured solver?
    solver.SetEvaluationLimits(maxiter,maxfun)
    solver.SetEvaluationMonitor(evalmon)
    solver.SetGenerationMonitor(stepmon)
    if 'dist' in kwds:
        solver.SetDistribution(kwds['dist'])
    if 'penalty' in kwds:
        solver.SetPenalty(kwds['penalty'])
    if 'constraints' in kwds:
        solver.SetConstraints(kwds['constraints'])
    if bounds is not None:
        from mystic.tools import unpair  # not imported in this excerpt
        minb,maxb = unpair(bounds)
        solver.SetStrictRanges(minb,maxb)

    if handler: solver.enable_signal_handler()
    solver.Solve(cost,termination=termination,disp=disp, \
                 ExtraArgs=args,callback=callback)
    solution = solver.Solution()

    # code below here pushes output to scipy.optimize.fmin interface
    msg = solver.Terminated(disp=False, info=True)

    x = solver.bestSolution
    fval = solver.bestEnergy
    warnflag = 0
    fcalls = solver.evaluations
    all_fcalls = solver._total_evals
    iterations = solver.generations
    allvecs = solver._stepmon.x

    if fcalls >= solver._maxfun: #XXX: check against total or individual?
        warnflag = 1
    elif iterations >= solver._maxiter: #XXX: check against total or individual?
        warnflag = 2
    else: pass

    if full_output:
        retlist = x, fval, iterations, fcalls, warnflag, all_fcalls
        if retall:
            retlist += (allvecs,)
    else:
        retlist = x
        if retall:
            retlist = (x, allvecs)

    return retlist
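A hedged sketch of consuming the full return tuple documented above (full_output together with retall) is shown next; rosen is again an assumed stand-in objective and this snippet is not part of the original source.

# Sketch: unpacking the scipy.optimize.fmin-style result tuple (untested).
from mystic.solvers import buckshot
from mystic.models import rosen   # assumed test objective

bounds = [(-5., 5.)] * 2
result = buckshot(rosen, 2, npts=4, bounds=bounds, ftol=1e-6,
                  full_output=1, retall=1, disp=0)
xopt, fopt, niter, fcalls, warnflag, all_fcalls, allvecs = result
print(warnflag)       # 0 unless maxfun (1) or maxiter (2) was reached
print(len(allvecs))   # one parameter vector recorded per iteration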
Example #7
def test_me(info='self'):
    # And, Or, When and disp are used below but not defined in this excerpt;
    # the imports come from mystic.termination, and disp simply gates printing.
    from mystic.termination import And, Or, When
    disp = False  # set True to print the diagnostic output below
    from mystic.solvers import DifferentialEvolutionSolver
    s = DifferentialEvolutionSolver(4, 4)

    from mystic.termination import VTR
    from mystic.termination import ChangeOverGeneration
    from mystic.termination import NormalizedChangeOverGeneration
    v = VTR()
    c = ChangeOverGeneration()
    n = NormalizedChangeOverGeneration()

    if disp: print("define conditions...")
    _v = When(v)
    _c = When(c)
    _n = When(n)
    v_and_c = And(v, c)
    v_and_n = And(v, n)
    v_or_c = Or(v, c)
    v_or_n = Or(v, n)
    v_and_c__or_n = Or(v_and_c, _n)
    v_and_c__or_c = Or(v_and_c, _c)
    v_and_n__or_n = Or(v_and_n, _n)
    c_and_v = And(c, v)
    c_or_v = Or(c, v)
    c_or__c_and_v = Or(_c, c_and_v)

    assert len(_v) == len(_c) == len(_n) == 1
    assert list(_v).index(v) == list(_c).index(c) == list(_n).index(n) == 0
    assert list(_v).count(v) == list(_c).count(c) == list(_n).count(n) == 1
    assert v in _v and c in _c and n in _n
    assert v in v_and_c and c in v_and_c
    assert v in v_and_n and n in v_and_n
    assert v in v_or_c and c in v_or_c
    assert v in v_or_n and n in v_or_n
    assert len(v_and_c) == len(v_and_n) == len(v_or_c) == len(v_or_n) == 2
    assert len(v_and_c__or_n) == len(v_and_c__or_c) == len(v_and_n__or_n) == 2
    assert v not in v_and_c__or_n and c not in v_and_c__or_n and n not in v_and_c__or_n
    assert v_and_c in v_and_c__or_n and _n in v_and_c__or_n
    assert v_and_c in v_and_c__or_c and _c in v_and_c__or_c
    assert v_and_n in v_and_n__or_n and _n in v_and_n__or_n
    assert c in c_and_v and v in c_and_v
    assert c in c_or_v and v in c_or_v
    assert list(c_and_v).index(c) == list(v_and_c).index(v)
    assert list(c_and_v).index(v) == list(v_and_c).index(c)
    assert list(c_or_v).index(c) == list(v_or_c).index(v)
    assert list(c_or_v).index(v) == list(v_or_c).index(c)
    assert c_and_v in c_or__c_and_v and _c in c_or__c_and_v

    if disp:
        print("_v:%s" % _v)
        print("_c:%s" % _c)
        print("_n:%s" % _n)
        print("v_and_c:%s" % v_and_c)
        print("v_and_n:%s" % v_and_n)
        print("v_or_c:%s" % v_or_c)
        print("v_or_n:%s" % v_or_n)
        print("v_and_c__or_n:%s" % v_and_c__or_n)
        print("v_and_c__or_c:%s" % v_and_c__or_c)
        print("v_and_n__or_n:%s" % v_and_n__or_n)
        print("c_and_v:%s" % c_and_v)
        print("c_or_v:%s" % c_or_v)
        print("c_or__c_and_v:%s" % c_or__c_and_v)

    if disp: print("initial conditions...")
    _v_and_c = v_and_c(s, info)
    _v_and_n = v_and_n(s, info)
    _v_or_c = v_or_c(s, info)
    _v_or_n = v_or_n(s, info)

    assert not _v_and_c
    assert not _v_and_n
    assert not _v_or_c
    assert not _v_or_n

    if disp:
        print("v_and_c:%s" % _v_and_c)
        print("v_and_n:%s" % _v_and_n)
        print("v_or_c:%s" % _v_or_c)
        print("v_or_n:%s" % _v_or_n)

    if disp: print("after convergence toward zero...")
    s.energy_history = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    # _v and _n are True, _c is False
    _v_and_c = v_and_c(s, info)
    _v_and_n = v_and_n(s, info)
    _v_or_c = v_or_c(s, info)
    _v_or_n = v_or_n(s, info)

    assert not _v_and_c
    assert _v_and_n
    assert _v_or_c
    assert _v_or_n
    if info == 'self':
        assert v in _v_and_n and n in _v_and_n
        assert v in _v_or_c and c not in _v_or_c
        assert v in _v_or_n and n in _v_or_n

    if disp:
        print("v_and_c:%s" % _v_and_c)
        print("v_and_n:%s" % _v_and_n)
        print("v_or_c:%s" % _v_or_c)
        print("v_or_n:%s" % _v_or_n)

    if disp: print("nested compound termination...")
    # _v and _n are True, _c is False
    _v_and_c__or_n = v_and_c__or_n(s, info)
    _v_and_c__or_c = v_and_c__or_c(s, info)
    _v_and_n__or_n = v_and_n__or_n(s, info)

    assert _v_and_c__or_n
    assert not _v_and_c__or_c
    assert _v_and_n__or_n
    if info == 'self':
        assert _n in _v_and_c__or_n and v_and_c not in _v_and_c__or_n
        assert v not in _v_and_c__or_n and c not in _v_and_c__or_n
        assert v_and_n in _v_and_n__or_n and _n in _v_and_n__or_n
        assert v not in _v_and_n__or_n and n not in _v_and_n__or_n

    if disp:
        print("v_and_c__or_n:%s" % _v_and_c__or_n)
        print("v_and_c__or_c:%s" % _v_and_c__or_c)
        print("v_and_n__or_n:%s" % _v_and_n__or_n)

    if disp: print("individual conditions...")
    __v = _v(s, info)
    __c = _c(s, info)
    __n = _n(s, info)

    assert __v and __n
    assert not __c
    if info == 'self':
        assert v in __v and n in __n

    if disp:
        print("v:%s" % __v)
        print("c:%s" % __c)
        print("n:%s" % __n)

    if disp: print("conditions with false first...")
    _c_and_v = c_and_v(s, info)
    _c_or_v = c_or_v(s, info)
    _c_or__c_and_v = c_or__c_and_v(s, info)

    assert not _c_and_v
    assert _c_or_v
    assert not _c_or__c_and_v
    if info == 'self':
        assert v in _c_or_v and c not in _c_or_v

    if disp:
        print("c_and_v:%s" % _c_and_v)
        print("c_or_v:%s" % _c_or_v)
        print("c_or__c_and_v:%s" % _c_or__c_and_v)