Example #1
def optimize(cost, _bounds, _constraints):
  # NOTE: npts, npop, maxiter, maxfun, convergence_tol, ngen, crossover,
  #       percent_change, MINMAX, verbose, and debug are expected from the
  #       enclosing module scope
  from mystic.solvers import DifferentialEvolutionSolver2
  from mystic.termination import ChangeOverGeneration as COG
  from mystic.strategy import Best1Exp
  from mystic.monitors import VerboseMonitor, Monitor
  from mystic.tools import random_seed
  from mystic.termination import Or, CollapseWeight, CollapsePosition, state


  if debug:
      random_seed(123) # or 666 to force impose_unweighted reweighting
      stepmon = VerboseMonitor(1,1)
  else:
      stepmon = VerboseMonitor(10) if verbose else Monitor()
  stepmon._npts = npts
  evalmon = Monitor()

  lb,ub = _bounds
  ndim = len(lb)

  solver = DifferentialEvolutionSolver2(ndim,npop)
  solver.SetRandomInitialPoints(min=lb,max=ub)
  solver.SetStrictRanges(min=lb,max=ub)
  solver.SetEvaluationLimits(maxiter,maxfun)
  solver.SetEvaluationMonitor(evalmon)
  solver.SetGenerationMonitor(stepmon)
  solver.SetConstraints(_constraints)

  tol = convergence_tol
  term = Or(COG(tol,ngen), CollapseWeight(), CollapsePosition())
  solver.Solve(cost,termination=term,strategy=Best1Exp, disp=verbose, \
               CrossProbability=crossover,ScalingFactor=percent_change)
 #while collapse and solver.Collapse(verbose): #XXX: total_evaluations?
 #    if debug: print(state(solver._termination).keys())
 #    solver.Solve() #XXX: cost, term, strategy, cross, scale ?
 #    if debug: solver.SaveSolver('debug.pkl')

  solved = solver.bestSolution
 #print("solved: %s" % solver.Solution())
  func_max = MINMAX * solver.bestEnergy       #NOTE: -solution assumes -Max
 #func_max = 1.0 + MINMAX*solver.bestEnergy   #NOTE: 1-sol => 1-success = fail
  func_evals = solver.evaluations
  from mystic.munge import write_support_file
  write_support_file(stepmon, npts=npts)
  return solved, func_max, func_evals
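The settings used above live in the enclosing module. Below is a minimal driver sketch; every value is a hypothetical placeholder chosen for illustration, and the cost is a toy quadratic:

# hypothetical module-level settings assumed by optimize() above
verbose, debug = False, False
npts = None                # product-measure dimensions in the original context
npop, maxiter, maxfun = 40, 1000, 1e+6
convergence_tol, ngen = 1e-8, 100
crossover, percent_change = 0.9, 0.9
MINMAX = 1                 # 1 for minimization; -1 when cost is a negated max

toy_cost = lambda x: sum(xi**2 for xi in x)   # stand-in objective
toy_bounds = ([-1.0]*3, [1.0]*3)              # (lower, upper)
identity = lambda x: x                        # no-op constraints
solved, func_max, func_evals = optimize(toy_cost, toy_bounds, identity)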
Example #2
from mystic.models.poly import chebyshev8cost as ChebyshevCost  # no helper
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import VTR
from mystic.strategy import Best1Exp
from mystic.monitors import VerboseMonitor
from mystic.tools import random_seed
from numpy import poly1d

ND = 9
NP = 40
MAX_GENERATIONS = NP * NP
NNODES = NP // 5  # integer node count

seed = 321

if __name__ == '__main__':

    def print_solution(func):
        print(poly1d(func))
        return

    psow = VerboseMonitor(10)
    ssow = VerboseMonitor(10)

    random_seed(seed)
    print "first sequential..."
    solver = DifferentialEvolutionSolver2(ND, NP)  #XXX: sequential
    solver.SetRandomInitialPoints(min=[-100.0] * ND, max=[100.0] * ND)
    solver.SetEvaluationLimits(generations=MAX_GENERATIONS)
    solver.SetGenerationMonitor(ssow)
    solver.Solve(ChebyshevCost, VTR(0.01), strategy=Best1Exp, \
                 CrossProbability=1.0, ScalingFactor=0.9, disp=1)
    print ""
    print_solution(solver.bestSolution)

    #'''
    random_seed(seed)
Example #3
from numpy import zeros, inner

# N, Aeq, Beq, Q, b, nx, x, y, svr_epsilon, pk, objective, SupportVectors,
# and RegressionFunction come from the surrounding SVR script
lb = zeros(N)
ub = zeros(N) + 0.5

# build the constraints operator
from mystic.symbolic import linear_symbolic, solve, \
     generate_solvers as solvers, generate_constraint as constraint
constrain = linear_symbolic(Aeq,Beq)
constrain = constraint(solvers(solve(constrain,target=['x0'])))

from mystic import suppressed
@suppressed(1e-5)
def conserve(x):
    return constrain(x)

from mystic.monitors import VerboseMonitor
mon = VerboseMonitor(10)

# solve for alpha
from mystic.solvers import diffev
alpha = diffev(objective, list(zip(lb,.1*ub)), args=(Q,b), npop=N*3, gtol=400, \
               itermon=mon, \
               ftol=1e-5, bounds=list(zip(lb,ub)), constraints=conserve, disp=1)

print('solved x: %s' % alpha)
print("constraint A*x == 0: %s" % inner(Aeq, alpha))
print("minimum 0.5*x'Qx + b'*x: %s" % objective(alpha, Q, b))

# calculate support vectors and regression function
sv1 = SupportVectors(alpha[:nx])
sv2 = SupportVectors(alpha[nx:])
R = RegressionFunction(x, y, alpha, svr_epsilon, pk)
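For reference, the symbolic-constraints pipeline used above can be exercised standalone. This is a minimal sketch with a made-up one-equation system (the real Aeq and Beq come from the SVR setup):

from numpy import array
from mystic.symbolic import linear_symbolic, solve
from mystic.symbolic import generate_solvers, generate_constraint

A = array([[1., 1., 1.]])  # hypothetical equality: x0 + x1 + x2 = 1
B = array([1.])
eqn = linear_symbolic(A, B)
constrain = generate_constraint(generate_solvers(solve(eqn, target=['x0'])))
print(constrain([0.2, 0.3, 0.4]))  # x0 is reset to 0.3 so the equality holds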
Example #4
    print("Powell's Method")
    print("===============")

    # initial guess
    x0 = [0.8, 1.2, 0.7]

    # define constraints factory function
    def constraints_factory(target):
        # define constraints function
        def constraints(x):
            # constrain the last x_i to be the same value as the first x_i
            x[-1] = x[0]
            # constrain x such that mean(x) == target
            if not almostEqual(mean(x), target):
                x = impose_mean(target, x)
            return x

        return constraints

    # configure constraints function
    constraints = constraints_factory(1.0)

    # configure monitor
    stepmon = VerboseMonitor(1)

    # use Powell's method to minimize the Rosenbrock function
    solution = fmin_powell(rosen, x0, constraints=constraints, itermon=stepmon)
    print(solution)

# end of file
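This snippet is a fragment of a larger script; judging from the names it uses, the missing header presumably contains imports along these lines:

from mystic.solvers import fmin_powell
from mystic.monitors import VerboseMonitor
from mystic.models import rosen
from mystic.math import almostEqual
from mystic.math.measures import mean, impose_mean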
Example #5
import numpy as np
from mystic.penalty import quadratic_inequality
from mystic.solvers import diffev2, fmin_powell
from mystic.monitors import VerboseMonitor

# func_value, production, Q, C, x0, and bounds come from the surrounding script
def func(x):
    curve = func_value(x[0:3])
    return -(sum(np.dot(curve, production)) - Q + x[3])


objective = lambda x: sum(np.dot(x[0:3], C)) + 1000 * x[3]

constraint = lambda x: func(x)


@quadratic_inequality(constraint)
def penalty(x):
    return 0.0


mon = VerboseMonitor(50)
solution = diffev2(objective,
                   x0,
                   penalty=penalty,
                   bounds=bounds,
                   itermon=mon,
                   gtol=100,
                   maxiter=1000,
                   maxfun=10000,
                   npop=40)
print(solution)

mon = VerboseMonitor(50)
solution = fmin_powell(objective,
                       x0,
                       penalty=penalty,
Example #6
  func_max = -solver.bestEnergy 
  return solved, func_max


if __name__ == '__main__':

  from mystic.monitors import Monitor, VerboseMonitor, LoggingMonitor
  from mystic.monitors import VerboseLoggingMonitor
  from mystic.munge import write_support_file
 #monitor = Monitor()
 #monitor = Monitor(all=True)
 #monitor = Monitor(all=False)
 #monitor = VerboseMonitor(1,1) 
 #monitor = VerboseMonitor(1,1, all=True) 
 #monitor = VerboseMonitor(1,1, all=False) 
 #monitor = VerboseMonitor(0,1)
  monitor = VerboseMonitor(1,0)
 #monitor = LoggingMonitor(1)
 #monitor = LoggingMonitor(1, all=True)
 #monitor = LoggingMonitor(1, all=False)
 #monitor = VerboseLoggingMonitor(1)
 #monitor = VerboseLoggingMonitor(0,1)

 #test0(monitor)
 #test1(monitor)
  test2(monitor)                 # GenerationMonitor works like test0
 #test2(monitor, diffenv=False)  # (to make like test1, add enclosing [])
 #test2(monitor, diffenv=True)

  # these are for "MonitorPlotter(s)"; need to adapt log.py plotters for test1
  write_support_file(monitor,'paramlog1.py')  # plot with 'support_*.py'
 #write_converge_file(monitor,'paramlog2.py') #XXX: no existing plotters?
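Whichever monitor variant is selected, its history remains available after the run. A short sketch of inspecting it, using the standard Monitor attributes:

# after a solve, a Monitor holds the logged trajectories
params = monitor.x  # parameter vectors, one per logged step
costs = monitor.y   # corresponding cost values
print("%d steps logged; best cost seen: %s" % (len(costs), min(costs)))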
Example #7
"""
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.monitors import VerboseMonitor, Monitor
from mystic.termination import ChangeOverGeneration as COG

# kwds for solver
opts = dict(termination=COG(1e-10, 100))
param = dict(
    solver=DifferentialEvolutionSolver2,
    npop=80,  #XXX:npop
    maxiter=1500,
    maxfun=1e+6,
    x0=None,  # use RandomInitialPoints
    nested=None,  # don't use SetNested
    pool=None,  # don't use SetMapper
    stepmon=VerboseMonitor(1, label='output'),  # monitor config
    evalmon=Monitor(),  # monitor config (re-initialized in solve)
    # kwds to pass directly to Solve(objective, **opt)
    opts=opts,
)

from mystic.math.discrete import product_measure
from mystic.math import almostEqual as almost
from mystic.constraints import and_, integers
from mystic.coupler import outer

# lower and upper bound for parameters and weights
xlb = (0, 1, 0, 0, 0)
xub = (1, 10, 10, 10, 10)
wlb = (0, 1, 1, 1, 1)
wub = (1, 1, 1, 1, 1)
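A sketch of how a config dict like param might be consumed; this driver is an assumption, as the original's solve wrapper is not shown:

def run(objective, ndim, param):
    # hypothetical driver: build and run the solver described by param
    solver = param['solver'](ndim, param['npop'])
    solver.SetRandomInitialPoints()  # param['x0'] is None, so sample randomly
    solver.SetEvaluationLimits(param['maxiter'], param['maxfun'])
    solver.SetGenerationMonitor(param['stepmon'])
    solver.SetEvaluationMonitor(param['evalmon'])
    solver.Solve(objective, **param['opts'])
    return solver.bestSolution, solver.bestEnergy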
Example #8
  def radius(model, point, ytol=0.0, xtol=0.0, ipop=None, imax=None):
    """graphical distance between a single point x,y and a model F(x')"""
    # NOTE: hausdorff, ptp, yptp, debug, and _get_xy are taken from the
    #       enclosing scope, as are the numpy helpers and mystic solvers
    # given a single point x,y: find the radius = |y - F(x')| + delta
    # radius is just a minimization over x' of |y - F(x')| + delta
    # where we apply a constraints function (of box constraints) of
    # |x - x'| <= xtol  (for each i in x)
    #
    # if hausdorff = some iterable, delta = |x - x'|/hausdorff
    # if hausdorff = True, delta = |x - x'|/spread(x); using the dataset range
    # if hausdorff = False, delta = 0.0
    #
    # if ipop, then DE else Powell; ytol is used in VTR(ytol)
    # and will terminate when cost <= ytol
    x,y = _get_xy(point)
    y = asarray(y)
    # catch cases where yptp or y will cause issues in normalization
   #if not isfinite(yptp): return 0.0 #FIXME: correct?  shouldn't happen
   #if yptp == 0: from numpy import inf; return inf #FIXME: this is bad

    # build the cost function
    if hausdorff: # distance in all directions
      def cost(rv):
        '''cost = |y - F(x')| + |x - x'| for each x,y (point in dataset)'''
        _y = model(rv)
        if not isfinite(_y): return abs(_y)
        errs = seterr(invalid='ignore', divide='ignore') # turn off warning 
        z = abs((asarray(x) - rv)/ptp)  # normalize by range
        m = abs(y - _y)/yptp            # normalize by range
        seterr(invalid=errs['invalid'], divide=errs['divide']) # turn on warning
        return m + sum(z[isfinite(z)])
    else:  # vertical distance only
      def cost(rv):
        '''cost = |y - F(x')| for each x,y (point in dataset)'''
        return abs(y - model(rv))

    if debug:
      print "rv: %s" % str(x)
      print "cost: %s" % cost(x)

    # if xtol=0, radius is difference in x,y and x,F(x); skip the optimization
    try:
      if not imax or not max(xtol): #iterables
        return cost(x)
    except TypeError:
      if not xtol: #non-iterables
        return cost(x)

    # set the range constraints
    xtol = asarray(xtol)
    bounds = list(zip(x - xtol, x + xtol))

    if debug:
      print "lower: %s" % str(zip(*bounds)[0])
      print "upper: %s" % str(zip(*bounds)[1])

    # optimize where initially x' = x
    stepmon = Monitor()
    if debug: stepmon = VerboseMonitor(1)
    #XXX: edit settings?
    MINMAX = 1 #XXX: confirm MINMAX=1 is minimization
    ftol = ytol
    gtol = None  # use VTRCOG
    if ipop:
      results = diffev2(cost, bounds, ipop, ftol=ftol, gtol=gtol, \
                        itermon = stepmon, maxiter=imax, bounds=bounds, \
                        full_output=1, disp=0, handler=False)
    else:
      results = fmin_powell(cost, x, ftol=ftol, gtol=gtol, \
                            itermon = stepmon, maxiter=imax, bounds=bounds, \
                            full_output=1, disp=0, handler=False)
   #solved = results[0]            # x'
    func_opt = MINMAX * results[1] # cost(x')
    if debug:
      print "solved: %s" % results[0]
      print "cost: %s" % func_opt

    # get the minimum distance |y - F(x')|
    return func_opt
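To make the hausdorff-style cost above concrete, here is a toy numeric check on a made-up model and point (all names here are illustrative, not from the module):

import numpy as np
model = lambda x: x[0]**2           # toy F(x')
x, y = np.array([2.0]), 3.0         # one observed point (x, y)
ptp, yptp = np.array([10.0]), 10.0  # hypothetical dataset ranges
rv = np.array([1.8])                # a candidate x'
m = abs(y - model(rv))/yptp         # |y - F(x')|, normalized by range
z = np.abs((x - rv)/ptp)            # |x - x'|, normalized by range
print(m + z.sum())                  # graphical distance at this rv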
Example #9
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2017 The Uncertainty Quantification Foundation.
# License: 3-clause BSD.  The full license text is available at:
#  - https://github.com/uqfoundation/mystic/blob/master/LICENSE

from mystic.models import rosen
from mystic.solvers import *
from mystic.termination import VTRChangeOverGeneration
from mystic.monitors import VerboseMonitor, Monitor
from mystic.tools import random_seed
random_seed(123)
lb, ub = [-100.]*3, [100.]*3
interval = None

if interval:
  _stepmon = VerboseMonitor(interval)
else:
  _stepmon = Monitor()
_term = VTRChangeOverGeneration(generations=200)
_solver = DifferentialEvolutionSolver(3, 20)#40)
_solver.SetRandomInitialPoints(lb,ub)
_solver.SetStrictRanges(lb,ub)
_solver.SetTermination(_term)
_solver.SetGenerationMonitor(_stepmon)
_solver.SetEvaluationLimits(100, 1000)
_solver.Solve(rosen)

_energy = _solver.bestEnergy
_solution =  _solver.bestSolution
_population = _solver.population
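The generation monitor also retains the trajectory, so convergence can be inspected after the fact:

# inspect the run using the recorded history
print("best energy: %s" % _energy)
print("generations logged: %d" % len(_stepmon.y))
print("population size: %d" % len(_population))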
Example #10
def impose_expectation(param, f, npts, bounds=None, weights=None, **kwds):
    """impose a given expextation value (m +/- D) on a given function f.
Optimiziation on f over the given bounds seeks a mean 'm' with deviation 'D'.
  (this function is not 'mean-, range-, or variance-preserving')

Inputs:
    param -- a tuple of target parameters: param = (mean, deviation)
    f -- a function that takes a list and returns a number
    npts -- a tuple of dimensions of the target product measure
    bounds -- a tuple of sample bounds:   bounds = (lower_bounds, upper_bounds)
    weights -- a list of sample weights

Additional Inputs:
    constraints -- a function that takes a nested list of N x 1D discrete
        measure positions and weights   x' = constraints(x, w)

Outputs:
    samples -- a list of sample positions

For example:
    >>> # provide the dimensions and bounds
    >>> nx = 3;  ny = 2;  nz = 1
    >>> x_lb = [10.0];  y_lb = [0.0];  z_lb = [10.0]
    >>> x_ub = [50.0];  y_ub = [9.0];  z_ub = [90.0]
    >>> 
    >>> # prepare the bounds
    >>> lb = (nx * x_lb) + (ny * y_lb) + (nz * z_lb)
    >>> ub = (nx * x_ub) + (ny * y_ub) + (nz * z_ub)
    >>>
    >>> # generate a list of samples with mean +/- dev imposed
    >>> mean = 2.0;  dev = 0.01
    >>> samples = impose_expectation((mean,dev), f, (nx,ny,nz), (lb,ub))
    >>>
    >>> # test the results by calculating the expectation value for the samples
    >>> expectation(f, samples)
    2.00001001012246015
"""
    # param[0] is the target mean
    # param[1] is the acceptable deviation from the target mean

    # FIXME: the following is a HACK to recover from lost 'weights' information
    #        we 'mimic' discrete measures using the product measure weights
    # plug in the 'constraints' function:  samples' = constrain(samples, weights)
    constrain = None  # default is no constraints
    if 'constraints' in kwds: constrain = kwds['constraints']
    if not constrain:  # if None (default), there are no constraints
        constraints = lambda x: x
    else:  #XXX: better to use a standard "xk' = constrain(xk)" interface ?

        def constraints(rv):
            coords = _pack(_nested(rv, npts))
            coords = list(zip(*coords))  # 'mimic' a nested list
            coords = constrain(coords, [weights for i in range(len(coords))])
            coords = list(zip(*coords))  # revert back to a packed list
            return _flat(_unpack(coords, npts))

    # construct cost function to reduce deviation from expectation value
    def cost(rv):
        """compute cost from a 1-d array of model parameters,
    where:  cost = | E[model] - m |**2 """
        # from mystic.math.measures import _pack, _nested, expectation
        samples = _pack(_nested(rv, npts))
        Ex = expectation(f, samples, weights)
        return (Ex - param[0])**2

    # if bounds are not set, use the default optimizer bounds
    if not bounds:
        lower_bounds = []
        upper_bounds = []
        for n in npts:
            lower_bounds += [None] * n
            upper_bounds += [None] * n
    else:
        lower_bounds, upper_bounds = bounds

    # construct and configure optimizer
    debug = kwds['debug'] if 'debug' in kwds else False
    npop = 200
    maxiter = 1000
    maxfun = 1e+6
    crossover = 0.9
    percent_change = 0.9

    def optimize(cost, bounds, tolerance, _constraints):
        lb, ub = bounds  # Python 3: tuple parameters in signatures are not allowed
        from mystic.solvers import DifferentialEvolutionSolver2
        from mystic.termination import VTR
        from mystic.strategy import Best1Exp
        from mystic.monitors import VerboseMonitor, Monitor
        from mystic.tools import random_seed
        if debug: random_seed(123)
        evalmon = Monitor()
        stepmon = Monitor()
        if debug: stepmon = VerboseMonitor(10)

        ndim = len(lb)
        solver = DifferentialEvolutionSolver2(ndim, npop)
        solver.SetRandomInitialPoints(min=lb, max=ub)
        solver.SetStrictRanges(min=lb, max=ub)
        solver.SetEvaluationLimits(maxiter, maxfun)
        solver.SetEvaluationMonitor(evalmon)
        solver.SetGenerationMonitor(stepmon)
        solver.Solve(cost,termination=VTR(tolerance),strategy=Best1Exp, \
                     CrossProbability=crossover,ScalingFactor=percent_change, \
                     constraints = _constraints)

        solved = solver.Solution()
        diameter_squared = solver.bestEnergy
        func_evals = len(evalmon)
        return solved, diameter_squared, func_evals
Example #11
    def optimize(self, optversion, method='diff-ev', gtol=5000,
                 positive_weights: bool = True, x0=None, bs0=None,
                 bounds=[0., 1.], noisebiasconstr=False, fb=1.,
                 inv_variance=False, verbose=True, noiseparameter=1.):
        '''
        Methods: diff-ev, SLSQP
        '''
        
        if verbose:
            print(f'Start optimization with {method}')

            if inv_variance:
                print('Using combined inverse variance weights')

        if x0 is None:
            # random fractions for all but the last estimator; the last takes
            # the remainder so each bin's weights sum to one
            v = np.random.rand(self.lenestimators - 1)/2
            x0 = list(v) + [1. - np.sum(v)]
            x0 = np.array(x0*int(self.nbins))

        if bs0 is None:
            bs0 = np.ones(int(self.nbins))
            norma = self.integerate_discrete(bs0, self.ells_selected)
            bs0 /= norma
        
        dims = (self.lenestimators+1) if not inv_variance else self.lenestimators
        bnds = [(bounds[0], bounds[1]) for i in range(dims*self.nbins)]
        bnds = tuple(bnds)

        if not inv_variance:
            x0 = np.append(x0, bs0)

        #if positive_weights:
        #    cons = ({'type': 'eq', 'fun': self.get_constraint()}, {'type': 'ineq', 'fun': self.get_constraint_ineq()})
        #else:
        #    cons = ({'type': 'eq', 'fun': self.get_constraint()})


        weights_name = optversion['weights_name']

        if verbose:
            print(f'Doing for {weights_name}')

        sum_biases_squared = optversion['sum_biases_squared']
        abs_biases = optversion['abs_biases']
        bias_squared = optversion['bias_squared']

        if abs_biases:
            prepare = lambda x: abs(x)
        else:
            prepare = lambda x: x

        f, noisef, biasf = self.get_f_n_b(self.ells_selected, self.theory_selected, self.theta_selected, prepare(self.biases_selected), sum_biases_squared = sum_biases_squared, bias_squared = bias_squared, fb = fb, inv_variance = inv_variance, noiseparameter = noiseparameter)
        self.f = f
        self.noisef = noisef
        self.biasf = biasf

        extra_constraint = lambda x: abs(self.noisef(np.array(x))-self.biasf(np.array(x)))


        def constraint_eq(x):
            x = np.array(x)
            a = self.get_a(x, inv_variance)
            a[:, -1] = 1-np.sum(a[:, :-1], axis = 1)
            if not inv_variance:
                x[:-self.nbins] = a.flatten()
            else:
                x = a.flatten()
            return x

        def penalty1(x):
            x = np.array(x)
            b = x[-self.nbins:]
            res = self.integerate_discrete(b, self.ells_selected)
            return 1-res

        k = 1e20

        if noisebiasconstr:
            @quadratic_equality(condition=penalty1, k=k)
            @quadratic_equality(condition=extra_constraint, k=k)
            def penalty(x):
                return 0.0
        else:
            @quadratic_equality(condition=penalty1, k=k)
            def penalty(x):
                return 0.0

        if inv_variance:
            penalty = None 
            if noisebiasconstr:
                @quadratic_equality(condition=extra_constraint, k = k)
                def penalty(x):
                    return 0.0

        mon = VerboseMonitor(100)

        func = lambda x: f(np.array(x))
        result = my.diffev(func, x0, npop=10*len(bnds), bounds=bnds,
                           ftol=1e-11, gtol=gtol, maxiter=1024**3,
                           maxfun=1024**3, constraints=constraint_eq,
                           penalty=penalty, full_output=True, itermon=mon)

        result = Res(result[0], self.ells_selected)
        self.result = result
        
        ws = self.get_weights(result.x, inv_variance, verbose = verbose)        
        weights_per_l = self.get_final_variance_weights(result.x, self.ells_selected, self.theory_selected, self.theta_selected, inv_variance)

        result.set_weights(tuple(list(ws)+[weights_per_l]))
        
        self.monitor = mon
        
        return result
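The optversion argument is a plain dict; judging from the keys read above, a call might look like the following (values are illustrative only):

optversion = dict(weights_name='combined',  # label used in log messages
                  sum_biases_squared=False,
                  abs_biases=True,
                  bias_squared=False)
# result = obj.optimize(optversion, method='diff-ev', gtol=5000)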