Example #1
from g11 import objective, bounds, xs, xs_, ys

from mystic.penalty import quadratic_equality
from mystic.constraints import with_penalty

@with_penalty(quadratic_equality, k=1e12)
def penalty(x): # == 0.0
    return x[1] - x[0]**2

from mystic.constraints import as_constraint

solver = as_constraint(penalty)



if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

    result = diffev2(objective, x0=bounds, bounds=bounds, constraints=solver, npop=40, xtol=1e-8, ftol=1e-8, disp=False, full_output=True)

    assert almostEqual(result[0], xs, tol=1e-2) \
        or almostEqual(result[0], xs_, tol=1e-2)
    assert almostEqual(result[1], ys, rel=1e-2)



# EOF
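
For comparison, a minimal sketch (not part of the original g11 example) of expressing the same equality symbolically with mystic.symbolic, the API that several later examples rely on; the equation string below is an assumption restating the penalty above.

from mystic.symbolic import generate_constraint, generate_solvers, solve

eqn = "x1 - x0**2 == 0.0"
constrain = generate_constraint(generate_solvers(solve(eqn)))
# constrain([1.0, 0.5]) returns a parameter pair that satisfies x1 == x0**2
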
Example #2
def penalty3(x): # <= 0.0
    return 23*x[0] + x[1]**2 + 6*x[5]**2 - 8*x[6] - 196.0

def penalty4(x): # <= 0.0
    return 4*x[0]**2 + x[1]**2 - 3*x[0]*x[1] + 2*x[2]**2 + 5*x[5] - 11*x[6]

@quadratic_inequality(penalty1, k=1e12)
@quadratic_inequality(penalty2, k=1e12)
@quadratic_inequality(penalty3, k=1e12)
@quadratic_inequality(penalty4, k=1e12)
def penalty(x):
    return 0.0

solver = as_constraint(penalty)



if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

    result = diffev2(objective, x0=bounds, bounds=bounds, penalty=penalty, npop=40, gtol=200, disp=False, full_output=True)

    assert almostEqual(result[0], xs, rel=1e-2)
    assert almostEqual(result[1], ys, rel=1e-2)



# EOF
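
Because penalty1 and penalty2 are defined outside this snippet, here is a small self-contained sketch of the same stacked quadratic_inequality pattern on a toy problem; the condition, objective, and starting point below are assumptions for illustration only.

from mystic.penalty import quadratic_inequality
from mystic.solvers import fmin_powell

def condition(x): # <= 0.0
    return 2.0 - x[0]

@quadratic_inequality(condition, k=1e12)
def toy_penalty(x):
    return 0.0

result = fmin_powell(lambda x: x[0]**2, x0=[5.0], penalty=toy_penalty,
                     disp=False, full_output=True)
# result[0] should land near [2.0], where the inequality becomes active
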
Example #3
from mystic import reduced


@reduced(lambda x, y: abs(x) + abs(y))  #choice changes answer
def objective(x, a, b, c):
    x, y = x
    eqns = (\
      (x - a[0])**2 + (y - b[0])**2 - c[0]**2,
      (x - a[1])**2 + (y - b[1])**2 - c[1]**2,
      (x - a[2])**2 + (y - b[2])**2 - c[2]**2)
    return eqns


bounds = [(None, None), (None, None)]  #unnecessary

a = (0, 2, 0)
b = (0, 0, 2)
c = (.88, 1, .75)
args = a, b, c

from mystic.solvers import diffev2
from mystic.monitors import VerboseMonitor

mon = VerboseMonitor(10)

result = diffev2(objective, args=args, x0=bounds, bounds=bounds, npop=40, \
                 ftol=1e-8, disp=False, full_output=True, itermon=mon)

print(result[0])
print(result[1])
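
Since the reducer choice changes the answer, here is a hedged variant (an assumption, not part of the original script) that reduces with max instead of a sum, minimizing the worst single-circle mismatch rather than their total:

@reduced(lambda x, y: max(abs(x), abs(y)))  # worst-case residual instead of the sum
def objective_minimax(x, a, b, c):
    x, y = x
    return ((x - a[0])**2 + (y - b[0])**2 - c[0]**2,
            (x - a[1])**2 + (y - b[1])**2 - c[1]**2,
            (x - a[2])**2 + (y - b[2])**2 - c[2]**2)

result_ = diffev2(objective_minimax, args=args, x0=bounds, bounds=bounds,
                  npop=40, ftol=1e-8, disp=False, full_output=True)
print(result_[0])
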
Example #4
  def radius(model, point, ytol=0.0, xtol=0.0, ipop=None, imax=None):
    """graphical distance between a single point x,y and a model F(x')"""
    # given a single point x,y: find the radius = |y - F(x')| + delta
    # radius is just a minimization over x' of |y - F(x')| + delta
    # where we apply a constraints function (of box constraints) of
    # |x - x'| <= xtol  (for each i in x)
    #
    # if hausdorff = some iterable, delta = |x - x'|/hausdorff
    # if hausdorff = True, delta = |x - x'|/spread(x); using the dataset range
    # if hausdorff = False, delta = 0.0
    #
    # if ipop, then DE else Powell; ytol is used in VTR(ytol)
    # and will terminate when cost <= ytol
    x,y = _get_xy(point)
    y = asarray(y)
    # catch cases where yptp or y will cause issues in normalization
   #if not isfinite(yptp): return 0.0 #FIXME: correct?  shouldn't happen
   #if yptp == 0: from numpy import inf; return inf #FIXME: this is bad

    # build the cost function
    if hausdorff: # distance in all directions
      def cost(rv):
        '''cost = |y - F(x')| + |x - x'| for each x,y (point in dataset)'''
        _y = model(rv)
        if not isfinite(_y): return abs(_y)
        errs = seterr(invalid='ignore', divide='ignore') # turn off warning 
        z = abs((asarray(x) - rv)/ptp)  # normalize by range
        m = abs(y - _y)/yptp            # normalize by range
        seterr(invalid=errs['invalid'], divide=errs['divide']) # turn on warning
        return m + sum(z[isfinite(z)])
    else:  # vertical distance only
      def cost(rv):
        '''cost = |y - F(x')| for each x,y (point in dataset)'''
        return abs(y - model(rv))

    if debug:
      print("rv: %s" % str(x))
      print("cost: %s" % cost(x))

    # if xtol=0, radius is difference in x,y and x,F(x); skip the optimization
    try:
      if not imax or not max(xtol): #iterables
        return cost(x)
    except TypeError:
      if not xtol: #non-iterables
        return cost(x)

    # set the range constraints
    xtol = asarray(xtol)
    bounds = list(zip( x - xtol, x + xtol ))

    if debug:
      print("lower: %s" % str(zip(*bounds)[0]))
      print("upper: %s" % str(zip(*bounds)[1]))

    # optimize where initially x' = x
    stepmon = Monitor()
    if debug: stepmon = VerboseMonitor(1)
    #XXX: edit settings?
    MINMAX = 1 #XXX: confirm MINMAX=1 is minimization
    ftol = ytol
    gtol = None  # use VTRCOG
    if ipop:
      results = diffev2(cost, bounds, ipop, ftol=ftol, gtol=gtol, \
                        itermon = stepmon, maxiter=imax, bounds=bounds, \
                        full_output=1, disp=0, handler=False)
    else:
      results = fmin_powell(cost, x, ftol=ftol, gtol=gtol, \
                            itermon = stepmon, maxiter=imax, bounds=bounds, \
                            full_output=1, disp=0, handler=False)
   #solved = results[0]            # x'
    func_opt = MINMAX * results[1] # cost(x')
    if debug:
      print("solved: %s" % results[0])
      print("cost: %s" % func_opt)

    # get the minimum distance |y - F(x')|
    return func_opt
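
As a standalone illustration of the hausdorff-normalized cost assembled above, a numpy-only sketch for one observed point; the toy model F and all numeric values are assumptions.

import numpy as np

F = lambda v: v[0]**2                 # toy model y' = F(x')
x, y = np.array([1.5]), 2.0           # one observed point (x, y)
ptp, yptp = np.array([4.0]), 8.0      # dataset ranges used for normalization
rv = np.array([1.4])                  # candidate x'
cost = abs(y - F(rv))/yptp + np.sum(np.abs((x - rv)/ptp))
print(cost)                           # |y - F(x')|/yptp + sum(|x - x'|/ptp)
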
Example #5
File: g11_alt.py (project: vt100/mystic)
@with_penalty(quadratic_equality, k=1e12)
def penalty(x):  # == 0.0
    return x[1] - x[0]**2


from mystic.constraints import as_constraint

solver = as_constraint(penalty)

if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     constraints=solver,
                     npop=40,
                     xtol=1e-8,
                     ftol=1e-8,
                     disp=False,
                     full_output=True)

    assert almostEqual(result[0], xs, tol=1e-2) \
        or almostEqual(result[0], xs_, tol=1e-2)
    assert almostEqual(result[1], ys, rel=1e-2)

# EOF
Example #6
-x0 + 0.0193*x2 <= 0.0
-x1 + 0.00954*x2 <= 0.0
-pi*x2**2*x3 - (4/3.)*pi*x2**3 + 1296000.0 <= 0.0
x3 - 240.0 <= 0.0
"""
cf = generate_constraint(generate_solvers(simplify(equations)))
pf = generate_penalty(generate_conditions(equations), k=1e12)

if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual
    from mystic.monitors import VerboseMonitor
    mon = VerboseMonitor(10)

    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     constraints=cf,
                     penalty=pf,
                     npop=40,
                     gtol=50,
                     disp=False,
                     full_output=True,
                     itermon=mon)

    assert almostEqual(result[0], xs, rel=1e-2)
    assert almostEqual(result[1], ys, rel=1e-2)

# EOF
Example #7
    estimator.fit(X_train, y_train)
    return 1 - estimator.score(X_test, y_test)


bounds = [(0, 10)]
# for the given split, the solution is, roughly:
xs = [0.01213213]
ys = 0.08483955

if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual
    from mystic.monitors import VerboseMonitor
    mon = VerboseMonitor(10)

    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     npop=40,
                     ftol=1e-8,
                     gtol=75,
                     disp=False,
                     full_output=True,
                     itermon=mon)

    assert almostEqual(result[0], xs, rel=1e-2)
    assert almostEqual(result[1], ys, rel=1e-2)

# EOF
#   # test fcalls <= maxfun
#   assert my_x[3] <= maxfun
    if maxiter is not None:
        # test iters <= maxiter
        assert my_x[2] <= maxiter
    return

if __name__ == '__main__':
    x0 = [0, 0, 0]

    # check solutions versus results based on the random_seed
    # print "comparing against known results"
    sol = solvers.diffev(rosen, x0, npop=40, disp=0, full_output=True)
    assert almostEqual(sol[1], 0.0020640145337293249, tol=3e-3)
    sol = solvers.diffev2(rosen, x0, npop=40, disp=0, full_output=True)
    assert almostEqual(sol[1], 0.0017516784703663288, tol=3e-3)
    sol = solvers.fmin_powell(rosen, x0, disp=0, full_output=True)
    assert almostEqual(sol[1], 8.3173488898295291e-23)
    sol = solvers.fmin(rosen, x0, disp=0, full_output=True)
    assert almostEqual(sol[1], 1.1605792769954724e-09)

    solver2 = 'diffev2'
    for solver in ['diffev']:
        #   print "comparing %s and %s from mystic" % (solver, solver2)
        test_solvers(solver, solver2, x0, npop=40)
        test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=0)
        test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=1)
        test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=2)
        test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=9)
        test_solvers(solver, solver2, x0, npop=40, maxiter=0)
Example #9
    assert my_x[3] == sp_x[-2]
#   # test fcalls <= maxfun
#   assert my_x[3] <= maxfun
  if maxiter is not None:
    # test iters <= maxiter
    assert my_x[2] <= maxiter
  return 

if __name__ == '__main__':
  x0 = [0,0,0]

  # check solutions versus results based on the random_seed
# print "comparing against known results"
  sol = solvers.diffev(rosen, x0, npop=40, disp=0, full_output=True)
  assert almostEqual(sol[1], 0.0020640145337293249, tol=3e-3)
  sol = solvers.diffev2(rosen, x0, npop=40, disp=0, full_output=True)
  assert almostEqual(sol[1], 0.0017516784703663288, tol=3e-3)
  sol = solvers.fmin_powell(rosen, x0, disp=0, full_output=True)
  assert almostEqual(sol[1], 8.3173488898295291e-23)
  sol = solvers.fmin(rosen, x0, disp=0, full_output=True)
  assert almostEqual(sol[1], 1.1605792769954724e-09)

  solver2 = 'diffev2'
  for solver in ['diffev']:
#   print "comparing %s and %s from mystic" % (solver, solver2)
    test_solvers(solver, solver2, x0, npop=40)
    test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=0)
    test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=1)
    test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=2)
    test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=9)
    test_solvers(solver, solver2, x0, npop=40, maxiter=0)
Example #10
N = 10
b = 5
bounds = [(0, 1)] * N


def objective(x, w):
    s = 0
    for i in range(len(x) - 1):
        for j in range(i, len(x)):
            s += w[i, j] * x[i] * x[j]
    return s


w = np.ones((N, N))  #XXX: replace with actual values of wij

cost = lambda x: -objective(x, w)

c = and_(lambda x: impose_sum(b, x), discrete([0, 1])(lambda x: x))

mon = VerboseMonitor(10)
solution = diffev2(cost,
                   bounds,
                   constraints=c,
                   bounds=bounds,
                   itermon=mon,
                   gtol=50,
                   maxiter=5000,
                   maxfun=50000,
                   npop=10)
print(solution)
Example #11
x0 - 2*x1 - 4.0 <= 0.0
"""
bounds = [(None, None),(0.0, None)]

# with penalty='penalty' applied, solution is:
xs = [0.5, 1.5]
ys = 2.5

from mystic.symbolic import generate_conditions, generate_penalty
pf = generate_penalty(generate_conditions(equations), k=1e3)
from mystic.symbolic import generate_constraint, generate_solvers, simplify
cf = generate_constraint(generate_solvers(simplify(equations)))



if __name__ == '__main__':

  from mystic.solvers import diffev2, fmin_powell
  from mystic.math import almostEqual

  result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=cf, npop=40, disp=False, full_output=True, ftol=1e-10, gtol=100)
  assert almostEqual(result[0], xs, rel=1e-2)
  assert almostEqual(result[1], ys, rel=1e-2)

  result = fmin_powell(objective, x0=[0.0,0.0], bounds=bounds, penalty=pf, constraints=cf, disp=False, full_output=True, gtol=3)
  assert almostEqual(result[0], xs, rel=1e-2)
  assert almostEqual(result[1], ys, rel=1e-2)


# EOF
Example #12
def impose_valid(cutoff, model, guess=None, **kwds):
  """impose model validity on a given list of parameters w,x,y

Optimization on w,x,y over the given bounds seeks sum(infeasibility) = 0.
  (this function is not ???-preserving)

Inputs:
    cutoff -- maximum acceptable model invalidity |y - F(x')|; a single value
    model -- the model function, y' = F(x'), that approximates reality, y = G(x)
    guess -- the scenario providing an initial guess at validity,
        or a tuple of dimensions of the target scenario

Additional Inputs:
    hausdorff -- norm; where if given, ytol = |y - F(x')| + |x - x'|/norm
    xtol -- acceptable pointwise graphical distance of model from reality
    tol -- acceptable optimizer termination before sum(infeasibility) = 0.
    bounds -- a tuple of sample bounds:   bounds = (lower_bounds, upper_bounds)
    constraints -- a function that takes a flat list of parameters
        x' = constraints(x)

Outputs:
    pm -- a scenario with desired model validity

Notes:
    xtol defines the n-dimensional base of a pillar of height cutoff, centered at
    each point. The region inside the pillar defines the space where a "valid"
    model must intersect. If xtol is not specified, then the base of the pillar
    will be a Dirac at x' = x. This function performs an optimization to find
    a set of points where the model is valid. Here, tol is used to set the
    optimization termination for the sum(graphical_distances), while cutoff is
    used in defining the graphical_distance between x,y and x',F(x').
"""
  from numpy import sum as _sum, asarray
  from mystic.math.distance import graphical_distance, infeasibility, _npts
  if guess is None:
    message = "Requires a guess scenario, or a tuple of scenario dimensions."
    raise TypeError(message)
  # get initial guess
  if hasattr(guess, 'pts'): # guess is a scenario
    pts = guess.pts    # number of x
    guess = guess.flatten(all=True)
  else:
    pts = guess        # guess is given as a tuple of 'pts'
    guess = None
  npts = _npts(pts)    # number of Y

  # prepare bounds for solver
  bounds = kwds.pop('bounds', None)
  # if bounds are not set, use the default optimizer bounds
  if bounds is None:
    lower_bounds = []; upper_bounds = []
    for n in pts:  # bounds for n*x in each dimension  (x2 due to weights)
      lower_bounds += [None]*n * 2
      upper_bounds += [None]*n * 2
    # also need bounds for npts*y values
    lower_bounds += [None]*npts
    upper_bounds += [None]*npts
    bounds = lower_bounds, upper_bounds
  bounds = asarray(bounds).T

  # plug in the 'constraints' function:  param' = constraints(param)
  constraints = kwds.pop('constraints', None) # default is no constraints
  if not constraints:  # if None (default), there are no constraints
    constraints = lambda x: x

  # 'wiggle room' tolerances
  ipop = kwds.pop('ipop', 10) #XXX: tune ipop (inner optimization)?
  imax = kwds.pop('imax', 10) #XXX: tune imax (inner optimization)?
  # tolerance for optimization on sum(y)
  tol = kwds.pop('tol', 0.0) # default
  npop = kwds.pop('npop', 20) #XXX: tune npop (outer optimization)?
  maxiter = kwds.pop('maxiter', 1000) #XXX: tune maxiter (outer optimization)?

  # if no guess was made, then use bounds constraints
  if guess is None:
    if npop:
      guess = bounds
    else:  # fmin_powell needs a list params (not bounds)
      guess = [(a + b)/2. for (a,b) in bounds]

  # construct cost function to reduce sum(infeasibility)
  def cost(rv):
    """compute cost from a 1-d array of model parameters,
    where: cost = | sum( infeasibility ) | """
    # converting rv to scenario
    points = scenario()
    points.load(rv, pts)
    # calculate infeasibility
    Rv = graphical_distance(model, points, ytol=cutoff, ipop=ipop, \
                                                        imax=imax, **kwds)
    v = infeasibility(Rv, cutoff)
    # converting v to E
    return _sum(v) #XXX: abs ?

  # construct and configure optimizer
  debug = False  #!!!
  maxfun = 1e+6
  crossover = 0.9; percent_change = 0.8
  ftol = abs(tol); gtol = None #XXX: optimally, should be VTRCOG...

  if debug:
    print "lower bounds: %s" % bounds.T[0]
    print "upper bounds: %s" % bounds.T[1]
  # print "initial value: %s" % guess
  # use optimization to get model-valid points
  from mystic.solvers import diffev2, fmin_powell
  from mystic.monitors import Monitor, VerboseMonitor
  from mystic.strategy import Best1Bin, Best1Exp
  evalmon = Monitor();  stepmon = Monitor(); strategy = Best1Exp
  if debug: stepmon = VerboseMonitor(2)  #!!!
  if npop: # use VTR
    results = diffev2(cost, guess, npop, ftol=ftol, gtol=gtol, bounds=bounds,\
                      maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
                      cross=crossover, scale=percent_change, strategy=strategy,\
                      evalmon=evalmon, itermon=stepmon,\
                      full_output=1, disp=0, handler=False)
  else: # use VTR
    results = fmin_powell(cost, guess, ftol=ftol, gtol=gtol, bounds=bounds,\
                      maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
                      evalmon=evalmon, itermon=stepmon,\
                      full_output=1, disp=0, handler=False)
  # repack the results
  pm = scenario()
  pm.load(results[0], pts)            # params: w,x,y
 #if debug: print "final cost: %s" % results[1]
  if debug and results[2] >= maxiter: # iterations
    print "Warning: constraints solver terminated at maximum iterations"
 #func_evals = results[3]           # evaluation
  return pm
Example #13
def impose_feasible(cutoff, data, guess=None, **kwds):
  """impose shortness on a given list of parameters w,x,y.

Optimization on w,x,y over the given bounds seeks sum(infeasibility) = 0.
  (this function is not ???-preserving)

Inputs:
    cutoff -- maximum acceptable deviation from shortness
    data -- a dataset of observed points (these points are 'static')
    guess -- the scenario providing an initial guess at feasibility,
        or a tuple of dimensions of the target scenario

Additional Inputs:
    tol -- acceptable optimizer termination before sum(infeasibility) = 0.
    bounds -- a tuple of sample bounds:   bounds = (lower_bounds, upper_bounds)
    constraints -- a function that takes a flat list of parameters
        x' = constraints(x)

Outputs:
    pm -- a scenario with desired shortness
"""
  from numpy import sum, asarray
  from mystic.math.legacydata import dataset
  from mystic.math.distance import lipschitz_distance, infeasibility, _npts
  if guess is None:
    message = "Requires a guess scenario, or a tuple of scenario dimensions."
    raise TypeError(message)
  # get initial guess
  if hasattr(guess, 'pts'): # guess is a scenario
    pts = guess.pts    # number of x
    guess = guess.flatten(all=True)
  else:
    pts = guess        # guess is given as a tuple of 'pts'
    guess = None
  npts = _npts(pts)    # number of Y
  long_form = len(pts) - list(pts).count(2) # can use '2^K compressed format'
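  # note: long_form is 0 (falsy) only when every entry of pts is 2, in which
  # case cost() below can use the 2^K product-of-pairs selection; otherwise
  # the full list of positions is used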

  # prepare bounds for solver
  bounds = kwds.pop('bounds', None)
  # if bounds are not set, use the default optimizer bounds
  if bounds is None:
    lower_bounds = []; upper_bounds = []
    for n in pts:  # bounds for n*x in each dimension  (x2 due to weights)
      lower_bounds += [None]*n * 2
      upper_bounds += [None]*n * 2
    # also need bounds for npts*y values
    lower_bounds += [None]*npts
    upper_bounds += [None]*npts
    bounds = lower_bounds, upper_bounds
  bounds = asarray(bounds).T

  # plug in the 'constraints' function:  param' = constraints(param)
  # constraints should impose_mean(y,w), and possibly sum(weights)
  constraints = kwds.pop('constraints', None) # default is no constraints
  if not constraints:  # if None (default), there are no constraints
    constraints = lambda x: x

  _self = kwds.pop('with_self', True) # default includes self in shortness
  if _self is not False: _self = True
  # tolerance for optimization on sum(y)
  tol = kwds.pop('tol', 0.0) # default
  npop = kwds.pop('npop', 20) #XXX: tune npop?
  maxiter = kwds.pop('maxiter', 1000) #XXX: tune maxiter?

  # if no guess was made, then use bounds constraints
  if guess is None:
    if npop:
      guess = bounds
    else:  # fmin_powell needs a list params (not bounds)
      guess = [(a + b)/2. for (a,b) in bounds]

  # construct cost function to reduce sum(lipschitz_distance)
  def cost(rv):
    """compute cost from a 1-d array of model parameters,
    where:  cost = | sum(lipschitz_distance) | """
    _data = dataset()
    _pm = scenario()
    _pm.load(rv, pts)      # here rv is param: w,x,y
    if not long_form:
      positions = _pm.select(*range(npts))
    else: positions = _pm.positions
    _data.load( data.coords, data.values )                   # LOAD static
    if _self:
      _data.load( positions, _pm.values )                    # LOAD dynamic
    _data.lipschitz = data.lipschitz                         # LOAD L
    Rv = lipschitz_distance(_data.lipschitz, _pm, _data, tol=cutoff, **kwds)
    v = infeasibility(Rv, cutoff)
    return abs(sum(v))

  # construct and configure optimizer
  debug = False  #!!!
  maxfun = 1e+6
  crossover = 0.9; percent_change = 0.9
  ftol = abs(tol); gtol = None

  if debug:
    print "lower bounds: %s" % bounds.T[0]
    print "upper bounds: %s" % bounds.T[1]
  # print "initial value: %s" % guess
  # use optimization to get feasible points
  from mystic.solvers import diffev2, fmin_powell
  from mystic.monitors import Monitor, VerboseMonitor
  from mystic.strategy import Best1Bin, Best1Exp
  evalmon = Monitor();  stepmon = Monitor(); strategy = Best1Exp
  if debug: stepmon = VerboseMonitor(10)  #!!!
  if npop: # use VTR
    results = diffev2(cost, guess, npop, ftol=ftol, gtol=gtol, bounds=bounds,\
                      maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
                      cross=crossover, scale=percent_change, strategy=strategy,\
                      evalmon=evalmon, itermon=stepmon,\
                      full_output=1, disp=0, handler=False)
  else: # use VTR
    results = fmin_powell(cost, guess, ftol=ftol, gtol=gtol, bounds=bounds,\
                      maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
                      evalmon=evalmon, itermon=stepmon,\
                      full_output=1, disp=0, handler=False)
  # repack the results
  pm = scenario()
  pm.load(results[0], pts)            # params: w,x,y
 #if debug: print "final cost: %s" % results[1]
  if debug and results[2] >= maxiter: # iterations
    print "Warning: constraints solver terminated at maximum iterations"
 #func_evals = results[3]           # evaluation
  return pm
Example #14
#x[3] is the slack variable

def func_value(d):
    curve_vec=[]
    for val in d:
        curve = (0.3 * val) + ((2 * (val ** (3/2))) / 3)
        curve_vec.append(curve)
    return curve_vec

def func(x):
    curve = func_value(x[0:3])
    return -(sum(np.dot(curve,production))-Q+x[3])

objective = lambda x: sum(np.dot(x[0:3],C))+1000*x[3]     
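# the 1000*x[3] coefficient makes the slack costly, so the optimizer prefers to
# satisfy the requirement encoded in func with x[0:3] rather than with slack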

constraint = lambda x: func(x)

@quadratic_inequality(constraint)
def penalty(x):
    return 0.0


mon = VerboseMonitor(50)
solution = diffev2(objective,x0,penalty=penalty,bounds=bounds,itermon=mon,gtol=100, maxiter=1000, maxfun=10000, npop=40)
print(solution)

mon = VerboseMonitor(50)
solution = fmin_powell(objective,x0,penalty=penalty,bounds=bounds,itermon=mon,gtol=100, maxiter=1000, maxfun=10000)
print(solution)

Example #15
# 'solution' is:
xs  = polyfit(x, y, 1) 
xs_ = polyfit(x, y, 2) 
ys  = objective(xs, x, y)
ys_ = objective(xs_, x, y)


if __name__ == '__main__':

  from mystic.solvers import diffev2, fmin_powell
  from mystic.math import almostEqual
# from mystic.monitors import VerboseMonitor
# mon = VerboseMonitor(10)

  result = diffev2(objective, args=args, x0=bounds, bounds=bounds, npop=40, ftol=1e-8, gtol=100, disp=False, full_output=True)#, itermon=mon)
# print result[0], xs
  assert almostEqual(result[0], xs, rel=1e-1)
  assert almostEqual(result[1], ys, rel=1e-1)

  result = fmin_powell(objective, args=args, x0=[0.0,0.0], bounds=bounds, disp=False, full_output=True)
# print result[0], xs
  assert almostEqual(result[0], xs, rel=1e-1)
  assert almostEqual(result[1], ys, rel=1e-1)

# mon = VerboseMonitor(10)
  result = diffev2(objective, args=args, x0=bounds_, bounds=bounds_, npop=40, ftol=1e-8, gtol=100, disp=False, full_output=True)#, itermon=mon)
# print result[0], xs_
  assert almostEqual(result[0], xs_, tol=1e-1)
  assert almostEqual(result[1], ys_, rel=1e-1)
Example #16
File: eq10.py (project: uqfoundation/mystic)
1394152 + 66920*x0 + 55679*x3 - 64234*x1 - 65337*x2 - 45581*x4 - 67707*x5 - 98038*x6 == 0.0
68550*x0 + 27886*x1 + 31716*x2 + 73597*x3 + 38835*x6 - 279091 - 88963*x4 - 76391*x5 == 0.0
76132*x1 + 71860*x2 + 22770*x3 + 68211*x4 + 78587*x5 - 480923 - 48224*x0 - 82817*x6 == 0.0
519878 + 94198*x1 + 87234*x2 + 37498*x3 - 71583*x0 - 25728*x4 - 25495*x5 - 70023*x6 == 0.0
361921 + 78693*x0 + 38592*x4 + 38478*x5 - 94129*x1 - 43188*x2 - 82528*x3 - 69025*x6 == 0.0
"""

from mystic.symbolic import generate_penalty, generate_conditions
pf = generate_penalty(generate_conditions(equations))
from mystic.symbolic import generate_constraint, generate_solvers, solve
cf = generate_constraint(generate_solvers(solve(equations)))

from numpy import round as npround


if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

   #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, npop=20, gtol=50, disp=True, full_output=True)
   #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=npround, npop=40, gtol=50, disp=True, full_output=True)
    result = diffev2(objective, x0=bounds, bounds=bounds, constraints=cf, npop=4, gtol=1, disp=True, full_output=True)

    print(result[0])
    assert almostEqual(result[0], xs, tol=1e-8) #XXX: fails b/c rel & zero?
    assert almostEqual(result[1], ys, tol=1e-4)


# EOF
Example #17
"""
Maximization with a boolean variable and constraints.
"""
from mystic.solvers import diffev2
from mystic.monitors import VerboseMonitor
from mystic.constraints import impose_sum, discrete, and_
import numpy as np

N = 10
b = 5
bounds = [(0,1)] * N

def objective(x, w):
    s = 0
    for i in range(len(x)-1):
        for j in range(i, len(x)):
            s += w[i,j] * x[i] * x[j]
    return s


w = np.ones((N,N)) #XXX: replace with actual values of wij

cost = lambda x: -objective(x, w)

c = and_(lambda x: impose_sum(b, x), discrete([0,1])(lambda x:x))

mon = VerboseMonitor(10)
solution = diffev2(cost,bounds,constraints=c,bounds=bounds,itermon=mon,gtol=50, maxiter=5000, maxfun=50000, npop=10)
print(solution)
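
A hedged illustration of the two pieces composed in c above (the trial vectors are assumptions): impose_sum rescales a candidate to the target sum, the discrete decorator snaps entries onto {0, 1}, and and_ couples them into a single constraints function.

print(impose_sum(b, [0.2]*N))          # entries rescaled so they sum to b
binary = discrete([0, 1])(lambda x: x)
print(binary([0.2, 0.7, 0.9]))         # each entry mapped onto {0, 1}
print(c([0.3]*N))                      # ideally a 0/1 vector whose entries sum to b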

Example #18
objective = lambda x: sum(np.dot(x[0:3], C)) + 1000 * x[3]

constraint = lambda x: func(x)


@quadratic_inequality(constraint)
def penalty(x):
    return 0.0


mon = VerboseMonitor(50)
solution = diffev2(objective,
                   x0,
                   penalty=penalty,
                   bounds=bounds,
                   itermon=mon,
                   gtol=100,
                   maxiter=1000,
                   maxfun=10000,
                   npop=40)
print(solution)

mon = VerboseMonitor(50)
solution = fmin_powell(objective,
                       x0,
                       penalty=penalty,
                       bounds=bounds,
                       itermon=mon,
                       gtol=100,
                       maxiter=1000,
                       maxfun=10000)
Example #19
File: distance.py (project: Magellen/mystic)
  def radius(model, point, ytol=0.0, xtol=0.0, ipop=None, imax=None):
    """graphical distance between a single point x,y and a model F(x')"""
    # given a single point x,y: find the radius = |y - F(x')| + delta
    # radius is just a minimization over x' of |y - F(x')| + delta
    # where we apply a constraints function (of box constraints) of
    # |x - x'| <= xtol  (for each i in x)
    #
    # if hausdorff = some iterable, delta = |x - x'|/hausdorff
    # if hausdorff = True, delta = |x - x'|/spread(x); using the dataset range
    # if hausdorff = False, delta = 0.0
    #
    # if ipop, then DE else Powell; ytol is used in VTR(ytol)
    # and will terminate when cost <= ytol
    x,y = _get_xy(point)
    y = asarray(y)
    # catch cases where yptp or y will cause issues in normalization
   #if not isfinite(yptp): return 0.0 #FIXME: correct?  shouldn't happen
   #if yptp == 0: from numpy import inf; return inf #FIXME: this is bad

    # build the cost function
    if hausdorff: # distance in all directions
      def cost(rv):
        '''cost = |y - F(x')| + |x - x'| for each x,y (point in dataset)'''
        _y = model(rv)
        if not isfinite(_y): return abs(_y)
        errs = seterr(invalid='ignore', divide='ignore') # turn off warning 
        z = abs((asarray(x) - rv)/ptp)  # normalize by range
        m = abs(y - _y)/yptp            # normalize by range
        seterr(invalid=errs['invalid'], divide=errs['divide']) # turn on warning
        return m + sum(z[isfinite(z)])
    else:  # vertical distance only
      def cost(rv):
        '''cost = |y - F(x')| for each x,y (point in dataset)'''
        return abs(y - model(rv))

    if debug:
      print("rv: %s" % str(x))
      print("cost: %s" % cost(x))

    # if xtol=0, radius is difference in x,y and x,F(x); skip the optimization
    try:
      if not imax or not max(xtol): #iterables
        return cost(x)
    except TypeError:
      if not xtol: #non-iterables
        return cost(x)

    # set the range constraints
    xtol = asarray(xtol)
    bounds = list(zip( x - xtol, x + xtol ))

    if debug:
      print("lower: %s" % str(zip(*bounds)[0]))
      print("upper: %s" % str(zip(*bounds)[1]))

    # optimize where initially x' = x
    stepmon = Monitor()
    if debug: stepmon = VerboseMonitor(1)
    #XXX: edit settings?
    MINMAX = 1 #XXX: confirm MINMAX=1 is minimization
    ftol = ytol
    gtol = None  # use VTRCOG
    if ipop:
      results = diffev2(cost, bounds, ipop, ftol=ftol, gtol=gtol, \
                        itermon = stepmon, maxiter=imax, bounds=bounds, \
                        full_output=1, disp=0, handler=False)
    else:
      results = fmin_powell(cost, x, ftol=ftol, gtol=gtol, \
                            itermon = stepmon, maxiter=imax, bounds=bounds, \
                            full_output=1, disp=0, handler=False)
   #solved = results[0]            # x'
    func_opt = MINMAX * results[1] # cost(x')
    if debug:
      print("solved: %s" % results[0])
      print("cost: %s" % func_opt)

    # get the minimum distance |y - F(x')|
    return func_opt
Example #20
File: discrete.py (project: agamdua/mystic)
def impose_feasible(cutoff, data, guess=None, **kwds):
  """impose shortness on a given list of parameters w,x,y.

Optimization on w,x,y over the given bounds seeks sum(infeasibility) = 0.
  (this function is not ???-preserving)

Inputs:
    cutoff -- maximum acceptable deviation from shortness
    data -- a dataset of observed points (these points are 'static')
    guess -- the scenario providing an initial guess at feasibility,
        or a tuple of dimensions of the target scenario

Additional Inputs:
    tol -- acceptable optimizer termination before sum(infeasibility) = 0.
    bounds -- a tuple of sample bounds:   bounds = (lower_bounds, upper_bounds)
    constraints -- a function that takes a flat list of parameters
        x' = constraints(x)

Outputs:
    pm -- a scenario with desired shortness
"""
  from numpy import sum, asarray
  from mystic.math.legacydata import dataset
  from mystic.math.distance import lipschitz_distance, infeasibility, _npts
  if guess is None:
    message = "Requires a guess scenario, or a tuple of scenario dimensions."
    raise TypeError(message)
  # get initial guess
  if hasattr(guess, 'pts'): # guess is a scenario
    pts = guess.pts    # number of x
    guess = guess.flatten(all=True)
  else:
    pts = guess        # guess is given as a tuple of 'pts'
    guess = None
  npts = _npts(pts)    # number of Y
  long_form = len(pts) - list(pts).count(2) # can use '2^K compressed format'

  # prepare bounds for solver
  bounds = kwds.pop('bounds', None)
  # if bounds are not set, use the default optimizer bounds
  if bounds is None:
    lower_bounds = []; upper_bounds = []
    for n in pts:  # bounds for n*x in each dimension  (x2 due to weights)
      lower_bounds += [None]*n * 2
      upper_bounds += [None]*n * 2
    # also need bounds for npts*y values
    lower_bounds += [None]*npts
    upper_bounds += [None]*npts
    bounds = lower_bounds, upper_bounds
  bounds = asarray(bounds).T

  # plug in the 'constraints' function:  param' = constraints(param)
  # constraints should impose_mean(y,w), and possibly sum(weights)
  constraints = kwds.pop('constraints', None) # default is no constraints
  if not constraints:  # if None (default), there are no constraints
    constraints = lambda x: x

  _self = kwds.pop('with_self', True) # default includes self in shortness
  if _self is not False: _self = True
  # tolerance for optimization on sum(y)
  tol = kwds.pop('tol', 0.0) # default
  npop = kwds.pop('npop', 20) #XXX: tune npop?
  maxiter = kwds.pop('maxiter', 1000) #XXX: tune maxiter?

  # if no guess was made, then use bounds constraints
  if guess is None:
    if npop:
      guess = bounds
    else:  # fmin_powell needs a list params (not bounds)
      guess = [(a + b)/2. for (a,b) in bounds]

  # construct cost function to reduce sum(lipschitz_distance)
  def cost(rv):
    """compute cost from a 1-d array of model parameters,
    where:  cost = | sum(lipschitz_distance) | """
    _data = dataset()
    _pm = scenario()
    _pm.load(rv, pts)      # here rv is param: w,x,y
    if not long_form:
      positions = _pm.select(*range(npts))
    else: positions = _pm.positions
    _data.load( data.coords, data.values )                   # LOAD static
    if _self:
      _data.load( positions, _pm.values )                    # LOAD dynamic
    _data.lipschitz = data.lipschitz                         # LOAD L
    Rv = lipschitz_distance(_data.lipschitz, _pm, _data, tol=cutoff, **kwds)
    v = infeasibility(Rv, cutoff)
    return abs(sum(v))

  # construct and configure optimizer
  debug = False  #!!!
  maxfun = 1e+6
  crossover = 0.9; percent_change = 0.9
  ftol = abs(tol); gtol = None

  if debug:
    print "lower bounds: %s" % bounds.T[0]
    print "upper bounds: %s" % bounds.T[1]
  # print "initial value: %s" % guess
  # use optimization to get feasible points
  from mystic.solvers import diffev2, fmin_powell
  from mystic.monitors import Monitor, VerboseMonitor
  from mystic.strategy import Best1Bin, Best1Exp
  evalmon = Monitor();  stepmon = Monitor(); strategy = Best1Exp
  if debug: stepmon = VerboseMonitor(10)  #!!!
  if npop: # use VTR
    results = diffev2(cost, guess, npop, ftol=ftol, gtol=gtol, bounds=bounds,\
                      maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
                      cross=crossover, scale=percent_change, strategy=strategy,\
                      evalmon=evalmon, itermon=stepmon,\
                      full_output=1, disp=0, handler=False)
  else: # use VTR
    results = fmin_powell(cost, guess, ftol=ftol, gtol=gtol, bounds=bounds,\
                      maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
                      evalmon=evalmon, itermon=stepmon,\
                      full_output=1, disp=0, handler=False)
  # repack the results
  pm = scenario()
  pm.load(results[0], pts)            # params: w,x,y
 #if debug: print "final cost: %s" % results[1]
  if debug and results[2] >= maxiter: # iterations
    print "Warning: constraints solver terminated at maximum iterations"
 #func_evals = results[3]           # evaluation
  return pm
Example #21
xs_ = polyfit(x, y, 2)
ys = objective(xs, x, y)
ys_ = objective(xs_, x, y)

if __name__ == '__main__':

    from mystic.solvers import diffev2, fmin_powell
    from mystic.math import almostEqual
    # from mystic.monitors import VerboseMonitor
    # mon = VerboseMonitor(10)

    result = diffev2(objective,
                     args=args,
                     x0=bounds,
                     bounds=bounds,
                     npop=40,
                     ftol=1e-8,
                     gtol=100,
                     disp=False,
                     full_output=True)  #, itermon=mon)
    # print("%s %s" % (result[0], xs))
    assert almostEqual(result[0], xs, rel=1e-1)
    assert almostEqual(result[1], ys, rel=1e-1)

    result = fmin_powell(objective,
                         args=args,
                         x0=[0.0, 0.0],
                         bounds=bounds,
                         disp=False,
                         full_output=True)
    # print("%s %s" % (result[0], xs))
Example #22
File: discrete.py (project: agamdua/mystic)
def impose_valid(cutoff, model, guess=None, **kwds):
  """impose model validity on a given list of parameters w,x,y

Optimization on w,x,y over the given bounds seeks sum(infeasibility) = 0.
  (this function is not ???-preserving)

Inputs:
    cutoff -- maximum acceptable model invalidity |y - F(x')|; a single value
    model -- the model function, y' = F(x'), that approximates reality, y = G(x)
    guess -- the scenario providing an initial guess at validity,
        or a tuple of dimensions of the target scenario

Additional Inputs:
    hausdorff -- norm; where if given, ytol = |y - F(x')| + |x - x'|/norm
    xtol -- acceptable pointwise graphical distance of model from reality
    tol -- acceptable optimizer termination before sum(infeasibility) = 0.
    bounds -- a tuple of sample bounds:   bounds = (lower_bounds, upper_bounds)
    constraints -- a function that takes a flat list of parameters
        x' = constraints(x)

Outputs:
    pm -- a scenario with desired model validity

Notes:
    xtol defines the n-dimensional base of a pillar of height cutoff, centered at
    each point. The region inside the pillar defines the space where a "valid"
    model must intersect. If xtol is not specified, then the base of the pillar
    will be a Dirac at x' = x. This function performs an optimization to find
    a set of points where the model is valid. Here, tol is used to set the
    optimization termination for the sum(graphical_distances), while cutoff is
    used in defining the graphical_distance between x,y and x',F(x').
"""
  from numpy import sum as _sum, asarray
  from mystic.math.distance import graphical_distance, infeasibility, _npts
  if guess is None:
    message = "Requires a guess scenario, or a tuple of scenario dimensions."
    raise TypeError(message)
  # get initial guess
  if hasattr(guess, 'pts'): # guess is a scenario
    pts = guess.pts    # number of x
    guess = guess.flatten(all=True)
  else:
    pts = guess        # guess is given as a tuple of 'pts'
    guess = None
  npts = _npts(pts)    # number of Y

  # prepare bounds for solver
  bounds = kwds.pop('bounds', None)
  # if bounds are not set, use the default optimizer bounds
  if bounds is None:
    lower_bounds = []; upper_bounds = []
    for n in pts:  # bounds for n*x in each dimension  (x2 due to weights)
      lower_bounds += [None]*n * 2
      upper_bounds += [None]*n * 2
    # also need bounds for npts*y values
    lower_bounds += [None]*npts
    upper_bounds += [None]*npts
    bounds = lower_bounds, upper_bounds
  bounds = asarray(bounds).T

  # plug in the 'constraints' function:  param' = constraints(param)
  constraints = kwds.pop('constraints', None) # default is no constraints
  if not constraints:  # if None (default), there are no constraints
    constraints = lambda x: x

  # 'wiggle room' tolerances
  ipop = kwds.pop('ipop', 10) #XXX: tune ipop (inner optimization)?
  imax = kwds.pop('imax', 10) #XXX: tune imax (inner optimization)?
  # tolerance for optimization on sum(y)
  tol = kwds.pop('tol', 0.0) # default
  npop = kwds.pop('npop', 20) #XXX: tune npop (outer optimization)?
  maxiter = kwds.pop('maxiter', 1000) #XXX: tune maxiter (outer optimization)?

  # if no guess was made, then use bounds constraints
  if guess is None:
    if npop:
      guess = bounds
    else:  # fmin_powell needs a list params (not bounds)
      guess = [(a + b)/2. for (a,b) in bounds]

  # construct cost function to reduce sum(infeasibility)
  def cost(rv):
    """compute cost from a 1-d array of model parameters,
    where: cost = | sum( infeasibility ) | """
    # converting rv to scenario
    points = scenario()
    points.load(rv, pts)
    # calculate infeasibility
    Rv = graphical_distance(model, points, ytol=cutoff, ipop=ipop, \
                                                        imax=imax, **kwds)
    v = infeasibility(Rv, cutoff)
    # converting v to E
    return _sum(v) #XXX: abs ?

  # construct and configure optimizer
  debug = False  #!!!
  maxfun = 1e+6
  crossover = 0.9; percent_change = 0.8
  ftol = abs(tol); gtol = None #XXX: optimally, should be VTRCOG...

  if debug:
    print "lower bounds: %s" % bounds.T[0]
    print "upper bounds: %s" % bounds.T[1]
  # print "initial value: %s" % guess
  # use optimization to get model-valid points
  from mystic.solvers import diffev2, fmin_powell
  from mystic.monitors import Monitor, VerboseMonitor
  from mystic.strategy import Best1Bin, Best1Exp
  evalmon = Monitor();  stepmon = Monitor(); strategy = Best1Exp
  if debug: stepmon = VerboseMonitor(2)  #!!!
  if npop: # use VTR
    results = diffev2(cost, guess, npop, ftol=ftol, gtol=gtol, bounds=bounds,\
                      maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
                      cross=crossover, scale=percent_change, strategy=strategy,\
                      evalmon=evalmon, itermon=stepmon,\
                      full_output=1, disp=0, handler=False)
  else: # use VTR
    results = fmin_powell(cost, guess, ftol=ftol, gtol=gtol, bounds=bounds,\
                      maxiter=maxiter, maxfun=maxfun, constraints=constraints,\
                      evalmon=evalmon, itermon=stepmon,\
                      full_output=1, disp=0, handler=False)
  # repack the results
  pm = scenario()
  pm.load(results[0], pts)            # params: w,x,y
 #if debug: print "final cost: %s" % results[1]
  if debug and results[2] >= maxiter: # iterations
    print "Warning: constraints solver terminated at maximum iterations"
 #func_evals = results[3]           # evaluation
  return pm
Example #23

if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual
    from mystic.monitors import Monitor, VerboseMonitor
    mon = VerboseMonitor(10)  #,10)

    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     penalty=pf,
                     constraints=constraint,
                     npop=52,
                     ftol=1e-8,
                     gtol=1000,
                     disp=True,
                     full_output=True,
                     cross=0.1,
                     scale=0.9,
                     itermon=mon)
    # FIXME: solves at 0%... but w/ vowels fixed 80%?
    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=constraint, npop=52, ftol=1e-8, gtol=2000, disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon)
    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=constraint, npop=130, ftol=1e-8, gtol=1000, disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon)
    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=constraint, npop=260, ftol=1e-8, gtol=500, disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon)
    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=constraint, npop=520, ftol=1e-8, gtol=100, disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon)

    print(result[0])
    assert almostEqual(result[0], xs, tol=1e-8)
    assert almostEqual(result[1], ys, tol=1e-4)
cf = generate_constraint(generate_solvers(simplify(equations)))

from mystic.constraints import integers


@integers()
def round(x):
    return x


if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     penalty=pf,
                     constraints=round,
                     npop=20,
                     gtol=50,
                     disp=True,
                     full_output=True)

    print(result[0])
    assert almostEqual(result[0], xs, tol=1e-8)  #XXX: fails b/c rel & zero?
    assert almostEqual(result[1], ys, tol=1e-4)

# EOF
Example #25
File: g03.py (project: nadiiaaii/mystic)
def cf(len=3):
    return generate_constraint(generate_solvers(solve(equations(len))))


def pf(len=3):
    return generate_penalty(generate_conditions(equations(len)))


if __name__ == '__main__':
    x = xs(10)
    y = ys(len(x))
    bounds = bounds(len(x))
    cf = cf(len(x))

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     constraints=cf,
                     npop=40,
                     gtol=500,
                     disp=False,
                     full_output=True)

    assert almostEqual(result[0], x, tol=1e-2)
    assert almostEqual(result[1], y, tol=1e-2)

# EOF
Example #26
File: g03.py (project: Magellen/mystic)
def equations(len=3):
    eqn = "\nsum(["
    for i in range(len):
        eqn += 'x%s**2, ' % str(i)
    return eqn[:-2]+"]) - 1.0 = 0.0\n"
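# for example, equations(3) yields "\nsum([x0**2, x1**2, x2**2]) - 1.0 = 0.0\n"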

def cf(len=3):
    return generate_constraint(generate_solvers(solve(equations(len))))
def pf(len=3):
    return generate_penalty(generate_conditions(equations(len)))



if __name__ == '__main__':
    x = xs(10)
    y = ys(len(x))
    bounds = bounds(len(x))
    cf = cf(len(x))

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

    result = diffev2(objective, x0=bounds, bounds=bounds, constraints=cf, npop=40, gtol=500, disp=False, full_output=True)

    assert almostEqual(result[0], x, tol=1e-2)
    assert almostEqual(result[1], y, tol=1e-2)



# EOF
Example #27
solver = as_constraint(penalty)
#solver = discrete(range(11))(solver)  #XXX: MOD = range(11) instead of LARGE
#FIXME: constrain to 'int' with discrete is very fragile!  required #MODs

def constraint(x):
    from numpy import round
    return round(solver(x))

# better is to constrain to integers, penalize otherwise
from mystic.constraints import integers

@integers()
def round(x):
  return x
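# round is now a constraints function: the integers() decorator coerces every
# entry of its output to an integer, so it can be passed as constraints=round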


if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

    result = diffev2(objective, x0=bounds, bounds=bounds, penalty=penalty, constraints=round, npop=30, gtol=50, disp=True, full_output=True)

    print(result[0])
    assert almostEqual(result[0], xs, tol=1e-8) #XXX: fails b/c rel & zero?
    assert almostEqual(result[1], ys, tol=1e-4)


# EOF
Example #28
from mystic.constraints import unique

from numpy import round, hstack, clip
def constraint(x):
    x = round(x).astype(int) # force round and convert type to int
    x = clip(x, 1,n)         #XXX: impose bounds
    x = unique(x, range(1,n+1))
    return x
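# net effect (a hedged reading): any candidate is rounded to integers, clipped
# to [1, n], and duplicates are replaced via unique(), so the result is a set of
# distinct integers drawn from 1..n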


if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual
    from mystic.monitors import Monitor, VerboseMonitor
    mon = VerboseMonitor(10)#,10)

    result = diffev2(objective, x0=bounds, bounds=bounds, penalty=penalty, constraints=constraint, npop=50, ftol=1e-8, gtol=200, disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon)

    print(result[0])
    assert almostEqual(result[0], xs[0], tol=1e-8) \
        or almostEqual(result[0], xs[1], tol=1e-8) \
        or almostEqual(result[0], xs[2], tol=1e-8) \
        or almostEqual(result[0], xs[3], tol=1e-8) \
        or almostEqual(result[0], xs[4], tol=1e-8) \
        or almostEqual(result[0], xs[-1], tol=1e-8)
    assert almostEqual(result[1], ys, tol=1e-4)


# EOF
Example #29
def objective(x, p=5):  # inverted objective, to find the max
    return -1 * percentile(
        p, [np.dot(np.atleast_2d(u[i]), x)[0] for i in range(0, M - 1)])


def constraint(x, p=95, v=C):  # 95%(xTsx) - v <= 0
    x = np.atleast_2d(x)
    return percentile(
        p, [np.dot(np.dot(x, s[i]), x.T)[0, 0] for i in range(0, M - 1)]) - v


bounds = [(0, 1) for i in range(0, N)]

from mystic.penalty import quadratic_inequality


@quadratic_inequality(constraint, k=1e4)
def penalty(x):
    return 0.0


from mystic.solvers import diffev2
from mystic.monitors import VerboseMonitor
mon = VerboseMonitor(10)

result = diffev2(objective, x0=bounds, penalty=penalty, npop=10, gtol=200, \
                 disp=False, full_output=True, itermon=mon, maxiter=M*N*100)

print(result[0])
print(result[1])
Example #30
from mystic.symbolic import generate_constraint, generate_solvers, simplify
from mystic.symbolic import generate_penalty, generate_conditions

equations = """
-x0 + 0.0193*x2 <= 0.0
-x1 + 0.00954*x2 <= 0.0
-pi*x2**2*x3 - (4/3.)*pi*x2**3 + 1296000.0 <= 0.0
x3 - 240.0 <= 0.0
"""
cf = generate_constraint(generate_solvers(simplify(equations)))
pf = generate_penalty(generate_conditions(equations), k=1e12)
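
A quick, hedged sanity check of cf and pf (the trial point below is an assumption, not part of the original example):

_trial = cf([1.0, 1.0, 50.0, 100.0])   # map a candidate toward the feasible region
print(pf(_trial))                       # prints 0.0 only if all four conditions hold at _trial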



if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual
    from mystic.monitors import VerboseMonitor
    mon = VerboseMonitor(10)

    result = diffev2(objective, x0=bounds, bounds=bounds, constraints=cf, penalty=pf, npop=40, gtol=50, disp=False, full_output=True, itermon=mon)

    assert almostEqual(result[0], xs, rel=1e-2)
    assert almostEqual(result[1], ys, rel=1e-2)



# EOF
Example #31

@quadratic_inequality(penalty1, k=1e12)
@quadratic_inequality(penalty2, k=1e12)
@quadratic_inequality(penalty3, k=1e12)
@quadratic_inequality(penalty4, k=1e12)
def penalty(x):
    return 0.0


solver = as_constraint(penalty)

if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     penalty=penalty,
                     npop=40,
                     gtol=500,
                     disp=False,
                     full_output=True)

    assert almostEqual(result[0], xs, rel=1e-2)
    assert almostEqual(result[1], ys, rel=1e-2)

# EOF
Example #32
"""

from mystic.symbolic import generate_penalty, generate_conditions
pf = generate_penalty(generate_conditions(equations))
from mystic.symbolic import generate_constraint, generate_solvers, solve
cf = generate_constraint(generate_solvers(solve(equations)))

from numpy import round as npround

if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, npop=20, gtol=50, disp=True, full_output=True)
    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=npround, npop=40, gtol=50, disp=True, full_output=True)
    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     constraints=cf,
                     npop=4,
                     gtol=1,
                     disp=True,
                     full_output=True)

    print(result[0])
    assert almostEqual(result[0], xs, tol=1e-8)  #XXX: fails b/c rel & zero?
    assert almostEqual(result[1], ys, tol=1e-4)

# EOF
Example #33
from mystic.symbolic import generate_conditions, generate_penalty
pf = generate_penalty(generate_conditions(equations))
from mystic.symbolic import generate_constraint, generate_solvers, simplify
cf = generate_constraint(generate_solvers(simplify(equations)))

# inverted objective, used in solving for the maximum
_objective = lambda x: -objective(x)


if __name__ == '__main__':

  from mystic.solvers import diffev2, fmin_powell
  from mystic.math import almostEqual

  result = diffev2(objective, x0=bounds, bounds=bounds, constraints=cf, penalty=pf, npop=40, disp=False, full_output=True)
  assert almostEqual(result[0], xs, rel=1e-2)
  assert almostEqual(result[1], ys, rel=1e-2)

  result = fmin_powell(objective, x0=[0.0,0.0], bounds=bounds, constraints=cf, penalty=pf, disp=False, full_output=True)
  assert almostEqual(result[0], xs, rel=1e-2)
  assert almostEqual(result[1], ys, rel=1e-2)

  # alternately, solving for the maximum
  result = diffev2(_objective, x0=bounds, bounds=bounds, constraints=cf, penalty=pf, npop=40, disp=False, full_output=True)
  assert almostEqual( result[0], _xs, rel=1e-2)
  assert almostEqual(-result[1], _ys, rel=1e-2)

  result = fmin_powell(_objective, x0=[0,0], bounds=bounds, constraints=cf, penalty=pf, disp=False, full_output=True)
  assert almostEqual( result[0], _xs, rel=1e-2)
  assert almostEqual(-result[1], _ys, rel=1e-2)
Example #34
    p = 0.01 * p * len(x)
    if int(p) != p:
        return x[int(np.floor(p))]
    p = int(p)
    return x[p:p+2].mean()

def objective(x, p=5): # inverted objective, to find the max
    return -1*percentile(p, [np.dot(np.atleast_2d(u[i]), x)[0] for i in range(0,M-1)])


def constraint(x, p=95, v=C): # 95%(xTsx) - v <= 0
    x = np.atleast_2d(x)
    return percentile(p, [np.dot(np.dot(x,s[i]),x.T)[0,0] for i in range(0,M-1)]) - v

bounds = [(0,1) for i in range(0,N)]

from mystic.penalty import quadratic_inequality
@quadratic_inequality(constraint, k=1e4)
def penalty(x):
  return 0.0

from mystic.solvers import diffev2
from mystic.monitors import VerboseMonitor
mon = VerboseMonitor(10)

result = diffev2(objective, x0=bounds, penalty=penalty, npop=10, gtol=200, \
                 disp=False, full_output=True, itermon=mon, maxiter=M*N*100)

print(result[0])
print(result[1])