Example 1
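# NOTE: the constraint-decorator tests in these snippets assume module-level
# imports that are not shown here, presumably
#   from mystic.constraints import with_mean, with_spread
#   from mystic.math import almostEqual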
def test_inner_solver(nested, solver):

  from mystic.monitors import Monitor
  evalmon = Monitor()
  stepmon = Monitor()

  from mystic.math.measures import mean, spread
  @with_spread(5.0)
  @with_mean(5.0)
  def constraints(x):
    return x

  def cost(x):
    return abs(sum(x) - 5.0)

  from numpy import array
  solver = solver(5)
  lb,ub = [0,0,0,0,0],[100,100,100,100,100]
  solver.SetRandomInitialPoints(lb, ub)
  solver.SetConstraints(constraints)
  solver.SetStrictRanges(lb, ub)
  nested = nested(5, 4)
  nested.SetEvaluationMonitor(evalmon)
  nested.SetGenerationMonitor(stepmon)
  nested.SetNestedSolver(solver)
  nested.Solve(cost, disp=False)
  y = nested.Solution()

  assert almostEqual(mean(y), 5.0, tol=1e-15)
  assert almostEqual(spread(y), 5.0, tol=1e-15)
  assert almostEqual(cost(y), 4*(5.0), tol=1e-6)
Example 2
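# NOTE: this comparison presumably assumes module-level imports such as
#   import mystic.solvers as solvers            # mystic's fmin, fmin_powell, ...
#   from scipy.optimize import fmin, fmin_powell # looked up below by bare name
#   from mystic.models import rosen              # or scipy.optimize.rosen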
def test_compare(solvername, x0, **kwds):
  exec "my = solvers.%s" % solvername
  exec "sp = %s" % solvername
  maxiter = kwds.get('maxiter', None)
  maxfun = kwds.get('maxfun', None)
  my_x = my(rosen, x0, disp=0, full_output=True, **kwds)
# itermon = kwds.pop('itermon',None)
  sp_x = sp(rosen, x0, disp=0, full_output=True, **kwds)
  # similar bestSolution and bestEnergy
# print 'my:', my_x[0:2]
# print 'sp:', sp_x[0:2]
  if my_x[3] == sp_x[-2]: # mystic can stop at iter=0, scipy can't
    assert almostEqual(my_x[0], sp_x[0])
    assert almostEqual(my_x[1], sp_x[1])
  # print (iters, fcalls) and [maxiter, maxfun]
# print my_x[2:4], (sp_x[-3],sp_x[-2]), [maxiter, maxfun]
  # test same number of iters and fcalls
  if maxiter and maxfun is not None:
    assert my_x[2] == sp_x[-3]
    assert my_x[3] == sp_x[-2]
#   # test fcalls <= maxfun
#   assert my_x[3] <= maxfun
  if maxiter is not None:
    # test iters <= maxiter
    assert my_x[2] <= maxiter
  return 
Example 3
def test_nested_solver(nested, solver):

  from mystic.monitors import Monitor
  evalmon = Monitor()
  stepmon = Monitor()

  from mystic.math.measures import mean, spread
  @with_spread(5.0)
  @with_mean(5.0)
  def constraints(x):
    return x

  def cost(x):
    return abs(sum(x) - 5.0)

  from numpy import array
  nested = nested(5, 4)
  nested.SetEvaluationMonitor(evalmon)
  nested.SetGenerationMonitor(stepmon)
  nested.SetConstraints(constraints)
  nested.SetNestedSolver(solver)
  nested.Solve(cost, disp=False)
  y = nested.Solution()

  assert almostEqual(mean(y), 5.0, tol=1e-15)
  assert almostEqual(spread(y), 5.0, tol=1e-15)
  assert almostEqual(cost(y), 4*(5.0), tol=1e-6)
Example 4
def test_multi_liner(solver):

  from mystic.monitors import Monitor
  evalmon = Monitor()
  stepmon = Monitor()

  from mystic.math.measures import mean, spread
  @with_spread(5.0)
  @with_mean(5.0)
  def constraints(x):
    return x

  def cost(x):
    return abs(sum(x) - 5.0)

  from numpy import array
  x = array([1,2,3,4,5])
  solver = solver(len(x))
  solver.SetInitialPoints(x)
  solver.SetEvaluationMonitor(evalmon)
  solver.SetGenerationMonitor(stepmon)
  solver.SetConstraints(constraints)
  solver.Solve(cost, disp=False)
  y = solver.Solution()

  assert almostEqual(mean(y), 5.0, tol=1e-15)
  assert almostEqual(spread(y), 5.0, tol=1e-15)
  assert almostEqual(cost(y), 4*(5.0), tol=1e-6)
Example 5
def test_impose_reweighted_variance():

  x0 = [1,2,3,4,5]
  w0 = [3,1,1,1,1]
  v = 1.0

  w = impose_reweighted_variance(v, x0, w0)
  assert almostEqual(variance(x0,w), v)
  assert almostEqual(mean(x0,w0), mean(x0,w))
Example 6
def test_generate_constraint():

  constraints = """
  spread([x0, x1, x2]) = 10.0
  mean([x0, x1, x2]) = 5.0"""

  from mystic.math.measures import mean, spread
  solv = generate_solvers(constraints)
  assert almostEqual(mean(solv[0]([1,2,3])), 5.0)
  assert almostEqual(spread(solv[1]([1,2,3])), 10.0)

  constraint = generate_constraint(solv)
  assert almostEqual(constraint([1,2,3]), [0.0,5.0,10.0], 1e-10)
Example 7
def test_with_mean_spread():

  from mystic.math.measures import mean, spread, impose_mean, impose_spread

  @with_spread(50.0)
  @with_mean(5.0)
  def constrained_squared(x):
    return [i**2 for i in x]

  from numpy import array
  x = array([1,2,3,4,5])
  y = impose_spread(50.0, impose_mean(5.0,[i**2 for i in x]))
  assert almostEqual(mean(y), 5.0, tol=1e-15)
  assert almostEqual(spread(y), 50.0, tol=1e-15)
  assert constrained_squared(x) == y
Example 8
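# NOTE: inner is presumably mystic.constraints.inner, a decorator that wraps
# one constraint function inside another so both are applied to x.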
def test_constrain():

  from mystic.math.measures import mean, spread
  from mystic.math.measures import impose_mean, impose_spread
  def mean_constraint(x, mean=0.0):
    return impose_mean(mean, x)

  def range_constraint(x, spread=1.0):
    return impose_spread(spread, x)

  @inner(inner=range_constraint, kwds={'spread':5.0})
  @inner(inner=mean_constraint, kwds={'mean':5.0})
  def constraints(x):
    return x

  def cost(x):
    return abs(sum(x) - 5.0)

  from mystic.solvers import fmin_powell
  from numpy import array
  x = array([1,2,3,4,5])
  y = fmin_powell(cost, x, constraints=constraints, disp=False)

  assert mean(y) == 5.0
  assert spread(y) == 5.0
  assert almostEqual(cost(y), 4*(5.0))
Example 9
 def factory(x, *args, **kwds):
     # apply decorated constraints function
     x = constraints(x, *args, **kwds)
     # constrain x such that sum(x) == mass
     if not almostEqual(sum(x), mass):
         x = normalize(x, mass=mass)
     return x
Example 10
 def factory(x, *args, **kwds):
     # apply decorated constraints function
     x = constraints(x, *args, **kwds)
     # constrain x such that mean(x) == target
     if not almostEqual(mean(x), target):
         x = impose_mean(target, x)#, weights=weights)
     return x
Example 11
 def constraints(x):
     # constrain the last x_i to be the same value as the first x_i
     x[-1] = x[0]
     # constrain x such that mean(x) == target
     if not almostEqual(mean(x), target):
         x = impose_mean(target, x)
     return x
Example 12
def test_impose_reweighted_mean():

  x0 = [1,2,3,4,5]
  w0 = [3,1,1,1,1]
  m = 3.5

  w = impose_reweighted_mean(m, x0, w0)
  assert almostEqual(mean(x0,w), m)
Example 13
def test_one_liner(solver):

  from mystic.math.measures import mean, spread
  @with_spread(5.0)
  @with_mean(5.0)
  def constraints(x):
    return x

  def cost(x):
    return abs(sum(x) - 5.0)

  from numpy import array
  x = array([1,2,3,4,5])
  y = solver(cost, x, constraints=constraints, disp=False)

  assert almostEqual(mean(y), 5.0, tol=1e-15)
  assert almostEqual(spread(y), 5.0, tol=1e-15)
  assert almostEqual(cost(y), 4*(5.0), tol=1e-6)
Example 14
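# NOTE: solve and issolution are presumably from mystic.constraints:
# solve searches for an x that satisfies the constraint, and issolution
# checks whether a given x satisfies it.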
def test_solve_constraint():

  from mystic.math.measures import mean
  @with_mean(1.0)
  def constraint(x):
    x[-1] = x[0]
    return x

  x = solve(constraint, guess=[2,3,1])

  assert almostEqual(mean(x), 1.0, tol=1e-15)
  assert x[-1] == x[0]
  assert issolution(constraint, x)
Example 15
def test_numpy_penalty():

  constraints = """
  mean([x0, x1, x2]) = 5.0
  x0 = x1 + x2"""

  ineq,eq = generate_conditions(constraints)
  assert eq[0]([7,5,3]) == 0.0
  assert eq[1]([7,4,3]) == 0.0

  penalty = generate_penalty((ineq,eq))
  assert penalty([9.0,5,4.0]) == 100.0
  assert penalty([7.5,4,3.5]) == 0.0

  constraint = as_constraint(penalty, solver='fmin')
  assert almostEqual(penalty(constraint([3,4,5])), 0.0, 1e-10)
Example 16
 def constraints(rv):
   c = product_measure().load(rv, npts)
   # NOTE: bounds wi in [0,1] enforced by filtering
   # impose norm on each discrete measure
   for measure in c:
     if not almostEqual(float(measure.mass), 1.0, tol=atol, rel=rtol):
       measure.normalize()
   # impose expectation on product measure
   ##################### begin function-specific #####################
   E = float(c.expect(model))
   if not (E <= float(target[0] + error[0])) \
   or not (float(target[0] - error[0]) <= E):
     c.set_expect(target[0], model, (x_lb,x_ub), tol=error[0])
   ###################### end function-specific ######################
   # extract weights and positions
   return c.flatten()
Example 17
def test_varnamelist():
    # Demonstrates usage of varnamelist
    varnamelist = ['length', 'width', 'height']
    string = "length = height**2 - 3.5*width"
    # print "symbolic string:\n", string.rstrip()
    string = replace_variables(string, varnamelist, 'x')
    cf = generate_constraint(generate_solvers(string))

    @inner(cf)
    def wrappedfunc(x):
        return x[0]

    #XXX: implement: wrap_constraint( cf, lambda x: x[0], ctype='inner') ?
    # print "c = constraints wrapped around x[0]"
    x0 = [2., 2., 3.]
    # print "c(%s): %s\n" % (x0, wrappedfunc(x0)) # Expected: 2.0
    assert almostEqual(wrappedfunc(x0), 2.0, tol=1e-15)
Example 18
def test_generate_penalty():

  constraints = """
  x0**2 = 2.5*x3 - a
  exp(x2/x0) >= b"""

  ineq,eq = generate_conditions(constraints, nvars=4, locals={'a':5.0, 'b':7.0})
  assert ineq[0]([4,0,0,1,0]) == 6.0
  assert eq[0]([4,0,0,1,0]) == 18.5

  penalty = generate_penalty((ineq,eq))
  assert penalty([1,0,2,2.4]) == 0.0
  assert penalty([1,0,0,2.4]) == 7200.0
  assert penalty([1,0,2,2.8]) == 100.0

  constraint = as_constraint(penalty, nvars=4, solver='fmin')
  assert almostEqual(penalty(constraint([1,0,0,2.4])), 0.0, 1e-10)
Example 19
def impose_reweighted_variance(v, samples, weights=None, solver=None):
    """impose a variance on a list of points by reweighting weights"""
    ndim = len(samples)
    if weights is None:
        weights = [1.0/ndim] * ndim
    if solver is None or solver == 'fmin':
        from mystic.solvers import fmin as solver
    elif solver == 'fmin_powell':
        from mystic.solvers import fmin_powell as solver
    elif solver == 'diffev':
        from mystic.solvers import diffev as solver
    elif solver == 'diffev2':
        from mystic.solvers import diffev2 as solver
    norm = sum(weights)
    m = mean(samples, weights)

    inequality = ""
    equality = ""; equality2 = ""; equality3 = ""
    for i in range(ndim):
        inequality += "x%s >= 0.0\n" % (i) # positive
        equality += "x%s + " % (i)         # normalized
        equality2 += "%s * x%s + " % (float(samples[i]),(i)) # mean
        equality3 += "x%s*(%s-%s)**2 + " % ((i),float(samples[i]),m) # var

    equality += "0.0 = %s\n" % float(norm)
    equality += equality2 + "0.0 = %s*%s\n" % (float(norm),m)
    equality += equality3 + "0.0 = %s*%s\n" % (float(norm),v)

    penalties = generate_penalty(generate_conditions(inequality))
    constrain = generate_constraint(generate_solvers(solve(equality)))

    def cost(x): return sum(x)

    results = solver(cost, weights, constraints=constrain, \
                     penalty=penalties, disp=False, full_output=True)
    wts = list(results[0])
    _norm = results[1] # should have _norm == norm
    warn = results[4]  # nonzero if didn't converge

    #XXX: better to fail immediately if xlo < m < xhi... or the below?
    if warn or not almostEqual(_norm, norm):
        print "Warning: could not impose mean through reweighting"
        return None #impose_variance(v, samples, weights), weights

    return wts #samples, wts  # "mean-preserving"
Example 20
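  # NOTE: fragment of a nested helper; slope, ZERO, and np (numpy) come from
  # the enclosing scope of the original script.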
  def cone_mesh(length):
    """ construct a conical mesh for a given length of cone """
    L1,L2,L3 = slope
    radius = length / L3 #XXX: * 0.5
    r0 = ZERO

    if almostEqual(radius, r0, tol=r0): radius = r0
    r = np.linspace(radius,radius,6) 
    r[0]= np.zeros(r[0].shape) 
    r[1] *= r0/radius
    r[5] *= r0/radius
    r[3]= np.zeros(r[3].shape) 

    p = np.linspace(0,2*np.pi,50) 
    R,P = np.meshgrid(r,p) 
    X,Y = L1 * R*np.cos(P), L2 * R*np.sin(P) 

    tmp=list() 
    for i in range(np.size(p)): 
      tmp.append([0,0,length,length,length,0]) # len = size(r)
    Z = np.array(tmp) 
    return X,Z,Y
Example 21
def test_matrix_interface():
    # Demonstrates linear_symbolic()
    A = asarray([[3., 4., 5.],
         [1., 6., -9.]])
    b = asarray([0., 0.])
    G = [1., 0., 0.]
    h = [5.]
    # print "equality constraints"
    # print "G: %s" % G
    # print "h: %s" % h
    # print "inequality constraints"
    # print "A:\n%s" % A
    # print "b: %s" % b
    constraints_string = linear_symbolic(A=A, b=b, G=G, h=h)
    cs = constraints_string.split('\n')
    assert cs[0] == "1.0*x0 + 0.0*x1 + 0.0*x2 <= 5.0"
    assert cs[1] == "3.0*x0 + 4.0*x1 + 5.0*x2 = 0.0"
    assert cs[2] == "1.0*x0 + 6.0*x1 + -9.0*x2 = 0.0"
    # print "symbolic string:\n", constraints_string.rstrip()
    pf = generate_penalty(generate_conditions(constraints_string))
    cn = as_constraint(pf)

    x0 = [1., 1., 1.]
    assert almostEqual(pf(cn(x0)), 0.0, tol=1e-2)
Example 22
def test_setexpectmeanvar(m, expect=5.0, tol=0.001):
    m.set_expect_mean_and_var((expect,expect), f, tol=tol)
    assert almostEqual(m.expect(f), expect, tol=tol)
    assert almostEqual(m.expect_var(f), expect, tol=tol)
Example 23
def test_expectvar(m, f):
    pos = m.positions
    if not (len(pos) and isinstance(pos[0], tuple)): # then m is a measure
        pos = [[i] for i in pos]
    expect = expected_variance(f, pos, m.weights)
    assert almostEqual(m.expect_var(f), expect)
Example 24
if __name__ == '__main__':

    from mystic.solvers import diffev2, fmin_powell
    from mystic.math import almostEqual

    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     penalty=pf,
                     constraint=cf,
                     npop=40,
                     disp=False,
                     full_output=True,
                     ftol=1e-10,
                     gtol=100)
    assert almostEqual(result[0], xs, rel=1e-2)
    assert almostEqual(result[1], ys, rel=1e-2)

    result = fmin_powell(objective,
                         x0=[0.0, 0.0],
                         bounds=bounds,
                         penalty=pf,
                         constraint=cf,
                         disp=False,
                         full_output=True,
                         gtol=3)
    assert almostEqual(result[0], xs, rel=1e-2)
    assert almostEqual(result[1], ys, rel=1e-2)

# EOF
Example 25

def cost(x):
    kx = constrain(x)
    y = objective(kx)
    mon(kx, y)
    return y


if __name__ == '__main__':
    from mystic.math import almostEqual

    result = so.fmin(cost, [1, 1, 1],
                     xtol=1e-6,
                     ftol=1e-6,
                     full_output=True,
                     disp=False)
    # check results are consistent with monitor
    assert almostEqual(result[1], min(mon.y), rel=1e-2)

    # check results satisfy constraints
    A, B, C = result[0]
    print(dict(A=A, B=B, C=C))

    eps = 0.2  #XXX: give it some small wiggle room for small violations
    assert A * B + C >= 1 - eps
    assert B <= A + eps
    assert (5 + eps) >= A >= -(5 + eps)
    assert (5 + eps) >= B >= -(5 + eps)
    assert (5 + eps) >= C >= -(5 + eps)
Example 26
try:
    from scipy.optimize import curve_fit
    xs, pcov = curve_fit(lambda x, *coeffs: y0(coeffs, x), x, y, p0=[1, 1, 1])
except ImportError:
    xs = x0
ys = objective(xs, x, y)

if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual
    # from mystic.monitors import VerboseMonitor
    # mon = VerboseMonitor(10)

    result = diffev2(objective,
                     args=args,
                     x0=bounds,
                     bounds=bounds,
                     npop=40,
                     ftol=1e-8,
                     gtol=100,
                     disp=False,
                     full_output=True)  #, itermon=mon)
    # print("%s %s" % (result[0], xs))
    assert almostEqual(result[0], xs, rel=2e-1)
    assert almostEqual(result[1], ys, rel=2e-1)

#XXX: how approximate the covariance matrix of estimates (pcov) w/ mystic?
#XXX: mystic should have leastsq

# EOF
Example 27
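# NOTE: fragment; presumably assumes module-level imports such as
#   import numpy as np; import mystic.symbolic as ms
#   import mystic.solvers as my; import mystic.math as mm
# with eqns (symbolic constraints) and cons (the constraint function)
# defined earlier in the script.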
pens = ms.generate_penalty(ms.generate_conditions(eqns), k=1e3)
bounds = [(0., None), (0., 4.)]

# get the objective
def objective(x):
  x = np.asarray(x)
  return x[0]**2 + 4*x[1]**2 - 32*x[1] + 64

x0 = np.random.rand(2)

# compare against the exact minimum
xs = np.array([2., 3.])
ys = objective(xs)


sol = my.fmin_powell(objective, x0, constraint=cons, penalty=pens, disp=False,
                     bounds=bounds, gtol=3, ftol=1e-6, full_output=True)

assert mm.almostEqual(sol[0], xs, tol=1e-2)
assert mm.almostEqual(sol[1], ys, tol=1e-2)


sol = my.diffev(objective, bounds, constraint=cons, penalty=pens, disp=False,
                bounds=bounds, npop=10, gtol=100, ftol=1e-6, full_output=True)

assert mm.almostEqual(sol[0], xs, tol=1e-2)
assert mm.almostEqual(sol[1], ys, tol=1e-2)


# EOF
Example 28
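# NOTE: fragment; n, objective, bounds, penalty, xs, and ys come from the
# enclosing script. mystic.constraints.unique appears to replace duplicate
# entries of x with unused values drawn from the given range.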
from mystic.constraints import unique

from numpy import round, hstack, clip
def constraint(x):
    x = round(x).astype(int) # force round and convert type to int
    x = clip(x, 1,n)         #XXX: impose bounds
    x = unique(x, range(1,n+1))
    return x


if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual
    from mystic.monitors import Monitor, VerboseMonitor
    mon = VerboseMonitor(10)#,10)

    result = diffev2(objective, x0=bounds, bounds=bounds, penalty=penalty, constraints=constraint, npop=50, ftol=1e-8, gtol=200, disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon)

    print(result[0])
    assert almostEqual(result[0], xs[0], tol=1e-8) \
        or almostEqual(result[0], xs[1], tol=1e-8) \
        or almostEqual(result[0], xs[2], tol=1e-8) \
        or almostEqual(result[0], xs[3], tol=1e-8) \
        or almostEqual(result[0], xs[4], tol=1e-8) \
        or almostEqual(result[0], xs[-1], tol=1e-8)
    assert almostEqual(result[1], ys, tol=1e-4)


# EOF
Example 29
def equations(len=3):
    eqn = "\nsum(["
    for i in range(len):
        eqn += 'x%s**2, ' % str(i)
    return eqn[:-2]+"]) - 1.0 = 0.0\n"

def cf(len=3):
    return generate_constraint(generate_solvers(solve(equations(len))))
def pf(len=3):
    return generate_penalty(generate_conditions(equations(len)))



if __name__ == '__main__':
    x = xs(10)
    y = ys(len(x))
    bounds = bounds(len(x))
    cf = cf(len(x))

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

    result = diffev2(objective, x0=bounds, bounds=bounds, constraints=cf, npop=40, gtol=500, disp=False, full_output=True)

    assert almostEqual(result[0], x, tol=1e-2)
    assert almostEqual(result[1], y, tol=1e-2)



# EOF
Example 30
    from mystic.solvers import diffev2
    from mystic.math import almostEqual
    from mystic.monitors import Monitor, VerboseMonitor
    mon = VerboseMonitor(10)  #,10)

    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     penalty=penalty,
                     constraints=constraint,
                     npop=50,
                     ftol=1e-8,
                     gtol=200,
                     disp=True,
                     full_output=True,
                     cross=0.1,
                     scale=0.9,
                     itermon=mon)

    print(result[0])
    assert almostEqual(result[0], xs[0], tol=1e-8) \
        or almostEqual(result[0], xs[1], tol=1e-8) \
        or almostEqual(result[0], xs[2], tol=1e-8) \
        or almostEqual(result[0], xs[3], tol=1e-8) \
        or almostEqual(result[0], xs[4], tol=1e-8) \
        or almostEqual(result[0], xs[-1], tol=1e-8)
    assert almostEqual(result[1], ys, tol=1e-4)

# EOF
Example 31
from mystic.symbolic import generate_conditions, generate_penalty
pf = generate_penalty(generate_conditions(equations))
from mystic.symbolic import generate_constraint, generate_solvers, simplify
cf = generate_constraint(generate_solvers(simplify(equations)))

# inverted objective, used in solving for the maximum
_objective = lambda x: -objective(x)


if __name__ == '__main__':

  from mystic.solvers import diffev2, fmin_powell
  from mystic.math import almostEqual

  result = diffev2(objective, x0=bounds, bounds=bounds, constraint=cf, penalty=pf, npop=40, disp=False, full_output=True)
  assert almostEqual(result[0], xs, rel=1e-2)
  assert almostEqual(result[1], ys, rel=1e-2)

  result = fmin_powell(objective, x0=[0.0,0.0], bounds=bounds, constraint=cf, penalty=pf, disp=False, full_output=True)
  assert almostEqual(result[0], xs, rel=1e-2)
  assert almostEqual(result[1], ys, rel=1e-2)

  # alternately, solving for the maximum
  result = diffev2(_objective, x0=bounds, bounds=bounds, constraint=cf, penalty=pf, npop=40, disp=False, full_output=True)
  assert almostEqual( result[0], _xs, rel=1e-2)
  assert almostEqual(-result[1], _ys, rel=1e-2)

  result = fmin_powell(_objective, x0=[0,0], bounds=bounds, constraint=cf, penalty=pf, npop=40, disp=False, full_output=True)
  assert almostEqual( result[0], _xs, rel=1e-2)
  assert almostEqual(-result[1], _ys, rel=1e-2)
Example 32
def test_griewangk(verbose=False):
    """Test Griewangk's function, which has many local minima.

Testing Griewangk:
Expected: x=[0.]*10 and f=0

Using DifferentialEvolutionSolver:
Solution:  [  8.87516194e-09   7.26058147e-09   1.02076001e-08   1.54219038e-08
  -1.54328461e-08   2.34589663e-08   2.02809360e-08  -1.36385836e-08
   1.38670373e-08   1.59668900e-08]
f value:  0.0
Iterations:  4120
Function evaluations:  205669
Time elapsed:  34.4936850071  seconds

Using DifferentialEvolutionSolver2:
Solution:  [ -2.02709316e-09   3.22017968e-09   1.55275472e-08   5.26739541e-09
  -2.18490470e-08   3.73725584e-09  -1.02315312e-09   1.24680355e-08
  -9.47898116e-09   2.22243557e-08]
f value:  0.0
Iterations:  4011
Function evaluations:  200215
Time elapsed:  32.8412370682  seconds
"""

    if verbose:
        print("Testing Griewangk:")
        print("Expected: x=[0.]*10 and f=0")
    from mystic.models import griewangk as costfunc
    ndim = 10
    lb = [-400.] * ndim
    ub = [400.] * ndim
    maxiter = 10000
    seed = 123  # Re-seed for each solver to have them all start at same x0

    # DifferentialEvolutionSolver
    if verbose: print("\nUsing DifferentialEvolutionSolver:")
    npop = 50
    random_seed(seed)
    from mystic.solvers import DifferentialEvolutionSolver
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.termination import VTR
    from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp
    esow = Monitor()
    ssow = Monitor()
    solver = DifferentialEvolutionSolver(ndim, npop)
    solver.SetRandomInitialPoints(lb, ub)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    solver.enable_signal_handler()
    #term = COG(1e-10)
    #term = CRT()
    term = VTR(0.)
    time1 = time.time()  # Is this an ok way of timing?
    solver.Solve(costfunc, term, strategy=Rand1Exp, \
                 CrossProbability=0.3, ScalingFactor=1.0)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 0.0, tol=3e-3)

    # DifferentialEvolutionSolver2
    if verbose: print("\nUsing DifferentialEvolutionSolver2:")
    npop = 50
    random_seed(seed)
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.termination import VTR
    from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp
    esow = Monitor()
    ssow = Monitor()
    solver = DifferentialEvolutionSolver2(ndim, npop)
    solver.SetRandomInitialPoints(lb, ub)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    #term = COG(1e-10)
    #term = CRT()
    term = VTR(0.)
    time1 = time.time()  # Is this an ok way of timing?
    solver.Solve(costfunc, term, strategy=Rand1Exp, \
                 CrossProbability=0.3, ScalingFactor=1.0)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 0.0, tol=3e-3)
Example 33
# compare against the exact minimum
xs = np.array([.5, 1.224744871])
ys = objective(xs)

sol = my.fmin_powell(objective,
                     x0,
                     constraint=cons,
                     penalty=pens,
                     disp=False,
                     bounds=bounds,
                     gtol=3,
                     ftol=1e-6,
                     full_output=True)

assert mm.almostEqual(sol[0], xs, tol=1e-2)
assert mm.almostEqual(sol[1], ys, tol=1e-2)

sol = my.diffev(objective,
                bounds,
                constraint=cons,
                penalty=pens,
                disp=False,
                bounds=bounds,
                npop=10,
                gtol=100,
                ftol=1e-6,
                full_output=True)

assert mm.almostEqual(sol[0], xs, tol=1e-2)
assert mm.almostEqual(sol[1], ys, tol=1e-2)
Example 34
@quadratic_equality(penalty1)
@quadratic_equality(penalty2)
@quadratic_equality(penalty3)
def penalty(x):
    return 0.0


solver = as_constraint(penalty)

if __name__ == '__main__':

    from mystic.solvers import lattice
    from mystic.math import almostEqual

    result = lattice(objective,
                     5, [2] * 5,
                     bounds=bounds,
                     penalty=penalty,
                     ftol=1e-8,
                     xtol=1e-8,
                     disp=False,
                     full_output=True)

    assert almostEqual(result[0], xs, tol=2e-2) \
        or almostEqual(result[0], _xs, tol=2e-2) \
        or almostEqual(result[0], x_s, tol=2e-2) \
        or almostEqual(result[0], xs_, tol=2e-2)
    assert almostEqual(result[1], ys, rel=2e-2)

# EOF
Example 35
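# NOTE: helper functions (samplepts, random_samples, normalize, norm, mean,
# spread, variance, impose_mean, impose_spread, impose_variance,
# impose_weight_norm) are presumably imported from mystic.math.measures and
# mystic.math.samples; disp is a module-level verbosity flag.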
def test_calculate_methods(npts=2):
    upper_bounds = [1.0]
    lower_bounds = [0.0]

    # -------------------------------------
    # generate initial coordinates, weights
    # -------------------------------------
    # get a random distribution of points
    if disp: print("generate random points and weights")
    coordinates = samplepts(lower_bounds, upper_bounds, npts)
    D0 = D = [i[0] for i in coordinates]
    if disp: print("positions: %s" % D)

    # calculate sample range
    R0 = R = spread(D)
    if disp: print("range: %s" % R)

    # select weights randomly in [0,1], then normalize so sum(weights) = 1
    wts = random_samples([0], [1], npts)[0]
    weights = normalize(wts, 0.0, zsum=True)
    if disp: print("weights (when normalized to 0.0): %s" % weights)
    assert almostEqual(sum(weights), 0.0, tol=1e-15)
    weights = normalize(wts, 1.0)
    assert almostEqual(sum(weights), 1.0, tol=1e-15)
    if disp: print("weights (when normalized to 1.0): %s" % weights)
    w = norm(weights)
    if disp: print("norm: %s" % w)
    assert almostEqual(w, sum(weights) / npts)

    # calculate sample mean
    m0 = m = mean(D, weights)
    if disp: print("mean: %s" % m)
    if disp: print("")

    # -------------------------------------
    # modify coordinates, maintaining mean & range
    # -------------------------------------
    # get new random distribution
    if disp: print("modify positions, maintaining mean and range")
    coordinates = samplepts(lower_bounds, upper_bounds, npts)
    D = [i[0] for i in coordinates]

    # impose a range and mean on the points
    D = impose_spread(R, D, weights)
    D = impose_mean(m, D, weights)

    # print results
    if disp: print("positions: %s" % D)
    R = spread(D)
    if disp: print("range: %s" % R)
    assert almostEqual(R, R0)
    m = mean(D, weights)
    if disp: print("mean: %s" % m)
    assert almostEqual(m, m0)
    if disp: print("")

    # -------------------------------------
    # modify weights, maintaining mean & norm
    # -------------------------------------
    # select weights randomly in [0,1]
    if disp: print("modify weights, maintaining mean and range")
    wts = random_samples([0], [1], npts)[0]

    # print intermediate results
    #print("weights: %s" % wts)
    #sm = mean(D, wts)
    #print("tmp mean: %s" % sm)
    #print("")

    # impose mean and weight norm on the points
    D = impose_mean(m, D, wts)
    DD, weights = impose_weight_norm(D, wts)

    # print results
    if disp: print("weights: %s" % weights)
    w = norm(weights)
    if disp: print("norm: %s" % w)
    assert almostEqual(w, sum(weights) / npts)
    if disp: print("positions: %s" % DD)
    R = spread(DD)
    if disp: print("range: %s" % R)
    assert almostEqual(R, R0)
    sm = mean(DD, weights)
    if disp: print("mean: %s" % sm)
    assert almostEqual(sm, m0)
    sv = variance(DD, weights)
    if disp: print("var: %s" % sv)
    assert not almostEqual(sv, R)
    assert almostEqual(sv, 0.0, tol=.3)

    # -------------------------------------
    # modify variance, maintaining mean
    # -------------------------------------
    if disp: print("\nmodify variance, maintaining mean")
    DD = impose_variance(R, DD, weights)
    sm = mean(DD, weights)
    if disp: print("mean: %s" % sm)
    assert almostEqual(sm, m0)
    sv = variance(DD, weights)
    if disp: print("var: %s" % sv)
    assert almostEqual(sv, R)
Example 36
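# NOTE: point, set, and collection here are mystic discrete-measure classes
# imported at module level (set shadows the Python builtin); expectation is
# presumably mystic.math.measures.expectation, and disp is a module flag.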
def test_collection_behavior():
    from mystic.math import almostEqual
    from numpy import inf

    def f(x):
        return sum(x)  # a test function for expectation value

    # build three sets (x,y,z)
    sx = set([point(1.0, 1.0), point(2.0, 1.0), point(3.0, 2.0)])
    sy = set([point(0.0, 1.0), point(3.0, 2.0)])
    sz = set([point(1.0, 1.0), point(2.0, 3.0)])
    #NOTE: for marc_surr(x,y,z), we must have x > 0.0
    sx.normalize()
    sy.normalize()
    sz.normalize()
    assert sx.mass == sy.mass == sz.mass == 1.0

    # build a collection
    c = collection([sx, sy, sz])
    xpos = c[0].positions
    ypos = c[1].positions
    zpos = c[2].positions
    xwts = c[0].weights
    ywts = c[1].weights
    zwts = c[2].weights
    if disp:
        print("x_positions: %s" % xpos)
        print("y_positions: %s" % ypos)
        print("z_positions: %s" % zpos)
        print("x_weights: %s" % xwts)
        print("y_weights: %s" % ywts)
        print("z_weights: %s" % zwts)
    assert xpos == sx.positions
    assert ypos == sy.positions
    assert zpos == sz.positions
    assert xwts == sx.weights
    assert ywts == sy.weights
    assert zwts == sz.weights
    tol = .2
    supp = c.support(tol)
    positions = c.positions
    weights = c.weights
    assert supp == [p for (p, w) in zip(positions, weights) if w > tol]
    if disp:
        print("support points:\n %s" % supp)
        print("npts: %s (i.e. %s)" % (c.npts, c.pts))
        print("weights: %s" % weights)
        print("positions: %s" % positions)
    assert c.npts == sx.npts * sy.npts * sz.npts
    assert len(weights) == len(positions) == c.npts
    assert sx.positions in c.pos
    assert sy.positions in c.pos
    assert sz.positions in c.pos
    assert sx.weights in c.wts
    assert sy.weights in c.wts
    assert sz.weights in c.wts

    c_exp = c.expect(f)
    if disp:
        print("mass: %s" % c.mass)
        print("expect: %s" % c_exp)
    assert c.mass == [sx.mass, sy.mass, sz.mass]
    assert c_exp == expectation(f, c.positions, c.weights)

    #print("center: %s" % c.center)
    #print("delta: %s" % c.delta)

    # change the positions in the collection
    points = [list(i) for i in positions[::3]]
    for i in range(len(points)):
        points[i][0] = 0.5
    positions[::3] = points
    c.positions = positions

    _xpos = c[0].positions
    _ypos = c[1].positions
    _zpos = c[2].positions
    _cexp = c.expect(f)
    if disp:
        print("x_positions: %s" % _xpos)
        print("y_positions: %s" % _ypos)
        print("z_positions: %s" % _zpos)
        print("expect: %s" % _cexp)
    assert _xpos == [0.5] + xpos[1:]
    assert _ypos == ypos
    assert _zpos == zpos
    assert _cexp < c_exp  # due to _xpos[0] is 0.5 and less than 1.0

    _mean = 85.0
    _range = 0.25

    c.set_expect(_mean, f, npop=40, maxiter=200, tol=_range)
    _exp = c.expect(f)
    if disp:
        print("mean: %s" % _mean)
        print("range: %s" % _range)
        print("expect: %s" % _exp)
    assert almostEqual(_mean, _exp, tol=_mean * 0.01)

    # a test function for probability of failure
    def g(x):
        if f(x) <= 0.0: return False
        return True

    pof = c.pof(g)
    spof = c.sampled_pof(g, npts=10000)
    if disp:
        print("pof: %s" % pof)
        print("sampled_pof: %s" % spof)
    assert almostEqual(pof, spof, tol=0.02)
    return
Example 37
from mystic.symbolic import generate_constraint, generate_solvers, simplify
from mystic.symbolic import generate_penalty, generate_conditions

equations = """
x0**2 + x1**2 + x2**2 + x3**2 + x4**2 - 10.0 = 0.0
x1*x2 - 5.0*x3*x4 = 0.0
x0**3 + x1**3 + 1.0 = 0.0
"""
cf = generate_constraint(generate_solvers(simplify(equations))) # slow solve
pf = generate_penalty(generate_conditions(equations))



if __name__ == '__main__':

    from mystic.solvers import lattice
    from mystic.math import almostEqual

    result = lattice(objective, 5, [3]*5, bounds=bounds, penalty=pf, ftol=1e-8, xtol=1e-8, disp=False, full_output=True)

    assert almostEqual(result[0], xs, tol=1e-2) \
        or almostEqual(result[0], _xs, tol=1e-2) \
        or almostEqual(result[0], x_s, tol=1e-2) \
        or almostEqual(result[0], xs_, tol=1e-2)
    assert almostEqual(result[1], ys, rel=1e-2)



# EOF
Example 38
def test_setexpectvar(m, expect=5.0, tol=0.001):
    m.set_expect_var(expect, f, tol=tol)
    assert almostEqual(m.expect_var(f), expect, tol=tol)
Example 39
def test_griewangk():
    """Test Griewangk's function, which has many local minima.

Testing Griewangk:
Expected: x=[0.]*10 and f=0

Using DifferentialEvolutionSolver:
Solution:  [  8.87516194e-09   7.26058147e-09   1.02076001e-08   1.54219038e-08
  -1.54328461e-08   2.34589663e-08   2.02809360e-08  -1.36385836e-08
   1.38670373e-08   1.59668900e-08]
f value:  0.0
Iterations:  4120
Function evaluations:  205669
Time elapsed:  34.4936850071  seconds

Using DifferentialEvolutionSolver2:
Solution:  [ -2.02709316e-09   3.22017968e-09   1.55275472e-08   5.26739541e-09
  -2.18490470e-08   3.73725584e-09  -1.02315312e-09   1.24680355e-08
  -9.47898116e-09   2.22243557e-08]
f value:  0.0
Iterations:  4011
Function evaluations:  200215
Time elapsed:  32.8412370682  seconds
"""

    print "Testing Griewangk:"
    print "Expected: x=[0.]*10 and f=0"
    from mystic.models import griewangk as costfunc
    ndim = 10
    lb = [-400.]*ndim
    ub = [400.]*ndim
    maxiter = 10000
    seed = 123 # Re-seed for each solver to have them all start at same x0
    
    # DifferentialEvolutionSolver
    print "\nUsing DifferentialEvolutionSolver:"
    npop = 50
    random_seed(seed)
    from mystic.solvers import DifferentialEvolutionSolver
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.termination import VTR
    from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp
    esow = Monitor()
    ssow = Monitor() 
    solver = DifferentialEvolutionSolver(ndim, npop)
    solver.SetRandomInitialPoints(lb, ub)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    solver.enable_signal_handler()
    #term = COG(1e-10)
    #term = CRT()
    term = VTR(0.)
    time1 = time.time() # Is this an ok way of timing?
    solver.Solve(costfunc, term, strategy=Rand1Exp, \
                 CrossProbability=0.3, ScalingFactor=1.0)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 0.0, tol=3e-3)

    # DifferentialEvolutionSolver2
    print "\nUsing DifferentialEvolutionSolver2:"
    npop = 50
    random_seed(seed)
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.termination import VTR
    from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp
    esow = Monitor()
    ssow = Monitor() 
    solver = DifferentialEvolutionSolver2(ndim, npop)
    solver.SetRandomInitialPoints(lb, ub)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    #term = COG(1e-10)
    #term = CRT()
    term = VTR(0.)
    time1 = time.time() # Is this an ok way of timing?
    solver.Solve(costfunc, term, strategy=Rand1Exp, \
                 CrossProbability=0.3, ScalingFactor=1.0)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 0.0, tol=3e-3)
Example 40
def test_setexpect(m, expect=5.0, tol=0.001):
    #e_lo = expect - expect
    #e_hi = expect + expect
    m.set_expect(expect, f, tol=tol)#, bounds=([e_lo]*m.npts, [e_hi]*m.npts)) 
    assert almostEqual(m.expect(f), expect, tol=tol)
Example 41
def test_expect(constrain=False):
    G = marc_surr  #XXX: uses the above-provided test function
    function_name = G.__name__

    _mean = 06.0  #NOTE: SET THE mean HERE!
    _range = 00.5  #NOTE: SET THE range HERE!
    nx = 3  #NOTE: SET THE NUMBER OF 'h' POINTS HERE!
    ny = 3  #NOTE: SET THE NUMBER OF 'a' POINTS HERE!
    nz = 3  #NOTE: SET THE NUMBER OF 'v' POINTS HERE!

    h_lower = [60.0]
    a_lower = [0.0]
    v_lower = [2.1]
    h_upper = [105.0]
    a_upper = [30.0]
    v_upper = [2.8]

    lower_bounds = (nx * h_lower) + (ny * a_lower) + (nz * v_lower)
    upper_bounds = (nx * h_upper) + (ny * a_upper) + (nz * v_upper)
    bounds = (lower_bounds, upper_bounds)

    if debug:
        print " model: f(x) = %s(x)" % function_name
        print " mean: %s" % _mean
        print " range: %s" % _range
        print "..............\n"

    if debug:
        param_string = "["
        for i in range(nx):
            param_string += "'x%s', " % str(i + 1)
        for i in range(ny):
            param_string += "'y%s', " % str(i + 1)
        for i in range(nz):
            param_string += "'z%s', " % str(i + 1)
        param_string = param_string[:-2] + "]"

        print " parameters: %s" % param_string
        print " lower bounds: %s" % lower_bounds
        print " upper bounds: %s" % upper_bounds
    # print " ..."

    wx = [1.0 / float(nx)] * nx
    wy = [1.0 / float(ny)] * ny
    wz = [1.0 / float(nz)] * nz

    from mystic.math.measures import _pack, _unpack
    wts = _pack([wx, wy, wz])
    weights = [i[0] * i[1] * i[2] for i in wts]

    if not constrain:
        constraints = None
    else:  # impose a mean constraint on 'thickness'
        h_mean = (h_upper[0] + h_lower[0]) / 2.0
        h_error = 1.0
        v_mean = (v_upper[0] + v_lower[0]) / 2.0
        v_error = 0.05
        if debug:
            print "impose: mean[x] = %s +/- %s" % (str(h_mean), str(h_error))
            print "impose: mean[z] = %s +/- %s" % (str(v_mean), str(v_error))

        def constraints(x, w):
            from mystic.math.discrete import compose, decompose
            c = compose(x, w)
            E = float(c[0].mean)
            if not (E <= float(h_mean + h_error)) or not (
                    float(h_mean - h_error) <= E):
                c[0].mean = h_mean
            E = float(c[2].mean)
            if not (E <= float(v_mean + v_error)) or not (
                    float(v_mean - v_error) <= E):
                c[2].mean = v_mean
            return decompose(c)[0]

    from mystic.math.measures import mean, expectation, impose_expectation
    samples = impose_expectation((_mean,_range), G, (nx,ny,nz), bounds, \
                                        weights, constraints=constraints)

    smp = _unpack(samples, (nx, ny, nz))
    if debug:
        from numpy import array
        # rv = [xi]*nx + [yi]*ny + [zi]*nz
        print "\nsolved [x]: %s" % array(smp[0])
        print "solved [y]: %s" % array(smp[1])
        print "solved [z]: %s" % array(smp[2])
        #print "solved: %s" % smp
    mx = mean(smp[0])
    my = mean(smp[1])
    mz = mean(smp[2])
    if debug:
        print "\nmean[x]: %s" % mx  # weights are all equal
        print "mean[y]: %s" % my  # weights are all equal
        print "mean[z]: %s\n" % mz  # weights are all equal
    if constrain:
        assert almostEqual(mx, h_mean, tol=h_error)
        assert almostEqual(mz, v_mean, tol=v_error)

    Ex = expectation(G, samples, weights)
    cost = (Ex - _mean)**2
    if debug:
        print "expect: %s" % Ex
        print "cost = (E[G] - m)^2: %s" % cost
    assert almostEqual(cost, 0.0, 0.01)
Example 42
def test_set_behavior():
    from mystic.math import almostEqual
    from numpy import inf

    # check basic behavior for set of two points
    s = set([point(1.0, 1.0), point(3.0, 2.0)])  #XXX: s + [pt, pt] => broken
    assert almostEqual(s.mean, 2.33333333)
    assert almostEqual(s.range, 2.0)
    assert almostEqual(s.mass, 3.0)

    # basic behavior for an admissable set
    s.normalize()
    s.mean = 1.0
    s.range = 1.0
    assert almostEqual(s.mean, 1.0)
    assert almostEqual(s.range, 1.0)
    assert almostEqual(s.mass, 1.0)

    # add and remove points
    # test special cases: SUM(weights)=0, RANGE(samples)=0, SUM(samples)=0
    s.append(point(1.0, -1.0))
    assert s.mean == -inf
    assert almostEqual(s.range, 1.0)
    assert almostEqual(s.mass, 0.0)
    '''
  _ave = s.mean
  s.mean = 1.0
  assert str(s.mean) == 'nan'
  assert str(s.range) == 'nan'
  assert almostEqual(s.mass, 0.0)
  s.normalize()
  assert str(s.mass) == 'nan'
  s.mean = _ave
  '''

    s.pop()
    s[0] = point(1.0, 1.0)
    s[1] = point(-1.0, 1.0)
    assert almostEqual(s.mean, 0.0)
    assert almostEqual(s.range, 2.0)
    assert almostEqual(s.mass, 2.0)
    s.normalize()
    s.mean = 1.0
    assert almostEqual(s.mean, 1.0)
    assert almostEqual(s.range, 2.0)
    assert almostEqual(s.mass, 1.0)

    s[0] = point(1.0, 1.0)
    s[1] = point(1.0, 1.0)
    assert almostEqual(s.mean, 1.0)
    assert almostEqual(s.range, 0.0)
    assert almostEqual(s.mass, 2.0)

    s.range = 1.0
    assert str(s.mean) == 'nan'
    assert str(s.range) == 'nan'
    assert almostEqual(s.mass, 2.0)
    return
Example 43
    from mystic.solvers import diffev2, fmin_powell
    from mystic.math import almostEqual
    # from mystic.monitors import VerboseMonitor
    # mon = VerboseMonitor(10)

    result = diffev2(objective,
                     args=args,
                     x0=bounds,
                     bounds=bounds,
                     npop=40,
                     ftol=1e-8,
                     disp=False,
                     full_output=True)  #, itermon=mon)
    # print(result[0])
    assert almostEqual(result[0], xs, tol=1e-8) \
        or almostEqual(result[0], xs_,tol=1e-8)
    assert almostEqual(result[1], ys, tol=1e-5)

    result = fmin_powell(objective,
                         args=args,
                         x0=[0.0, 0.0, 0.0],
                         bounds=bounds,
                         disp=False,
                         full_output=True)
    assert almostEqual(result[0], xs, tol=1e-8) \
        or almostEqual(result[0], xs_,tol=1e-8)
    assert almostEqual(result[1], ys, tol=1e-5)

# EOF
Example 44
    from mystic.solvers import diffev2, fmin_powell
    from mystic.math import almostEqual
    # from mystic.monitors import VerboseMonitor
    # mon = VerboseMonitor(10)

    result = diffev2(objective,
                     args=args,
                     x0=bounds,
                     bounds=bounds,
                     npop=40,
                     ftol=1e-8,
                     gtol=100,
                     disp=False,
                     full_output=True)  #, itermon=mon)
    # print("%s %s" % (result[0], xs))
    assert almostEqual(result[0], xs, rel=1e-1)
    assert almostEqual(result[1], ys, rel=1e-1)

    result = fmin_powell(objective,
                         args=args,
                         x0=[0.0, 0.0],
                         bounds=bounds,
                         disp=False,
                         full_output=True)
    # print("%s %s" % (result[0], xs))
    assert almostEqual(result[0], xs, rel=1e-1)
    assert almostEqual(result[1], ys, rel=1e-1)

    # mon = VerboseMonitor(10)
    result = diffev2(objective,
                     args=args,
Example 45
def test_rosenbrock(verbose=False):
    """Test the 2-dimensional Rosenbrock function.

Testing 2-D Rosenbrock:
Expected: x=[1., 1.] and f=0

Using DifferentialEvolutionSolver:
Solution:  [ 1.00000037  1.0000007 ]
f value:  2.29478683682e-13
Iterations:  99
Function evaluations:  3996
Time elapsed:  0.582273006439  seconds

Using DifferentialEvolutionSolver2:
Solution:  [ 0.99999999  0.99999999]
f value:  3.84824937598e-15
Iterations:  100
Function evaluations:  4040
Time elapsed:  0.577210903168  seconds

Using NelderMeadSimplexSolver:
Solution:  [ 0.99999921  1.00000171]
f value:  1.08732211477e-09
Iterations:  70
Function evaluations:  130
Time elapsed:  0.0190329551697  seconds

Using PowellDirectionalSolver:
Solution:  [ 1.  1.]
f value:  0.0
Iterations:  28
Function evaluations:  859
Time elapsed:  0.113857030869  seconds
"""

    if verbose:
        print("Testing 2-D Rosenbrock:")
        print("Expected: x=[1., 1.] and f=0")
    from mystic.models import rosen as costfunc
    ndim = 2
    lb = [-5.] * ndim
    ub = [5.] * ndim
    x0 = [2., 3.]
    maxiter = 10000

    # DifferentialEvolutionSolver
    if verbose: print("\nUsing DifferentialEvolutionSolver:")
    npop = 40
    from mystic.solvers import DifferentialEvolutionSolver
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.strategy import Rand1Bin
    esow = Monitor()
    ssow = Monitor()
    solver = DifferentialEvolutionSolver(ndim, npop)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = COG(1e-10)
    time1 = time.time()  # Is this an ok way of timing?
    solver.Solve(costfunc, term, strategy=Rand1Bin)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 2.29478683682e-13, tol=3e-3)

    # DifferentialEvolutionSolver2
    if verbose: print("\nUsing DifferentialEvolutionSolver2:")
    npop = 40
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.strategy import Rand1Bin
    esow = Monitor()
    ssow = Monitor()
    solver = DifferentialEvolutionSolver2(ndim, npop)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = COG(1e-10)
    time1 = time.time()  # Is this an ok way of timing?
    solver.Solve(costfunc, term, strategy=Rand1Bin)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 3.84824937598e-15, tol=3e-3)

    # NelderMeadSimplexSolver
    if verbose: print("\nUsing NelderMeadSimplexSolver:")
    from mystic.solvers import NelderMeadSimplexSolver
    from mystic.termination import CandidateRelativeTolerance as CRT
    esow = Monitor()
    ssow = Monitor()
    solver = NelderMeadSimplexSolver(ndim)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = CRT()
    time1 = time.time()  # Is this an ok way of timing?
    solver.Solve(costfunc, term)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 1.08732211477e-09, tol=3e-3)

    # PowellDirectionalSolver
    if verbose: print("\nUsing PowellDirectionalSolver:")
    from mystic.solvers import PowellDirectionalSolver
    from mystic.termination import NormalizedChangeOverGeneration as NCOG
    esow = Monitor()
    ssow = Monitor()
    solver = PowellDirectionalSolver(ndim)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = NCOG(1e-10)
    time1 = time.time()  # Is this an ok way of timing?
    solver.Solve(costfunc, term)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 0.0, tol=3e-3)
Example 46
def test_rosenbrock():
    """Test the 2-dimensional Rosenbrock function.

Testing 2-D Rosenbrock:
Expected: x=[1., 1.] and f=0

Using DifferentialEvolutionSolver:
Solution:  [ 1.00000037  1.0000007 ]
f value:  2.29478683682e-13
Iterations:  99
Function evaluations:  3996
Time elapsed:  0.582273006439  seconds

Using DifferentialEvolutionSolver2:
Solution:  [ 0.99999999  0.99999999]
f value:  3.84824937598e-15
Iterations:  100
Function evaluations:  4040
Time elapsed:  0.577210903168  seconds

Using NelderMeadSimplexSolver:
Solution:  [ 0.99999921  1.00000171]
f value:  1.08732211477e-09
Iterations:  70
Function evaluations:  130
Time elapsed:  0.0190329551697  seconds

Using PowellDirectionalSolver:
Solution:  [ 1.  1.]
f value:  0.0
Iterations:  28
Function evaluations:  859
Time elapsed:  0.113857030869  seconds
"""

    print "Testing 2-D Rosenbrock:"
    print "Expected: x=[1., 1.] and f=0"
    from mystic.models import rosen as costfunc
    ndim = 2
    lb = [-5.]*ndim
    ub = [5.]*ndim
    x0 = [2., 3.]
    maxiter = 10000
    
    # DifferentialEvolutionSolver
    print "\nUsing DifferentialEvolutionSolver:"
    npop = 40
    from mystic.solvers import DifferentialEvolutionSolver
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.strategy import Rand1Bin
    esow = Monitor()
    ssow = Monitor() 
    solver = DifferentialEvolutionSolver(ndim, npop)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = COG(1e-10)
    time1 = time.time() # Is this an ok way of timing?
    solver.Solve(costfunc, term, strategy=Rand1Bin)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 2.29478683682e-13, tol=3e-3)

    # DifferentialEvolutionSolver2
    print "\nUsing DifferentialEvolutionSolver2:"
    npop = 40
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.strategy import Rand1Bin
    esow = Monitor()
    ssow = Monitor() 
    solver = DifferentialEvolutionSolver2(ndim, npop)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = COG(1e-10)
    time1 = time.time() # Is this an ok way of timing?
    solver.Solve(costfunc, term, strategy=Rand1Bin)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 3.84824937598e-15, tol=3e-3)

    # NelderMeadSimplexSolver
    print "\nUsing NelderMeadSimplexSolver:"
    from mystic.solvers import NelderMeadSimplexSolver
    from mystic.termination import CandidateRelativeTolerance as CRT
    esow = Monitor()
    ssow = Monitor() 
    solver = NelderMeadSimplexSolver(ndim)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = CRT()
    time1 = time.time() # Is this an ok way of timing?
    solver.Solve(costfunc, term)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 1.08732211477e-09, tol=3e-3)

    # PowellDirectionalSolver
    print "\nUsing PowellDirectionalSolver:"
    from mystic.solvers import PowellDirectionalSolver
    from mystic.termination import NormalizedChangeOverGeneration as NCOG
    esow = Monitor()
    ssow = Monitor() 
    solver = PowellDirectionalSolver(ndim)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = NCOG(1e-10)
    time1 = time.time() # Is this an ok way of timing?
    solver.Solve(costfunc, term)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 0.0, tol=3e-3)
Example 47
if __name__ == '__main__':

    from mystic.solvers import diffev2, lattice
    from mystic.math import almostEqual
    from mystic.monitors import Monitor, VerboseMonitor
    mon = VerboseMonitor(10)  #,10)

    #result = lattice(objective, 12, 960, bounds=bounds, penalty=penalty, constraints=constraint, ftol=1e-6, xtol=1e-6, disp=True, full_output=True, itermon=mon, maxiter=25)
    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=constraint, npop=80, gtol=100, disp=True, full_output=True)
    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=penalty, constraints=constraint, npop=1000, ftol=1e-8, gtol=50, disp=True, full_output=True, cross=0.9, scale=0.9, itermon=mon)
    #FIXME: SOLVES at about 20%?... but w/ last 2 fixed 90%?
    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     penalty=penalty,
                     constraints=constraint,
                     npop=360,
                     ftol=1e-8,
                     gtol=200,
                     disp=True,
                     full_output=True,
                     cross=0.2,
                     scale=0.9,
                     itermon=mon)
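
    # (added note) npop is the DE population size; cross and scale are,
    # presumably, the crossover probability and the differential scaling
    # factor. The FIXME above records that this particular tuning only
    # succeeds on a fraction of runs.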

    print(result[0])
    assert almostEqual(result[0], xs, tol=1e-8)  #XXX: fails b/c rel & zero?
    assert almostEqual(result[1], ys, tol=1e-4)

# EOF
Esempio n. 48
0
from mystic.constraints import as_constraint

solver = as_constraint(penalty)
#solver = discrete(range(11))(solver)  #XXX: MOD = range(11) instead of LARGE
#FIXME: constrain to 'int' with discrete is very fragile!  required #MODs

def constraint(x):
    from numpy import round
    return round(solver(x))
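
# (added note) constraint() maps a trial point through the penalty-derived
# solver to a (nearly) feasible point and then rounds it to integers; the
# __main__ block below uses the simpler @integers() route instead.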

# a better approach: constrain candidates to integers, and penalize violations otherwise
from mystic.constraints import integers

@integers()
def round(x):
  return x
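
# (added illustration, not from the original source) the integers() decorator
# rounds each entry of a candidate to the nearest integer before the objective
# sees it, roughly:
#   >>> round([0.123, 1.789, 4.000])
#   [0, 2, 4]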


if __name__ == '__main__':

    from mystic.solvers import diffev2
    from mystic.math import almostEqual

    result = diffev2(objective, x0=bounds, bounds=bounds, penalty=penalty, constraints=round, npop=30, gtol=50, disp=True, full_output=True)

    print(result[0])
    assert almostEqual(result[0], xs, tol=1e-8) #XXX: fails b/c rel & zero?
    assert almostEqual(result[1], ys, tol=1e-4)


# EOF
Esempio n. 49
0

#   # test fcalls <= maxfun
#   assert my_x[3] <= maxfun
    if maxiter is not None:
        # test iters <= maxiter
        assert my_x[2] <= maxiter
    return

if __name__ == '__main__':
    x0 = [0, 0, 0]

    # check solutions versus results based on the random_seed
    # print("comparing against known results")
    sol = solvers.diffev(rosen, x0, npop=40, disp=0, full_output=True)
    assert almostEqual(sol[1], 0.0020640145337293249, tol=3e-3)
    sol = solvers.diffev2(rosen, x0, npop=40, disp=0, full_output=True)
    assert (almostEqual(sol[1], 0.0017516784703663288, tol=3e-3)
            or almostEqual(sol[1], 0.00496876027278, tol=3e-3))  # python3.x
    sol = solvers.fmin_powell(rosen, x0, disp=0, full_output=True)
    assert almostEqual(sol[1], 8.3173488898295291e-23)
    sol = solvers.fmin(rosen, x0, disp=0, full_output=True)
    assert almostEqual(sol[1], 1.1605792769954724e-09)

    solver2 = 'diffev2'
    for solver in ['diffev']:
        #   print("comparing %s and %s from mystic" % (solver, solver2))
        test_solvers(solver, solver2, x0, npop=40)
        test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=0)
        test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=1)
        test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=2)
Esempio n. 50
0
xs  = polyfit(x, y, 1) 
xs_ = polyfit(x, y, 2) 
ys  = objective(xs, x, y)
ys_ = objective(xs_, x, y)
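
# (added note) polyfit here is presumably numpy.polyfit, i.e. the closed-form
# least-squares fit: xs/xs_ are the degree-1 and degree-2 reference
# coefficients, and ys/ys_ the corresponding objective values that the
# solvers below are expected to reproduce.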


if __name__ == '__main__':

  from mystic.solvers import diffev2, fmin_powell
  from mystic.math import almostEqual
# from mystic.monitors import VerboseMonitor
# mon = VerboseMonitor(10)

  result = diffev2(objective, args=args, x0=bounds, bounds=bounds, npop=40, ftol=1e-8, gtol=100, disp=False, full_output=True)#, itermon=mon)
# print result[0], xs
  assert almostEqual(result[0], xs, rel=1e-1)
  assert almostEqual(result[1], ys, rel=1e-1)

  result = fmin_powell(objective, args=args, x0=[0.0,0.0], bounds=bounds, disp=False, full_output=True)
# print result[0], xs
  assert almostEqual(result[0], xs, rel=1e-1)
  assert almostEqual(result[1], ys, rel=1e-1)

# mon = VerboseMonitor(10)
  result = diffev2(objective, args=args, x0=bounds_, bounds=bounds_, npop=40, ftol=1e-8, gtol=100, disp=False, full_output=True)#, itermon=mon)
# print result[0], xs_
  assert almostEqual(result[0], xs_, tol=1e-1)
  assert almostEqual(result[1], ys_, rel=1e-1)

  result = fmin_powell(objective, args=args, x0=[0.0,0.0,0.0], bounds=bounds_, disp=False, full_output=True)
# print result[0], xs_
Esempio n. 51
0
    from mystic.solvers import diffev2
    from mystic.math import almostEqual
    from mystic.monitors import Monitor, VerboseMonitor
    mon = VerboseMonitor(10)  #,10)

    result = diffev2(objective,
                     x0=bounds,
                     bounds=bounds,
                     penalty=pf,
                     constraints=constraint,
                     npop=52,
                     ftol=1e-8,
                     gtol=1000,
                     disp=True,
                     full_output=True,
                     cross=0.1,
                     scale=0.9,
                     itermon=mon)
    # FIXME: solves at 0%... but w/ vowels fixed 80%?
    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=constraint, npop=52, ftol=1e-8, gtol=2000, disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon)
    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=constraint, npop=130, ftol=1e-8, gtol=1000, disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon)
    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=constraint, npop=260, ftol=1e-8, gtol=500, disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon)
    #result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=constraint, npop=520, ftol=1e-8, gtol=100, disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon)

    print(result[0])
    assert almostEqual(result[0], xs, tol=1e-8)
    assert almostEqual(result[1], ys, tol=1e-4)

# EOF
Esempio n. 52
0
File: g08.py Project: jcfr/mystic
from mystic.symbolic import generate_constraint, generate_solvers, solve
from mystic.symbolic import generate_penalty, generate_conditions

equations = """
x0**2 - x1 + 1.0 <= 0.0
1.0 - x0 + (x1 - 4)**2 <= 0.0
"""
#cf = generate_constraint(generate_solvers(solve(equations))) #XXX: inequalities
pf = generate_penalty(generate_conditions(equations), k=1e12)

from mystic.constraints import as_constraint

cf = as_constraint(pf)
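
# (added note) as_constraint, roughly speaking, wraps the penalty in an inner
# optimization that maps a candidate x to a nearby point where the penalty is
# (approximately) zero; the k=1e12 above is, presumably, the penalty
# multiplier, making violations of the two inequalities very expensive.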



if __name__ == '__main__':

    from mystic.solvers import buckshot
    from mystic.math import almostEqual
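
    # (added note) buckshot below presumably launches 40 local solves (npts=40)
    # from pseudo-random starting points within the 2-D bounds and keeps the
    # best result found.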

    result = buckshot(objective, 2, 40, bounds=bounds, penalty=pf, disp=False, full_output=True)

    assert almostEqual(result[0], xs, tol=1e-2)
    assert almostEqual(result[1], ys, rel=1e-2)



# EOF
Esempio n. 53
0

#   # test fcalls <= maxfun
#   assert my_x[3] <= maxfun
    if maxiter is not None:
        # test iters <= maxiter
        assert my_x[2] <= maxiter
    return

if __name__ == '__main__':
    x0 = [0, 0, 0]

    # check solutions versus results based on the random_seed
    print "comparing against known results"
    sol = solvers.diffev(rosen, x0, npop=40, disp=0, full_output=True)
    assert almostEqual(sol[1], 0.0020640145337293249, tol=3e-3)
    sol = solvers.diffev2(rosen, x0, npop=40, disp=0, full_output=True)
    assert almostEqual(sol[1], 0.0017516784703663288, tol=3e-3)
    sol = solvers.fmin_powell(rosen, x0, disp=0, full_output=True)
    assert almostEqual(sol[1], 8.3173488898295291e-23)
    sol = solvers.fmin(rosen, x0, disp=0, full_output=True)
    assert almostEqual(sol[1], 1.1605792769954724e-09)

    solver2 = 'diffev2'
    for solver in ['diffev']:
        print "comparing %s and %s from mystic" % (solver, solver2)
        test_solvers(solver, solver2, x0, npop=40)
        #   test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=0)
        #   test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=1)
        #   test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=2)
        #   test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=9)