Example #1
File: arl.py Project: LanceFiondella/ARL
    def mysticCompute(self):
        #for b in range(10**5, budget,  ba):
        for b in range(0, int(self.ba), int(self.ba / self.intermediate)):
            equations = "{} <= {}".format(
                "+".join(([
                    "x{}".format(i) for i in range(len(self.componentIndices))
                ])), b)
            #print(equations)
            cf = generate_constraint(generate_solvers(simplify(equations)))
            pf = generate_penalty(generate_conditions(equations))
            x0 = [
                b / len(self.componentIndices) for i in self.componentIndices
            ]
            bounds = [(0, b) for i in self.componentIndices]
            result = diffev(self.sys_avail,
                            x0=x0,
                            bounds=bounds,
                            constraints=cf,
                            penalty=pf,
                            npop=10,
                            disp=True)
            #print(result)
            self.results.append(result)
            self.avail_.append(-self.sys_avail(result))
            self.budget_.append(b)

            #print(self.n_gamma(result))
            self.eta_.append(self.n_gamma(result))
        return self.results
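Examples #1 and #2 depend on project state (self.sys_avail, componentIndices, self.ba) that is not shown. A minimal, self-contained sketch of the same pattern, with a toy objective and an assumed budget value in place of the originals, looks roughly like this:

# Sketch only: toy objective and assumed budget, following the pattern above.
from mystic.symbolic import simplify, generate_constraint, generate_solvers
from mystic.symbolic import generate_penalty, generate_conditions
from mystic.solvers import diffev

budget = 100.0
equations = "x0 + x1 + x2 <= {}".format(budget)

cf = generate_constraint(generate_solvers(simplify(equations)))
pf = generate_penalty(generate_conditions(equations))

def objective(x):
    # toy stand-in for -availability: prefer an even split of the budget
    return sum((xi - budget / 3.0)**2 for xi in x)

x0 = [budget / 3.0] * 3
bounds = [(0.0, budget)] * 3
result = diffev(objective, x0=x0, bounds=bounds,
                constraints=cf, penalty=pf, npop=10, disp=False)
print(result)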
Example #2
def mysticCompute():
    #for b in range(10**5, budget,  ba):
    results = []
    for b in range(10**5, ba, int((ba - 10**5) / 20)):  # range needs an integer step
        equations = "{} <= {}".format("+".join((["x{}".format(i) for i in range(len(componentIndices))])), b)
        print(equations)
        cf = generate_constraint(generate_solvers(simplify(equations)))
        pf = generate_penalty(generate_conditions(equations))
        x0 = [b/len(componentIndices) for i in componentIndices]
        bounds = [(0,b) for i in componentIndices]
        result = diffev(sys, x0=x0, bounds=bounds, constraints=cf, penalty=pf, npop=10, disp=True)
        print(result)
        results.append(result)
        avail_.append(-sys(result))
        budget_.append(b)
                
        print(eta(result))
        eta_.append(eta(result))
    return results
Example #3
def test_as_constraint():

    from mystic.math.measures import mean, spread
    # imports presumably defined at module level in the original test file:
    from mystic.penalty import quadratic_equality
    from mystic.constraints import as_constraint

    def mean_constraint(x, target):
        return mean(x) - target

    def range_constraint(x, target):
        return spread(x) - target

    @quadratic_equality(condition=range_constraint, kwds={'target': 5.0})
    @quadratic_equality(condition=mean_constraint, kwds={'target': 5.0})
    def penalty(x):
        return 0.0

    ndim = 3
    constraints = as_constraint(penalty)  #, solver='fmin')
    #XXX: this is expensive to evaluate, as there are nested optimizations

    from numpy import arange
    x = arange(ndim)
    _x = constraints(x)

    assert round(mean(_x)) == 5.0
    assert round(spread(_x)) == 5.0
    assert round(penalty(_x)) == 0.0

    def cost(x):
        return abs(sum(x) - 5.0)

    npop = ndim * 3
    from mystic.solvers import diffev
    y = diffev(cost, x, npop, constraints=constraints, disp=False, gtol=10)

    assert round(mean(y)) == 5.0
    assert round(spread(y)) == 5.0
    assert round(cost(y)) == 5.0 * (ndim - 1)
Example #4
def test_as_constraint():

  from mystic.math.measures import mean, spread
  # imports presumably defined at module level in the original test file:
  from mystic.penalty import quadratic_equality
  from mystic.constraints import as_constraint
  def mean_constraint(x, target):
    return mean(x) - target

  def range_constraint(x, target):
    return spread(x) - target

  @quadratic_equality(condition=range_constraint, kwds={'target':5.0})
  @quadratic_equality(condition=mean_constraint, kwds={'target':5.0})
  def penalty(x):
    return 0.0

  ndim = 3
  constraints = as_constraint(penalty, solver='fmin')
  #XXX: this is expensive to evaluate, as there are nested optimizations

  from numpy import arange
  x = arange(ndim)
  _x = constraints(x)
  
  assert round(mean(_x)) == 5.0
  assert round(spread(_x)) == 5.0
  assert round(penalty(_x)) == 0.0

  def cost(x):
    return abs(sum(x) - 5.0)

  npop = ndim*3
  from mystic.solvers import diffev
  y = diffev(cost, x, npop, constraints=constraints, disp=False, gtol=10)

  assert round(mean(y)) == 5.0
  assert round(spread(y)) == 5.0
  assert round(cost(y)) == 5.0*(ndim-1)
Example #5
from mystic import suppressed


@suppressed(1e-5)
def conserve(x):
    return constrain(x)


#from mystic.monitors import VerboseMonitor
#mon = VerboseMonitor(10)

# solve the dual for alpha
from mystic.solvers import diffev
alpha = diffev(objective, list(zip(lb,ub)), args=(Q,b), npop=nx*3, gtol=200, \
#              itermon=mon, \
               ftol=1e-8, bounds=list(zip(lb,ub)), constraints=conserve, disp=1)

print('solved x: %s' % alpha)
print("constraint A*x == 0: %s" % inner(Aeq, alpha))
print("minimum 0.5*x'Qx + b'*x: %s" % objective(alpha, Q, b))

# calculate weight vectors, support vectors, and bias
wv = WeightVector(alpha, X, y)
sv1, sv2 = SupportVectors(alpha, y, epsilon=1e-6)
bias = Bias(alpha, X, y)

ym = (y.flatten() < 0).nonzero()[0]
yp = (y.flatten() > 0).nonzero()[0]
ii = inner(wv, X)
bias2 = -0.5 * (max(ii[ym]) + min(ii[yp]))
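The helpers used above (inner, WeightVector, SupportVectors, Bias) are not defined in the fragment; presumably they come from numpy and mystic's SVM support module, along these lines:

# presumed imports for the helpers used in Example #5 (not shown in the fragment)
from numpy import inner
from mystic.svc import WeightVector, SupportVectors, Bias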
Example #6
sol = my.fmin_powell(objective,
                     x0,
                     constraint=cons,
                     penalty=pens,
                     disp=False,
                     bounds=bounds,
                     gtol=3,
                     ftol=1e-6,
                     full_output=True,
                     args=(-1, ))

assert mm.almostEqual(sol[0], xs, tol=1e-2)
assert mm.almostEqual(sol[1], ys, tol=1e-2)

sol = my.diffev(objective,
                bounds,
                constraint=cons,
                penalty=pens,
                disp=False,
                bounds=bounds,
                npop=15,
                gtol=100,
                ftol=1e-6,
                full_output=True,
                args=(-1, ))

assert mm.almostEqual(sol[0], xs, tol=1e-2)
assert mm.almostEqual(sol[1], ys, tol=1e-2)

# EOF
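Examples #6 (above) and #12 (below) use the aliases my, mm, ms, and np without showing the imports; presumably they stand for:

# presumed aliases for the `my`, `mm`, `ms`, and `np` prefixes used in Examples #6 and #12
import numpy as np
import mystic.solvers as my    # fmin_powell, diffev
import mystic.math as mm       # almostEqual
import mystic.symbolic as ms   # generate_conditions, generate_penalty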
Example #7
from mystic.symbolic import linear_symbolic, solve, \
     generate_solvers as solvers, generate_constraint as constraint
constrain = linear_symbolic(Aeq,Beq)
constrain = constraint(solvers(solve(constrain,target=['x0'])))

from mystic import suppressed
@suppressed(1e-5)
def conserve(x):
    return constrain(x)

from mystic.monitors import VerboseMonitor
mon = VerboseMonitor(10)

# solve for alpha
from mystic.solvers import diffev
alpha = diffev(objective, list(zip(lb,.1*ub)), args=(Q,b), npop=N*3, gtol=200, \
               itermon=mon, \
               ftol=1e-5, bounds=list(zip(lb,ub)), constraints=conserve, disp=1)

print('solved x: %s' % alpha)
print("constraint A*x == 0: %s" % inner(Aeq, alpha))
print("minimum 0.5*x'Qx + b'*x: %s" % objective(alpha, Q, b))

# calculate support vectors and regression function
sv1 = SupportVectors(alpha[:nx])
sv2 = SupportVectors(alpha[nx:])
R = RegressionFunction(x, y, alpha, svr_epsilon, LinearKernel)

print('support vectors: %s %s' % (sv1, sv2))

# plot data
pylab.plot(x, y, 'k+', markersize=10)
Example #8

if __name__ == '__main__':

    print("Differential Evolution")
    print("======================")

    # set range for random initial guess
    ndim = 9
    x0 = [(-100, 100)] * ndim
    random_seed(321)

    # draw frame and exact coefficients
    plot_exact()

    # use DE to solve 8th-order Chebyshev coefficients
    npop = 10 * ndim
    solution = diffev(chebyshev8cost, x0, npop)

    # use pretty print for polynomials
    print(poly1d(solution))

    # compare solution with actual 8th-order Chebyshev coefficients
    print("\nActual Coefficients:\n %s\n" % poly1d(chebyshev8coeffs))

    # plot solution versus exact coefficients
    plot_solution(solution)
    getch()

# end of file
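Dropping the plotting helpers (plot_exact, plot_solution, getch) defined in the example file, a self-contained version of the Chebyshev fit is roughly as follows; the model imports assume mystic's bundled mystic.models.poly module:

# sketch: fit 8th-order Chebyshev coefficients with differential evolution
from numpy import poly1d
from mystic.solvers import diffev
from mystic.models.poly import chebyshev8cost, chebyshev8coeffs
from mystic.tools import random_seed

ndim = 9
x0 = [(-100, 100)] * ndim        # range for the random initial population
random_seed(321)

solution = diffev(chebyshev8cost, x0, npop=10 * ndim, disp=False)

print(poly1d(solution))          # fitted coefficients
print(poly1d(chebyshev8coeffs))  # exact coefficients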
Example #9
File: example07.py Project: jcfr/mystic

if __name__ == '__main__':

    print "Differential Evolution"
    print "======================"

    # set range for random initial guess
    ndim = 9
    x0 = [(-100,100)]*ndim
    random_seed(321)

    # draw frame and exact coefficients
    plot_exact()

    # use DE to solve 8th-order Chebyshev coefficients
    npop = 10*ndim
    solution = diffev(chebyshev8cost,x0,npop)

    # use pretty print for polynomials
    print(poly1d(solution))

    # compare solution with actual 8th-order Chebyshev coefficients
    print "\nActual Coefficients:\n %s\n" % poly1d(chebyshev8coeffs)

    # plot solution versus exact coefficients
    plot_solution(solution)
    getch()

# end of file
Example #10
def main():
    range = [(-100.0, 100.0)] * ND
    solution = diffev(ChebyshevCost, range, NP, bounds=None, ftol=0.01,
                      maxiter=MAX_GENERATIONS, cross=1.0, scale=0.9)
    return solution
Example #11
# tail of a truncated objective (presumably the `maximize` passed to diffev below)
    print(info_gain)
    return (-1)*info_gain


# In[4]:


from time import time
import mystic
from mystic.solvers import diffev
from mystic.monitors import VerboseMonitor
mon = VerboseMonitor(10)
path_vars=[]
info_gains_path=[]
start_time=time()
res = diffev(maximize, x0=(450,900), bounds=bounds, maxiter=5, itermon=mon, disp=False, full_output=True, ftol=1e-3)
end_time=time()


# In[5]:


print(res)
print(end_time-start_time)


# In[6]:


import pickle
Example #12
pens = ms.generate_penalty(ms.generate_conditions(eqns), k=1e3)
bounds = [(0., None), (0., 4.)]

# get the objective
def objective(x):
  x = np.asarray(x)
  return x[0]**2 + 4*x[1]**2 - 32*x[1] + 64

x0 = np.random.rand(2)

# compare against the exact minimum
xs = np.array([2., 3.])
ys = objective(xs)


sol = my.fmin_powell(objective, x0, constraint=cons, penalty=pens, disp=False,
                     bounds=bounds, gtol=3, ftol=1e-6, full_output=True)

assert mm.almostEqual(sol[0], xs, tol=1e-2)
assert mm.almostEqual(sol[1], ys, tol=1e-2)


sol = my.diffev(objective, bounds, constraint=cons, penalty=pens, disp=False,
                bounds=bounds, npop=10, gtol=100, ftol=1e-6, full_output=True)

assert mm.almostEqual(sol[0], xs, tol=1e-2)
assert mm.almostEqual(sol[1], ys, tol=1e-2)


# EOF
Example #13
    def optimize(self, optversion, method = 'diff-ev', gtol = 5000, positive_weights: bool = True, x0 = None, bs0 = None, bounds = [0., 1.], noisebiasconstr = False, fb = 1., inv_variance = False, verbose = True, noiseparameter = 1.):
        '''
        Methods: diff-ev, SLSQP
        '''
        
        if verbose:
            print(f'Start optimization with {method}')

            if inv_variance:
                print('Using combined inverse variance weights')

        if x0 is None:
            x0 = []
            if self.lenestimators == 2:
                v = np.random.rand(1)/2
            elif self.lenestimators == 3:
                v = np.random.rand(2)/2
            elif self.lenestimators == 4:
                v = np.random.rand(3)/2
            for a in v:
                x0 += [a]
            x0 += [1.-np.sum(v)]
            x0 = np.array(x0*int(self.nbins))

        if bs0 is None:
            bs0 = np.ones(int(self.nbins))
            norma = self.integerate_discrete(bs0, self.ells_selected)
            bs0 /= norma
        
        dims = (self.lenestimators+1) if not inv_variance else self.lenestimators
        bnds = [(bounds[0], bounds[1]) for i in range(dims*self.nbins)]
        bnds = tuple(bnds)

        if not inv_variance:
            x0 = np.append(x0, bs0)

        #if positive_weights:
        #    cons = ({'type': 'eq', 'fun': self.get_constraint()}, {'type': 'ineq', 'fun': self.get_constraint_ineq()})
        #else:
        #    cons = ({'type': 'eq', 'fun': self.get_constraint()})


        weights_name = optversion['weights_name']

        if verbose:
            print(f'Doing for {weights_name}')

        sum_biases_squared = optversion['sum_biases_squared']
        abs_biases = optversion['abs_biases']
        bias_squared = optversion['bias_squared']

        if abs_biases:
            prepare = lambda x: abs(x)
        else:
            prepare = lambda x: x

        f, noisef, biasf = self.get_f_n_b(self.ells_selected, self.theory_selected, self.theta_selected, prepare(self.biases_selected), sum_biases_squared = sum_biases_squared, bias_squared = bias_squared, fb = fb, inv_variance = inv_variance, noiseparameter = noiseparameter)
        self.f = f
        self.noisef = noisef
        self.biasf = biasf

        extra_constraint = lambda x: abs(self.noisef(np.array(x))-self.biasf(np.array(x)))


        def constraint_eq(x):
            x = np.array(x)
            a = self.get_a(x, inv_variance)
            a[:, -1] = 1-np.sum(a[:, :-1], axis = 1)
            if not inv_variance:
                x[:-self.nbins] = a.flatten()
            else:
                x = a.flatten()
            return x

        def penalty1(x):
            x = np.array(x)
            b = x[-self.nbins:]
            res = self.integerate_discrete(b, self.ells_selected)
            return 1-res

        k = 1e20

        if noisebiasconstr:
            @quadratic_equality(condition=penalty1, k = k)
            @quadratic_equality(condition=extra_constraint, k = k)
            def penalty(x):
                return 0.0
        else:
            @quadratic_equality(condition=penalty1, k = k)
            def penalty(x):
                return 0.0

        if inv_variance:
            penalty = None 
            if noisebiasconstr:
                @quadratic_equality(condition=extra_constraint, k = k)
                def penalty(x):
                    return 0.0

        mon = VerboseMonitor(100)

        func = lambda x: f(np.array(x))
        result = my.diffev(func, x0, npop = 10*len(list(bnds)), bounds = bnds, ftol = 1e-11, gtol = gtol, maxiter = 1024**3, maxfun = 1024**3, constraints = constraint_eq, penalty = penalty, full_output=True, itermon=mon)

        result = Res(result[0], self.ells_selected)
        self.result = result
        
        ws = self.get_weights(result.x, inv_variance, verbose = verbose)        
        weights_per_l = self.get_final_variance_weights(result.x, self.ells_selected, self.theory_selected, self.theta_selected, inv_variance)

        result.set_weights(tuple(list(ws)+[weights_per_l]))
        
        self.monitor = mon
        
        return result
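The penalty in Example #13 is built by stacking quadratic_equality decorators with a large stiffness k; stripped of the class context, the pattern reduces to the following sketch with a toy condition:

# sketch: a stiff quadratic equality penalty, as used in Example #13
from mystic.penalty import quadratic_equality

def sums_to_one(x):
    # equality condition: zero when satisfied, penalized otherwise
    return sum(x) - 1.0

@quadratic_equality(condition=sums_to_one, k=1e20)
def penalty(x):
    return 0.0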
Example #14
from mystic.symbolic import linear_symbolic, solve, \
     generate_solvers as solvers, generate_constraint as constraint
constrain = linear_symbolic(Aeq,Beq)
constrain = constraint(solvers(solve(constrain,target=['x0'])))

from mystic import suppressed
@suppressed(1e-5)
def conserve(x):
    return constrain(x)

from mystic.monitors import VerboseMonitor
mon = VerboseMonitor(10)

# solve for alpha
from mystic.solvers import diffev
alpha = diffev(objective, list(zip(lb,.1*ub)), args=(Q,b), npop=N*3, gtol=200, \
               itermon=mon, \
               ftol=1e-5, bounds=list(zip(lb,ub)), constraints=conserve, disp=1)

print('solved x: %s' % alpha)
print("constraint A*x == 0: %s" % inner(Aeq, alpha))
print("minimum 0.5*x'Qx + b'*x: %s" % objective(alpha, Q, b))

# calculate support vectors and regression function
sv1 = SupportVectors(alpha[:nx])
sv2 = SupportVectors(alpha[nx:])
R = RegressionFunction(x, y, alpha, svr_epsilon, LinearKernel)

print('support vectors: %s %s' % (sv1, sv2))

# plot data
pylab.plot(x, y, 'k+', markersize=10)
Example #15
from mystic.symbolic import linear_symbolic, solve, \
     generate_solvers as solvers, generate_constraint as constraint
constrain = linear_symbolic(Aeq,Beq)
constrain = constraint(solvers(solve(constrain,target=['x0'])))

from mystic import suppressed
@suppressed(1e-5)
def conserve(x):
    return constrain(x)

#from mystic.monitors import VerboseMonitor
#mon = VerboseMonitor(1)

from mystic.solvers import diffev
alpha = diffev(objective, list(zip(lb,ub)), args=(H,f), npop=nx*3, gtol=200, \
#              itermon=mon, \
               ftol=1e-8, bounds=list(zip(lb,ub)), constraints=conserve, disp=1)

print('solved x: %s' % alpha)
print("constraint A*x == 0: %s" % inner(Aeq, alpha))
print("minimum 0.5*x'Hx + f'*x: %s" % objective(alpha, H, f))


# let's play. We will need to bootstrap the SMO with an initial
# state that belongs to the feasible set. Because of the special structure
# of SVMs, the zero vector suffices. We will use that here. 
# More generally, an initial point can be obtained via solving an LP.

def getIndexSets(alpha, a, b, y):
    """See Kerthi and Gilbert, and MathSVM.m"""
    # it is either one loop, or five smart ops.
Example #16
  if maxiter and maxfun is not None:
    assert my_x[2] == sp_x[-3]
    assert my_x[3] == sp_x[-2]
#   # test fcalls <= maxfun
#   assert my_x[3] <= maxfun
  if maxiter is not None:
    # test iters <= maxiter
    assert my_x[2] <= maxiter
  return 

if __name__ == '__main__':
  x0 = [0,0,0]

  # check solutions versus results based on the random_seed
# print "comparing against known results"
  sol = solvers.diffev(rosen, x0, npop=40, disp=0, full_output=True)
  assert almostEqual(sol[1], 0.0020640145337293249, tol=3e-3)
  sol = solvers.diffev2(rosen, x0, npop=40, disp=0, full_output=True)
  assert almostEqual(sol[1], 0.0017516784703663288, tol=3e-3)
  sol = solvers.fmin_powell(rosen, x0, disp=0, full_output=True)
  assert almostEqual(sol[1], 8.3173488898295291e-23)
  sol = solvers.fmin(rosen, x0, disp=0, full_output=True)
  assert almostEqual(sol[1], 1.1605792769954724e-09)

  solver2 = 'diffev2'
  for solver in ['diffev']:
#   print "comparing %s and %s from mystic" % (solver, solver2)
    test_solvers(solver, solver2, x0, npop=40)
    test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=0)
    test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=1)
    test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=2)
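For reference, with full_output=True the one-liner solvers return a tuple whose leading entries are the solution, the final cost, the iteration count, and the number of function evaluations, which is what the indexing in the asserts above relies on. A minimal check, using rosen from mystic.models:

# sketch: unpack the full_output tuple exercised by the asserts above
from mystic.models import rosen
from mystic.solvers import diffev

sol = diffev(rosen, [0, 0, 0], npop=40, disp=0, full_output=True)
xopt, fopt, iters, fcalls = sol[0], sol[1], sol[2], sol[3]
print(xopt, fopt, iters, fcalls)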
Example #17
from mystic import suppressed
@suppressed(1e-5)
def conserve(x):
    return constrain(x)


from mystic.monitors import VerboseMonitor

mon = VerboseMonitor(10)

# solve for alpha
from mystic.solvers import diffev

alpha = diffev(
    objective,
    list(zip(lb, 0.1 * ub)),
    args=(Q, b),
    npop=N * 3,
    gtol=400,
    itermon=mon,
    ftol=1e-5,
    bounds=list(zip(lb, ub)),
    constraints=conserve,
    disp=1,
)

print "solved x: ", alpha
print "constraint A*x == 0: ", inner(Aeq, alpha)
print "minimum 0.5*x'Qx + b'*x: ", objective(alpha, Q, b)

# calculate support vectors and regression function
sv1 = SupportVectors(alpha[:nx])
sv2 = SupportVectors(alpha[nx:])
R = RegressionFunction(x, y, alpha, svr_epsilon, pk)
Example #18
        assert my_x[3] == sp_x[-2]


#   # test fcalls <= maxfun
#   assert my_x[3] <= maxfun
    if maxiter is not None:
        # test iters <= maxiter
        assert my_x[2] <= maxiter
    return

if __name__ == '__main__':
    x0 = [0, 0, 0]

    # check solutions versus results based on the random_seed
    # print "comparing against known results"
    sol = solvers.diffev(rosen, x0, npop=40, disp=0, full_output=True)
    assert almostEqual(sol[1], 0.0020640145337293249, tol=3e-3)
    sol = solvers.diffev2(rosen, x0, npop=40, disp=0, full_output=True)
    assert almostEqual(sol[1], 0.0017516784703663288, tol=3e-3)
    sol = solvers.fmin_powell(rosen, x0, disp=0, full_output=True)
    assert almostEqual(sol[1], 8.3173488898295291e-23)
    sol = solvers.fmin(rosen, x0, disp=0, full_output=True)
    assert almostEqual(sol[1], 1.1605792769954724e-09)

    solver2 = 'diffev2'
    for solver in ['diffev']:
        #   print "comparing %s and %s from mystic" % (solver, solver2)
        test_solvers(solver, solver2, x0, npop=40)
        test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=0)
        test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=1)
        test_solvers(solver, solver2, x0, npop=40, maxiter=None, maxfun=2)
Example #19
def main():
    range = [(-100.0, 100.0)] * ND
    solution = diffev(ChebyshevCost, range, NP, bounds=None, ftol=0.01, maxiter=MAX_GENERATIONS, cross=1.0, scale=0.9)
    return solution