Example #1
from openopt import NLP
from numpy import cos, arange, zeros
N = 30
M = 5
ff = lambda x: ((x-M)**2).sum()
p = NLP(ff, cos(arange(N)))

def df(x):
    r = 2*(x - M)
    r[0] += 15  # incorrect derivative
    r[8] += 80  # incorrect derivative
    return r
p.df = df

p.c = lambda x: [2*x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]

def dc(x):
    r = zeros((2, p.n))
    r[0, 0] = 2 * 4 * x[0]**3
    r[1, 1] = 2 * x[1]
    r[1, 2] = 2 * x[2] + 15  # incorrect derivative
    return r
p.dc = dc

p.h = lambda x: (1e1*(x[-1]-1)**4, (x[-2]-1.5)**4)

def dh(x):
    r = zeros((2, p.n))
    r[0, -1] = 1e1 * 4 * (x[-1] - 1)**3
    r[1, -2] = 4 * (x[-2] - 1.5)**3 + 15  # incorrect derivative
    return r
p.dh = dh
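
# The entries flagged "incorrect derivative" above suggest this fragment feeds
# OpenOpt's derivative checker; presumably it continues roughly like this
# (a sketch, not part of the original fragment):
p.checkdf()   # compare p.df against finite-difference estimates
p.checkdc()   # same for the inequality constraint gradients
p.checkdh()   # same for the equality constraint gradients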
Example #2
# so p.b = array([1.5, -1.6, -825]) or p.b = (1.5, -1.6, -825) are valid as well


# Aeq x = beq
# x20+x21 = 2.5
p.Aeq = zeros(N)
p.Aeq[20:22] = 1
p.beq = 2.5


# non-linear inequality constraints c(x) <= 0
# 2*x0^4 <= 1/32
# x1^2+x2^2 <= 1/8
# x25^2 +x25*x35 + x35^2<= 2.5

p.c = lambda x: [2*x[0]**4 - 1./32, x[1]**2 + x[2]**2 - 1./8, x[25]**2 + x[35]**2 + x[25]*x[35] - 2.5]
# other valid c:
# p.c = [lambda x: c1(x), lambda x : c2(x), lambda x : c3(x)]
# p.c = (lambda x: c1(x), lambda x : c2(x), lambda x : c3(x))
# p.c = lambda x: numpy.array([c1(x), c2(x), c3(x)])
# def c(x):
#      return c1(x), c2(x), c3(x)
# p.c = c


# dc(x)/dx: gradients of the non-linear inequality constraints (optional):
def DC(x):
    r = zeros((3, N))
    r[0, 0] = 2 * 4 * x[0]**3
    r[1, 1] = 2 * x[1]
    r[1, 2] = 2 * x[2]
    # third constraint x25^2 + x25*x35 + x35^2 - 2.5:
    r[2, 25] = 2 * x[25] + x[35]
    r[2, 35] = 2 * x[35] + x[25]
    return r
p.dc = DC
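
# The surrounding setup (objective, start point, N) did not survive this
# fragment; below is a minimal self-contained sketch of the same machinery.
# The objective, N = 50 and the 'ralg' solver are illustrative assumptions only:
from openopt import NLP
from numpy import zeros, ones

N = 50                                    # any N > 35 fits the x25/x35 constraint
p = NLP(lambda x: (x**2).sum(), ones(N))  # placeholder objective and start point
p.Aeq = zeros(N)
p.Aeq[20:22] = 1                          # x20 + x21 = 2.5, as above
p.beq = 2.5
p.c = lambda x: [2*x[0]**4 - 1./32, x[25]**2 + x[35]**2 + x[25]*x[35] - 2.5]
r = p.solve('ralg')                       # 'ralg' is OpenOpt's built-in NLP solver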
Example #3
        # Note that in ALGENCAN gradtol means the norm of the projected gradient of the Augmented Lagrangian,
        # so it should be something like 1e-3...1e-5
        p.gradtol = 1e-5  # gradient stop criterion (default for NLP is 1e-6)
        #print 'maxiter', p.maxiter
        #print 'maxfun', p.maxfun
        p.maxIter = 50
        # p.maxfun = 100

        # p.df_iter = 50
        p.maxTime = 4000
        h_args = (h, k, l, fq, fqerr, x, z, cosmat_list, coslist, flist)

        if 0:  # disabled variant, kept for reference
            #p.h=[pos_sum,neg_sum]
            p.h = [pos_sum, neg_sum]
            p.c = [chisq]
            #    p.h=[pos_sum,neg_sum]
            p.args.h = h_args
            p.args.c = h_args
            p.dh = [pos_sum_grad, neg_sum_grad]
            p.df = chisq_grad
        if 1:  # active variant: chisq enters as an inequality constraint
            #p.h=[pos_sum,neg_sum,chisq]
            p.c = [chisq]
            p.h = [pos_sum, neg_sum]
            p.args.h = h_args
            p.args.c = h_args
            p.dh = [pos_sum_grad, neg_sum_grad]
            p.dc = chisq_grad
            #p.dh=[pos_sum_grad,neg_sum_grad,neg_sum_grad]
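
# None of the names above (pos_sum, chisq, h_args, ...) survive in this
# fragment; a minimal self-contained sketch of the same stop-criteria setup
# follows. The toy objective and the 'ralg' solver are assumptions only:
from openopt import NLP
p = NLP(lambda x: ((x - 1)**2).sum(), [0.0, 0.0])
p.gradtol = 1e-5      # looser than the 1e-6 default, per the ALGENCAN note above
p.maxIter = 50
p.maxTime = 4000      # seconds
r = p.solve('ralg')   # ALGENCAN itself requires a separate installation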
Example #4
"""
Example of using additional parameters for user f, c, h functions.
Note: for oofuns, user parameters are handled the same way:
my_oofun.args = (...)
They will be passed to the derivative function as well (if you have supplied one).
"""

from openopt import NLP
from numpy import asfarray

f = lambda x, a: (x**2).sum() + a * x[0]**4
x0 = [8, 15, 80]
p = NLP(f, x0)

# using c(x) <= 0 constraints
p.c = lambda x, b, c: (x[0] - 4)**2 - 1 + b * x[1]**4 + c * x[2]**4

# using h(x) = 0 constraints
p.h = lambda x, d: (x[2] - 4)**2 + d * x[2]**4 - 15

p.args.f = 4  # i.e. here we use a = 4
# so this is the same as "a = 4; p.args.f = a" or just "p.args.f = a = 4"

p.args.c = (1, 2)

p.args.h = 15

# Note 1: using the tuple p.args.h = (15,) is valid as well

# Note 2: if all your funcs use the same args, you can just use
# p.args = (your args)
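
# A minimal sketch of Note 2's shared-args form: when every user function takes
# the same extra argument, one assignment covers f, c and h alike (the redefined
# c and h below are illustrative assumptions, not the original ones):
from openopt import NLP

f = lambda x, a: (x**2).sum() + a * x[0]**4
p = NLP(f, [8, 15, 80])
p.c = lambda x, a: (x[0] - 4)**2 - 1 + a * x[1]**4
p.h = lambda x, a: (x[2] - 4)**2 + a * x[2]**4 - 15
p.args = (4,)        # passed to f, c and h (and to their derivatives, if supplied)
r = p.solve('ralg')  # 'ralg' assumed for illustration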