Code Example #1
File: nerdy.py Project: bowlofstew/historical_data
from openopt import NLP
import numpy as np

def wls_fit(function, initial_guess, X, Y, weights=None, lb=None, ub=None):
    """Weighted least-squares fit of `function` to the data (X, Y).

    function is of the form:
        def function(coeffs, xdata)
    and returns the model prediction at xdata.
    weights is optional (defaults to an unweighted fit); lb/ub are
    optional box bounds on the coefficients.
    Returns the fitted coefficient array.
    """

    if weights is None:
        weights = np.ones(len(X))  # unweighted fit by default

    def penalty(c):
        # weighted sum of squared residuals
        fit = function(c, X)
        error = (weights * (np.asarray(Y) - fit) ** 2).sum()
        return error

    problem = NLP(penalty, initial_guess)

    if lb is not None:
        problem.lb = lb
    if ub is not None:
        problem.ub = ub

    solver = 'ipopt'
    result = problem.solve(solver)

    coeffs = result.xf
    return coeffs
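A minimal usage sketch (the linear model and the synthetic data below are hypothetical, for illustration only; the ipopt solver hardcoded inside wls_fit must be installed):

import numpy as np

def model(coeffs, xdata):
    # hypothetical model: y = c0 + c1*x
    return coeffs[0] + coeffs[1] * np.asarray(xdata)

X = np.linspace(0.0, 10.0, 50)
Y = 2.0 + 3.0 * X  # synthetic, noise-free data
coeffs = wls_fit(model, [0.0, 1.0], X, Y)  # should approach [2.0, 3.0]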
Code Example #2
File: maxent_test.py Project: liuhuiwisdom/WRed
    #    print 'pos',pos_sum(p0)
    #    print 'neg',neg_sum(p0)
    p = NLP(Entropy, p0, maxIter=1e3, maxFunEvals=1e5)
    #p = NLP(chisq, p0, maxIter = 1e3, maxFunEvals = 1e5)
    # f(x) gradient (optional):
    #    p.df = S_grad
    #    p.d2f=S_hessian
    #    p.userProvided.d2f=True

    # lb<= x <= ub:
    # x4 <= -2.5
    # 3.5 <= x5 <= 4.5
    # all other: lb = -5, ub = +15
    #p.lb =1e-7*N.ones(p.n)
    #p.ub = N.ones(p.n)
    p.lb = 1e-7 * N.ones(p0.shape)
    p.ub = N.ones(p0.shape)
    #p.ub[4] = -2.5
    #p.lb[5], p.ub[5] = 3.5, 4.5

    # non-linear inequality constraints c(x) <= 0
    # 2*x0^4 <= 1/32
    # x1^2+x2^2 <= 1/8
    # x25^2 +x25*x35 + x35^2<= 2.5

    #p.c = lambda x: [2*x[0]**4 - 1./32, x[1]**2 + x[2]**2 - 1./8, x[25]**2 + x[25]*x[35] + x[35]**2 - 2.5]
    # other valid c:
    # p.c = [lambda x: c1(x), lambda x : c2(x), lambda x : c3(x)]
    # p.c = (lambda x: c1(x), lambda x : c2(x), lambda x : c3(x))
    # p.c = lambda x: numpy.array([c1(x), c2(x), c3(x)])
    # def c(x):
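The snippet is cut off mid-comment; the function form it was about to illustrate is presumably equivalent to the following (hypothetical completion, consistent with the alternatives listed above):

    # def c(x):
    #     return 2*x[0]**4 - 1./32, x[1]**2 + x[2]**2 - 1./8, \
    #            x[25]**2 + x[25]*x[35] + x[35]**2 - 2.5
    # p.c = c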
Code Example #3
File: nlp_2.py Project: AlbertHolmes/openopt (the same code also appears in javafx2010/OOSuite)
# (the opening of dc was cut off in this snippet; it is reconstructed here so the
#  fragment runs -- row 0 is assumed to be the gradient of a first constraint
#  2*x[0]**4 - 32 <= 0, by analogy with the GUI example below)
def dc(x):
    r = zeros((2, p.n))
    r[0, 0] = 8 * x[0]**3
    r[1, 1] = 2 * x[1]
    r[1, 2] = 2 * x[2]
    return r
p.dc = dc

# nonlinear equality constraints h(x) = 0:
h1 = lambda x: 1e1 * (x[-1] - 1)**4
h2 = lambda x: (x[-2] - 1.5)**4
p.h = lambda x: (h1(x), h2(x))

def dh(x):
    r = zeros((2, p.n))
    r[0,-1] = 1e1*4*(x[-1]-1)**3
    r[1,-2] = 4*(x[-2]-1.5)**3
    return r
p.dh = dh

p.lb = -6*ones(N)
p.ub = 6*ones(N)
p.lb[3] = 5.5
p.ub[4] = 4.5

#r = p.solve('ipopt', showLS=0, xtol=1e-7, maxIter = 1504)
#solver = 'ipopt'
solver = 'ralg'
#solver = 'scipy_slsqp'
#solver = 'algencan'
r = p.solve(solver, maxIter=1504, plot=1)
#!! fmin_cobyla can't use user-supplied gradient
#r = p.solve('scipy_cobyla')
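The fragment above assumes an existing problem object p; a hypothetical self-contained preamble (reconstructed by analogy with the GUI example below, not taken from the original nlp_2.py) could be:

from openopt import NLP
from numpy import cos, arange, ones, zeros
N = 50  # assumed problem size
p = NLP(lambda x: ((x - 1)**2).sum(), cos(arange(N)))  # assumed objective
# two nonlinear inequality constraints c(x) <= 0, matching the Jacobian dc above:
p.c = lambda x: [2 * x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]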

Code Example #4
File: maxent_test_iter2.py Project: ourobouros/WRed
    p0 = N.ones(M * 2) / M  # initial guess (from the maxent_test_iter2.py variant of this snippet)
    if 1:
        print len(p0)
        lowerm = 1e-4 * N.ones(len(p0))
        #lowerm[0:3]=[-1,-1,-1]
        upperm = N.ones(len(p0))
    # manual toggles: enable exactly one of the NLP(...) constructions below
    if 1:
        p = NLP(Entropy, p0, maxIter=1e3, maxFunEvals=1e5)

    if 0:
        p = NLP(chisq, p0, maxIter=1e3, maxFunEvals=1e5)

    if 0:
        p = NLP(max_wrap, p0, maxIter=1e3, maxFunEvals=1e5)
    if 0:
        p.lb = lowerm
        p.ub = upperm
        # extra positional arguments forwarded to the objective function
        p.args.f = (h, k, l, fq, fqerr, x, z, cosmat_list, coslist, flist)
        p.plot = 0
        p.iprint = 1
        p.contol = 1e-5  # required constraints tolerance, default for NLP is 1e-6

        # for the ALGENCAN solver, gradtol is the only stop criterion connected to openopt
        # (except maxfun, maxiter)
        # note that in ALGENCAN gradtol means the norm of the projected gradient of the
        # Augmented Lagrangian, so it should be something like 1e-3...1e-5
        p.gradtol = 1e-5  # gradient stop criterion (default for NLP is 1e-6)
        #print 'maxiter', p.maxiter
        #print 'maxfun', p.maxfun
        p.maxIter = 50
        #    p.maxfun=100
Code Example #5
File: GUI_1.py Project: javafx2010/OOSuite (the same code also appears in AlbertHolmes/openopt)
"""
OpenOpt GUI:
     function manage() usage example
"""

from openopt import NLP, manage
from numpy import cos, arange, ones, asarray, abs, zeros
N = 50
M = 5
p = NLP(lambda x: ((x - M)**2).sum(), cos(arange(N)))
p.lb, p.ub = -6 * ones(N), 6 * ones(N)
p.lb[3] = 5.5
p.ub[4] = 4.5
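# two nonlinear inequality constraints c(x) <= 0 and two nonlinear
# equality constraints h(x) = 0: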
p.c = lambda x: [2 * x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]
p.h = (lambda x: 1e1 * (x[-1] - 1)**4, lambda x: (x[-2] - 1.5)**4)
"""
minTime is used here
for to provide enough time for user
to play with GUI
"""

minTime = 1.5  # sec
p.name = 'GUI_example'
p.minTime = minTime
"""
hence maxIter, maxFunEvals etc
will not trigger till minTime

only same iter point x_k-1=x_k
or some coords = nan
can stop calculations
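The fragment ends before the GUI is actually started; presumably the full example hands the problem to manage() afterwards, along these lines (hypothetical, the actual call is not shown in this snippet):

# r = manage(p, 'ralg', plot=1)  # hypothetical continuation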
Code Example #6
File: nlp_ALGENCAN.py Project: javafx2010/OOSuite
from openopt import NLP
from numpy import cos, arange, ones, zeros

N = 50
# objective:
# (x0-1)^4 + (x1-1)^4 + ... + (x49-1)^4 -> min   (N = nVars = 50)
f = lambda x: ((x - 1)**4).sum()
x0 = cos(arange(N))
p = NLP(f, x0, maxIter=1e3, maxFunEvals=1e5)

# f(x) gradient (optional):
p.df = lambda x: 4*(x-1)**3


# lb<= x <= ub:
# x4 <= -2.5
# 3.5 <= x5 <= 4.5
# all other: lb = -5, ub = +15
p.lb = -5*ones(N)
p.ub = 15*ones(N)
p.ub[4] = -2.5
p.lb[5], p.ub[5] = 3.5, 4.5



# linear inequality constraints A x <= b:
# x9 + x19 <= 1.5
# x10 + x11 >= 1.6   (i.e. -x10 - x11 <= -1.6)
# x0 + ... + x49 >= 1.1*N   (i.e. -(x0 + ... + x49) <= -1.1*N)
p.A = zeros((3, N))
p.A[0, 9] = 1
p.A[0, 19] = 1
p.A[1, 10:12] = -1
p.A[2] = -ones(N)
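The snippet is cut off before the right-hand side b is set; the values implied by the three constraint comments above would be:

p.b = [1.5, -1.6, -1.1 * N]  # implied by the constraints listed above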
Code Example #7
File: nlp_4.py Project: AlbertHolmes/openopt
from openopt import NLP

from numpy import cos, arange, ones, asarray, zeros, mat, array, sin, sign, abs, inf
N = 1500
K = 50
# 1st arg - objective function
# 2nd arg - x0

p = NLP(lambda x: abs(x - 5).sum(), 8 * cos(arange(N)), iprint=50, maxIter=1e3)

# f(x) gradient (optional):
p.df = lambda x: sign(x - 5)

p.lb = 5 * ones(N) + sin(arange(N)) - 0.1
p.ub = 5 * ones(N) + sin(arange(N)) + 0.1
p.lb[:N // 4] = -inf      # no lower bound on the first quarter of the variables
p.ub[3 * N // 4:] = inf   # no upper bound on the last quarter

#p.ub[4] = 4
#p.lb[5], p.ub[5] = 8, 15

#A = zeros((K, N))
#b = zeros(K)
#for i in xrange(K):
#    A[i] = 1+cos(i+arange(N))
#    b[i] = sin(i)
#p.A = A
#p.b = b

#p.Aeq = zeros(p.n)
#p.Aeq[100:102] = 1
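The fragment is cut off inside the commented-out Aeq block (p.beq is never reached) and before any solver call; a plausible continuation, by analogy with the other examples here, would be (hypothetical, not part of the original snippet):

# r = p.solve('ralg')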