Example #1
    # Remaining entries of the 2x4 constraint Jacobian g_con
    # (rows: constraints g1, g2; columns: variables x1..x4).
    g_con[0,1] = -2.
    g_con[0,2] = 1.
    g_con[0,3] = 1.
    g_con[1,0] = 4.
    g_con[1,1] = 5.
    g_con[1,2] = 1.
    g_con[1,3] = 1.

    print(g_con)
    fail = 0

    return g_obj, g_con, fail


# Instantiate a pyOpt model
opt_prob = Optimization('simple LP',objfunc)

# Add variables
opt_prob.addVar('x1','c',lower=0.0,upper=numpy.inf,value=10.0)
opt_prob.addVar('x2','c',lower=0.0,upper=numpy.inf,value=10.0)
opt_prob.addVar('x3','c',lower=0.0,upper=numpy.inf,value=10.0)
opt_prob.addVar('x4','c',lower=0.0,upper=numpy.inf,value=10.0)

# Add objective name
opt_prob.addObj('f')

# Add constraints
opt_prob.addCon('g1','e',equal=15.0)
opt_prob.addCon('g2','e',equal=36.0)

# Print all of the above info
print(opt_prob)
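The snippet above starts partway through the gradient callback and never shows the objfunc that is passed to Optimization. As a rough guide only, here is a minimal sketch of what such a callback looks like in pyOpt for this "simple LP"; the objective coefficients and the first Jacobian entry g_con[0,0] are not visible in the snippet, so the corresponding values below are assumptions, not the original example's.

def objfunc(x):
    # Hypothetical linear objective: the real coefficients are not shown above.
    f = x[0] + x[1] + x[2] + x[3]
    g = [0.0] * 2
    # Linear equality constraints. The -2., 1., 1. and 4., 5., 1., 1. coefficients
    # match the Jacobian rows assigned in the gradient above; the 3.*x[0] term
    # stands in for the unknown g_con[0,0] entry and is purely illustrative.
    g[0] = 3.*x[0] - 2.*x[1] + x[2] + x[3]   # forced to 15.0 by addCon('g1','e',equal=15.0)
    g[1] = 4.*x[0] + 5.*x[1] + x[2] + x[3]   # forced to 36.0 by addCon('g2','e',equal=36.0)
    fail = 0
    return f, g, fail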
Example #2

def gradfunc(x, f, g):

    # Gradient of the objective with respect to x1 and x2.
    g_obj = [0.0] * 2
    g_obj[0] = 2. * x[0]
    g_obj[1] = 2. * x[1]

    # No constraints are added to this problem, so no constraint Jacobian is returned.
    g_con = None

    fail = 0
    return g_obj, g_con, fail


# Instantiate a pyOpt model
opt_prob = Optimization('simple QP', objfunc)

# Add variables
opt_prob.addVar('x1', 'c', lower=-numpy.inf, upper=numpy.inf, value=10.0)
opt_prob.addVar('x2', 'c', lower=-numpy.inf, upper=numpy.inf, value=10.0)

# Add objective name
opt_prob.addObj('f')

# Print all of the above info
print(opt_prob)

# Choose a sensitivity type
sens_type = gradfunc

# Convert the pyOpt model to a NLPy model
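As in the first example, the objfunc handed to Optimization('simple QP', objfunc) is not shown. The gradient returned by gradfunc is (2*x[0], 2*x[1]), which is consistent with f = x1**2 + x2**2 up to an additive constant, so the sketch below assumes that objective; since no constraints are added, it returns an empty constraint list.

def objfunc(x):
    # Assumed objective, consistent with the gradient above (up to a constant).
    f = x[0]**2 + x[1]**2
    g = []        # unconstrained problem: no constraint values to return
    fail = 0
    return f, g, fail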
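The snippet stops at the comment about converting the pyOpt model to an NLPy model, so the conversion call itself is never shown and the exact NLPy adapter API is not confirmed here. As a point of comparison only, the problem as defined above can be solved directly with one of pyOpt's own optimizers; this sketch assumes pyOpt's bundled SLSQP wrapper and passes the gradient callback through sens_type the same way the snippet sets it up.

from pyOpt import SLSQP

slsqp = SLSQP()
# sens_type was set to gradfunc above, so user-supplied gradients are used
# instead of finite differences.
fstr, xstr, inform = slsqp(opt_prob, sens_type=sens_type)
print(opt_prob.solution(0))   # summary of the stored solution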