Example #1
    def pipslopf_solver(self):
        pb = self.pb
        # initial point: stack voltage angles/magnitudes and generator P/Q
        pb['x0'] = r_[self.var['Va'], self.var['Vm'], self.var['Pg'],
                      self.var['Qg']]
        f_fcn = lambda x: self.admmopf_costfcn(x)
        gh_fcn = lambda x: self.admmopf_consfcn(x)
        hess_fcn = lambda x, lmbda: self.admmopf_hessfcn(x, lmbda)

        pb['solution'] = pips(f_fcn, pb['x0'], pb['A'], pb['l'], pb['u'],
                              pb['xmin'], pb['xmax'], gh_fcn, hess_fcn)
        # print("Subproblem %d is solved" % (self.ID,))
        return pb['solution']
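
pips returns its result as a single dict; the snippet above simply stores it in pb['solution']. A hedged sketch of how a caller might inspect that dict (the key names follow the way the later examples unpack the pips return value):

    sol = pb['solution']                 # dict returned by pips
    converged = sol['eflag'] > 0         # eflag > 0 indicates convergence
    x_opt, f_opt = sol['x'], sol['f']    # optimal point and objective value
    lmbda = sol['lmbda']                 # Lagrange / Kuhn-Tucker multipliers
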
Example #2
def pipsopf_solver(om, ppopt, out_opt=None):
    """Solves AC optimal power flow using PIPS.

    Inputs are an OPF model object, a PYPOWER options vector and
    a dict containing keys (can be empty) for each of the desired
    optional output fields.

    Outputs are a C{results} dict, C{success} flag and C{raw} output dict.

    C{results} is a PYPOWER case dict (ppc) with the usual baseMVA, bus,
    branch, gen, gencost fields, along with the following additional
    fields:
        - C{order}      see 'help ext2int' for details of this field
        - C{x}          final value of optimization variables (internal order)
        - C{f}          final objective function value
        - C{mu}         shadow prices on ...
            - C{var}
                - C{l}  lower bounds on variables
                - C{u}  upper bounds on variables
            - C{nln}
                - C{l}  lower bounds on nonlinear constraints
                - C{u}  upper bounds on nonlinear constraints
            - C{lin}
                - C{l}  lower bounds on linear constraints
                - C{u}  upper bounds on linear constraints

    C{success} is C{True} if solver converged successfully, C{False} otherwise

    C{raw} is a raw output dict in the form returned by MINOS
        - xr     final value of optimization variables
        - pimul  constraint multipliers
        - info   solver specific termination code
        - output solver specific output information

    @see: L{opf}, L{pips}

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
    Autonoma de Manizales)
    @author: Richard Lincoln
    """
    ##----- initialization -----
    ## optional output
    if out_opt is None:
        out_opt = {}

    ## options
    verbose = ppopt['VERBOSE']
    feastol = ppopt['PDIPM_FEASTOL']
    gradtol = ppopt['PDIPM_GRADTOL']
    comptol = ppopt['PDIPM_COMPTOL']
    costtol = ppopt['PDIPM_COSTTOL']
    max_it  = ppopt['PDIPM_MAX_IT']
    max_red = ppopt['SCPDIPM_RED_IT']
    step_control = (ppopt['OPF_ALG'] == 565)  ## OPF_ALG == 565, PIPS-sc
    if feastol == 0:
        feastol = ppopt['OPF_VIOLATION']
    opt = {  'feastol': feastol,
             'gradtol': gradtol,
             'comptol': comptol,
             'costtol': costtol,
             'max_it': max_it,
             'max_red': max_red,
             'step_control': step_control,
             'cost_mult': 1e-4,
             'verbose': verbose  }

    ## unpack data
    ppc = om.get_ppc()
    baseMVA, bus, gen, branch, gencost = \
        ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"], ppc["gencost"]
    vv, _, nn, _ = om.get_idx()

    ## problem dimensions
    nb = bus.shape[0]          ## number of buses
    nl = branch.shape[0]       ## number of branches
    ny = om.getN('var', 'y')   ## number of piece-wise linear costs

    ## linear constraints
    A, l, u = om.linear_constraints()

    ## bounds on optimization vars
    _, xmin, xmax = om.getv()

    ## build admittance matrices
    Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)

    ## try to select an interior initial point
    ll, uu = xmin.copy(), xmax.copy()
    ll[xmin == -Inf] = -1e10   ## replace Inf with numerical proxies
    uu[xmax ==  Inf] =  1e10
    x0 = (ll + uu) / 2
    Varefs = bus[bus[:, BUS_TYPE] == REF, VA] * (pi / 180)
    ## angles set to first reference angle
    x0[vv["i1"]["Va"]:vv["iN"]["Va"]] = Varefs[0]
    if ny > 0:
        ipwl = find(gencost[:, MODEL] == PW_LINEAR)
#         PQ = r_[gen[:, PMAX], gen[:, QMAX]]
#         c = totcost(gencost[ipwl, :], PQ[ipwl])
        c = gencost.flatten('F')[sub2ind(gencost.shape, ipwl, NCOST+2*gencost[ipwl, NCOST])]    ## largest y-value in CCV data
        x0[vv["i1"]["y"]:vv["iN"]["y"]] = max(c) + 0.1 * abs(max(c))
#        x0[vv["i1"]["y"]:vv["iN"]["y"]] = c + 0.1 * abs(c)

    ## find branches with flow limits
    il = find((branch[:, RATE_A] != 0) & (branch[:, RATE_A] < 1e10))
    nl2 = len(il)           ## number of constrained lines

    ##-----  run opf  -----
    f_fcn = lambda x, return_hessian=False: opf_costfcn(x, om, return_hessian)
    gh_fcn = lambda x: opf_consfcn(x, om, Ybus, Yf[il, :], Yt[il,:], ppopt, il)
    hess_fcn = lambda x, lmbda, cost_mult: opf_hessfcn(x, lmbda, om, Ybus, Yf[il, :], Yt[il, :], ppopt, il, cost_mult)

    solution = pips(f_fcn, x0, A, l, u, xmin, xmax, gh_fcn, hess_fcn, opt)
    x, f, info, lmbda, output = solution["x"], solution["f"], \
            solution["eflag"], solution["lmbda"], solution["output"]

    success = (info > 0)

    ## update solution data
    Va = x[vv["i1"]["Va"]:vv["iN"]["Va"]]
    Vm = x[vv["i1"]["Vm"]:vv["iN"]["Vm"]]
    Pg = x[vv["i1"]["Pg"]:vv["iN"]["Pg"]]
    Qg = x[vv["i1"]["Qg"]:vv["iN"]["Qg"]]

    V = Vm * exp(1j * Va)

    ##-----  calculate return values  -----
    ## update voltages & generator outputs
    bus[:, VA] = Va * 180 / pi
    bus[:, VM] = Vm
    gen[:, PG] = Pg * baseMVA
    gen[:, QG] = Qg * baseMVA
    gen[:, VG] = Vm[ gen[:, GEN_BUS].astype(int) ]

    ## compute branch flows
    Sf = V[ branch[:, F_BUS].astype(int) ] * conj(Yf * V)  ## cplx pwr at "from" bus, p.u.
    St = V[ branch[:, T_BUS].astype(int) ] * conj(Yt * V)  ## cplx pwr at "to" bus, p.u.
    branch[:, PF] = Sf.real * baseMVA
    branch[:, QF] = Sf.imag * baseMVA
    branch[:, PT] = St.real * baseMVA
    branch[:, QT] = St.imag * baseMVA

    ## line constraint is actually on square of limit
    ## so we must fix multipliers
    muSf = zeros(nl)
    muSt = zeros(nl)
    if len(il) > 0:
        muSf[il] = \
            2 * lmbda["ineqnonlin"][:nl2] * branch[il, RATE_A] / baseMVA
        muSt[il] = \
            2 * lmbda["ineqnonlin"][nl2:nl2+nl2] * branch[il, RATE_A] / baseMVA

    ## update Lagrange multipliers
    bus[:, MU_VMAX]  = lmbda["upper"][vv["i1"]["Vm"]:vv["iN"]["Vm"]]
    bus[:, MU_VMIN]  = lmbda["lower"][vv["i1"]["Vm"]:vv["iN"]["Vm"]]
    gen[:, MU_PMAX]  = lmbda["upper"][vv["i1"]["Pg"]:vv["iN"]["Pg"]] / baseMVA
    gen[:, MU_PMIN]  = lmbda["lower"][vv["i1"]["Pg"]:vv["iN"]["Pg"]] / baseMVA
    gen[:, MU_QMAX]  = lmbda["upper"][vv["i1"]["Qg"]:vv["iN"]["Qg"]] / baseMVA
    gen[:, MU_QMIN]  = lmbda["lower"][vv["i1"]["Qg"]:vv["iN"]["Qg"]] / baseMVA

    bus[:, LAM_P] = \
        lmbda["eqnonlin"][nn["i1"]["Pmis"]:nn["iN"]["Pmis"]] / baseMVA
    bus[:, LAM_Q] = \
        lmbda["eqnonlin"][nn["i1"]["Qmis"]:nn["iN"]["Qmis"]] / baseMVA
    branch[:, MU_SF] = muSf / baseMVA
    branch[:, MU_ST] = muSt / baseMVA

    ## package up results
    nlnN = om.getN('nln')

    ## extract multipliers for nonlinear constraints
    kl = find(lmbda["eqnonlin"] < 0)
    ku = find(lmbda["eqnonlin"] > 0)
    nl_mu_l = zeros(nlnN)
    nl_mu_u = r_[zeros(2*nb), muSf, muSt]
    nl_mu_l[kl] = -lmbda["eqnonlin"][kl]
    nl_mu_u[ku] =  lmbda["eqnonlin"][ku]

    mu = {
      'var': {'l': lmbda["lower"], 'u': lmbda["upper"]},
      'nln': {'l': nl_mu_l, 'u': nl_mu_u},
      'lin': {'l': lmbda["mu_l"], 'u': lmbda["mu_u"]} }

    results = ppc
    results["bus"], results["branch"], results["gen"], \
        results["om"], results["x"], results["mu"], results["f"] = \
            bus, branch, gen, om, x, mu, f

    pimul = r_[
        results["mu"]["nln"]["l"] - results["mu"]["nln"]["u"],
        results["mu"]["lin"]["l"] - results["mu"]["lin"]["u"],
        -ones(ny > 0),
        results["mu"]["var"]["l"] - results["mu"]["var"]["u"],
    ]
    raw = {'xr': x, 'pimul': pimul, 'info': info, 'output': output}

    return results, success, raw
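
In normal use pipsopf_solver is not called directly but through PYPOWER's OPF driver. A hedged usage sketch follows; the pypower.api entry points are assumed, and OPF_ALG=560 selecting plain PIPS is an assumption consistent with the OPF_ALG == 565 (PIPS-sc) check in the option handling above:

    from pypower.api import case9, ppoption, runopf

    # OPF_ALG=560 assumed to select PIPS (565 would be the step-controlled variant)
    ppopt = ppoption(OPF_ALG=560, VERBOSE=1)
    r = runopf(case9(), ppopt)   # ends up calling pipsopf_solver(om, ppopt)
    print(r['success'], r['f'])  # convergence flag and final objective value
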
Example #3
def t_pips(quiet=False):
    """Tests of pips NLP solver.

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Richard Lincoln
    """
    t_begin(60, quiet)

    t = 'unconstrained banana function : '
    ## from MATLAB Optimization Toolbox's bandem.m
    f_fcn = f2
    x0 = array([-1.9, 2])
    # solution = pips(f_fcn, x0, opt={'verbose': 2})
    solution = pips(f_fcn, x0)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [1, 1], 13, [t, 'x'])
    t_is(f, 0, 13, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], zeros(x.shape), 13, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])

    t = 'unconstrained 3-d quadratic : '
    ## from http://www.akiti.ca/QuadProgEx0Constr.html
    f_fcn = f3
    x0 = array([0, 0, 0], float)
    # solution = pips(f_fcn, x0, opt={'verbose': 2})
    solution = pips(f_fcn, x0)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [3, 5, 7], 13, [t, 'x'])
    t_is(f, -244, 13, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], zeros(x.shape), 13, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])

    t = 'constrained 4-d QP : '
    ## from http://www.jmu.edu/docs/sasdoc/sashtml/iml/chap8/sect12.htm
    f_fcn = f4
    x0 = array([1.0, 0.0, 0.0, 1.0])
    A = array([
        [1.0,  1.0,  1.0,  1.0 ],
        [0.17, 0.11, 0.10, 0.18]
    ])
    l = array([1,  0.10])
    u = array([1.0, Inf])
    xmin = zeros(4)
    # solution = pips(f_fcn, x0, A, l, u, xmin, opt={'verbose': 2})
    solution = pips(f_fcn, x0, A, l, u, xmin)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, array([0, 2.8, 0.2, 0]) / 3, 6, [t, 'x'])
    t_is(f, 3.29 / 3, 6, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_is(lam['mu_l'], array([6.58, 0]) / 3, 6, [t, 'lam.mu_l'])
    t_is(lam['mu_u'], array([0, 0]), 13, [t, 'lam.mu_u'])
    t_is(lam['lower'], array([2.24, 0, 0, 1.7667]), 4, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])

    # H = array([
    #     [1003.1, 4.3, 6.3,  5.9],
    #     [   4.3, 2.2, 2.1,  3.9],
    #     [   6.3, 2.1, 3.5,  4.8],
    #     [   5.9, 3.9, 4.8, 10.0]
    # ])
    # c = zeros(4)
    # ## check with quadprog (for dev testing only)
    # x, f, s, out, lam = quadprog(H,c,-A(2,:), -0.10, A(1,:), 1, xmin)
    # t_is(s, 1, 13, [t, 'success'])
    # t_is(x, [0 2.8 0.2 0]/3, 6, [t, 'x'])
    # t_is(f, 3.29/3, 6, [t, 'f'])
    # t_is(lam['eqlin'], -6.58/3, 6, [t, 'lam.eqlin'])
    # t_is(lam['ineqlin'], 0, 13, [t, 'lam.ineqlin'])
    # t_is(lam['lower'], [2.24, 0, 0, 1.7667], 4, [t, 'lam[\'lower\']'])
    # t_is(lam['upper'], [0, 0, 0, 0], 13, [t, 'lam[\'upper\']'])

    t = 'constrained 2-d nonlinear : '
    ## from http://en.wikipedia.org/wiki/Nonlinear_programming#2-dimensional_example
    f_fcn = f5
    gh_fcn = gh5
    hess_fcn = hess5
    x0 = array([1.1, 0.0])
    xmin = zeros(2)
    # xmax = 3 * ones(2, 1)
    # solution = pips(f_fcn, x0, xmin=xmin, gh_fcn=gh_fcn, hess_fcn=hess_fcn, opt={'verbose': 2})
    solution = pips(f_fcn, x0, xmin=xmin, gh_fcn=gh_fcn, hess_fcn=hess_fcn)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [1, 1], 6, [t, 'x'])
    t_is(f, -2, 6, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_is(lam['ineqnonlin'], array([0, 0.5]), 6, [t, 'lam.ineqnonlin'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], zeros(x.shape), 13, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])
    # ## check with fmincon (for dev testing only)
    # # fmoptions = optimset('Algorithm', 'interior-point')
    # # [x, f, s, out, lam] = fmincon(f_fcn, x0, [], [], [], [], xmin, [], gh_fcn, fmoptions)
    # [x, f, s, out, lam] = fmincon(f_fcn, x0, [], [], [], [], [], [], gh_fcn)
    # t_is(s, 1, 13, [t, 'success'])
    # t_is(x, [1 1], 4, [t, 'x'])
    # t_is(f, -2, 6, [t, 'f'])
    # t_is(lam.ineqnonlin, [0, 0.5], 6, [t, 'lam.ineqnonlin'])

    t = 'constrained 3-d nonlinear : '
    ## from http://en.wikipedia.org/wiki/Nonlinear_programming#3-dimensional_example
    f_fcn = f6
    gh_fcn = gh6
    hess_fcn = hess6
    x0 = array([1.0, 1.0, 0.0])
    # solution = pips(f_fcn, x0, gh_fcn=gh_fcn, hess_fcn=hess_fcn, opt={'verbose': 2, 'comptol': 1e-9})
    solution = pips(f_fcn, x0, gh_fcn=gh_fcn, hess_fcn=hess_fcn)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [1.58113883, 2.23606798, 1.58113883], 6, [t, 'x'])
    t_is(f, -5 * sqrt(2), 6, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_is(lam['ineqnonlin'], array([0, sqrt(2) / 2]), 7, [t, 'lam.ineqnonlin'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], zeros(x.shape), 13, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])
    # ## check with fmincon (for dev testing only)
    # # fmoptions = optimset('Algorithm', 'interior-point')
    # # [x, f, s, out, lam] = fmincon(f_fcn, x0, [], [], [], [], xmin, [], gh_fcn, fmoptions)
    # [x, f, s, out, lam] = fmincon(f_fcn, x0, [], [], [], [], [], [], gh_fcn)
    # t_is(s, 1, 13, [t, 'success'])
    # t_is(x, [1.58113883 2.23606798 1.58113883], 4, [t, 'x'])
    # t_is(f, -5*sqrt(2), 8, [t, 'f'])
    # t_is(lam.ineqnonlin, [0, sqrt(2)/2], 8, [t, 'lam.ineqnonlin'])

    t = 'constrained 3-d nonlinear (dict) : '
    p = {'f_fcn': f_fcn, 'x0': x0, 'gh_fcn': gh_fcn, 'hess_fcn': hess_fcn}
    solution = pips(p)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [1.58113883, 2.23606798, 1.58113883], 6, [t, 'x'])
    t_is(f, -5 * sqrt(2), 6, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_is(lam['ineqnonlin'], [0, sqrt(2) / 2], 7, [t, 'lam.ineqnonlin'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], zeros(x.shape), 13, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])

    t = 'constrained 4-d nonlinear : '
    ## Hock & Schittkowski test problem #71
    f_fcn = f7
    gh_fcn = gh7
    hess_fcn = hess7
    x0 = array([1.0, 5.0, 5.0, 1.0])
    xmin = ones(4)
    xmax = 5 * xmin
    # solution = pips(f_fcn, x0, xmin=xmin, xmax=xmax, gh_fcn=gh_fcn, hess_fcn=hess_fcn, opt={'verbose': 2, 'comptol': 1e-9})
    solution = pips(f_fcn, x0, xmin=xmin, xmax=xmax, gh_fcn=gh_fcn, hess_fcn=hess_fcn)
    x, f, s, lam, _ = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [1, 4.7429994, 3.8211503, 1.3794082], 6, [t, 'x'])
    t_is(f, 17.0140173, 6, [t, 'f'])
    t_is(lam['eqnonlin'], 0.1614686, 5, [t, 'lam.eqnonlin'])
    t_is(lam['ineqnonlin'], 0.55229366, 5, [t, 'lam.ineqnonlin'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], [1.08787121024, 0, 0, 0], 5, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 7, [t, 'lam[\'upper\']'])

    t_end()
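
The objective and constraint callbacks used above (f2 through f7, gh5/gh6/gh7, hess5/hess6/hess7) are defined elsewhere in the test module. To make the callback contract concrete, here is a hedged sketch of what the "banana" objective f2 might look like; it is not the actual definition, just an illustration that the objective callback must return the value and gradient, and optionally the Hessian, in the same way qp_f does in the qps_pips example below:

    from numpy import array

    def f2(x, return_hessian=False):
        # Rosenbrock "banana" function from the MATLAB Optimization Toolbox demo
        a = 100.0
        f = a * (x[1] - x[0]**2)**2 + (1.0 - x[0])**2
        df = array([4.0 * a * (x[0]**3 - x[0] * x[1]) + 2.0 * x[0] - 2.0,
                    2.0 * a * (x[1] - x[0]**2)])
        if not return_hessian:
            return f, df
        d2f = 4.0 * a * array([[3.0 * x[0]**2 - x[1] + 1.0 / (2.0 * a), -x[0]],
                               [-x[0], 0.5]])
        return f, df, d2f
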
Example #4
def pipsopf_solver(om, ppopt, out_opt=None):
    """Solves AC optimal power flow using PIPS.

    Inputs are an OPF model object, a PYPOWER options vector and
    a dict containing keys (can be empty) for each of the desired
    optional output fields.

    Outputs are a C{results} dict, C{success} flag and C{raw} output dict.

    C{results} is a PYPOWER case dict (ppc) with the usual baseMVA, bus,
    branch, gen, gencost fields, along with the following additional
    fields:
        - C{order}      see 'help ext2int' for details of this field
        - C{x}          final value of optimization variables (internal order)
        - C{f}          final objective function value
        - C{mu}         shadow prices on ...
            - C{var}
                - C{l}  lower bounds on variables
                - C{u}  upper bounds on variables
            - C{nln}
                - C{l}  lower bounds on nonlinear constraints
                - C{u}  upper bounds on nonlinear constraints
            - C{lin}
                - C{l}  lower bounds on linear constraints
                - C{u}  upper bounds on linear constraints

    C{success} is C{True} if solver converged successfully, C{False} otherwise

    C{raw} is a raw output dict in the form returned by MINOS
        - xr     final value of optimization variables
        - pimul  constraint multipliers
        - info   solver specific termination code
        - output solver specific output information

    @see: L{opf}, L{pips}

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
    Autonoma de Manizales)
    @author: Richard Lincoln
    """
    ##----- initialization -----
    ## optional output
    if out_opt is None:
        out_opt = {}

    ## options
    verbose = ppopt['VERBOSE']
    feastol = ppopt['PDIPM_FEASTOL']
    gradtol = ppopt['PDIPM_GRADTOL']
    comptol = ppopt['PDIPM_COMPTOL']
    costtol = ppopt['PDIPM_COSTTOL']
    max_it = ppopt['PDIPM_MAX_IT']
    max_red = ppopt['SCPDIPM_RED_IT']
    step_control = (ppopt['OPF_ALG'] == 565)  ## OPF_ALG == 565, PIPS-sc
    if feastol == 0:
        feastol = ppopt['OPF_VIOLATION']
    opt = {
        'feastol': feastol,
        'gradtol': gradtol,
        'comptol': comptol,
        'costtol': costtol,
        'max_it': max_it,
        'max_red': max_red,
        'step_control': step_control,
        'cost_mult': 1e-4,
        'verbose': verbose
    }

    ## unpack data
    ppc = om.get_ppc()
    baseMVA, bus, gen, branch, gencost = \
        ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"], ppc["gencost"]
    vv, _, nn, _ = om.get_idx()

    ## problem dimensions
    nb = bus.shape[0]  ## number of buses
    nl = branch.shape[0]  ## number of branches
    ny = om.getN('var', 'y')  ## number of piece-wise linear costs

    ## linear constraints
    A, l, u = om.linear_constraints()

    ## bounds on optimization vars
    _, xmin, xmax = om.getv()

    ## build admittance matrices
    Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)

    ## try to select an interior initial point
    ll, uu = xmin.copy(), xmax.copy()
    ll[xmin == -Inf] = -1e10  ## replace Inf with numerical proxies
    uu[xmax == Inf] = 1e10
    x0 = (ll + uu) / 2
    Varefs = bus[bus[:, BUS_TYPE] == REF, VA] * (pi / 180)
    ## angles set to first reference angle
    x0[vv["i1"]["Va"]:vv["iN"]["Va"]] = Varefs[0]
    if ny > 0:
        ipwl = find(gencost[:, MODEL] == PW_LINEAR)
        #         PQ = r_[gen[:, PMAX], gen[:, QMAX]]
        #         c = totcost(gencost[ipwl, :], PQ[ipwl])
        c = gencost.flatten('F')[sub2ind(
            gencost.shape, ipwl,
            NCOST + 2 * gencost[ipwl, NCOST])]  ## largest y-value in CCV data
        x0[vv["i1"]["y"]:vv["iN"]["y"]] = max(c) + 0.1 * abs(max(c))
#        x0[vv["i1"]["y"]:vv["iN"]["y"]] = c + 0.1 * abs(c)

    ## find branches with flow limits
    il = find((branch[:, RATE_A] != 0) & (branch[:, RATE_A] < 1e10))
    nl2 = len(il)  ## number of constrained lines

    ##-----  run opf  -----
    f_fcn = lambda x, return_hessian=False: opf_costfcn(x, om, return_hessian)
    gh_fcn = lambda x: opf_consfcn(x, om, Ybus, Yf[il, :], Yt[il, :], ppopt, il)
    hess_fcn = lambda x, lmbda, cost_mult: opf_hessfcn(
        x, lmbda, om, Ybus, Yf[il, :], Yt[il, :], ppopt, il, cost_mult)

    solution = pips(f_fcn, x0, A, l, u, xmin, xmax, gh_fcn, hess_fcn, opt)
    x, f, info, lmbda, output = solution["x"], solution["f"], \
            solution["eflag"], solution["lmbda"], solution["output"]

    success = (info > 0)

    ## update solution data
    Va = x[vv["i1"]["Va"]:vv["iN"]["Va"]]
    Vm = x[vv["i1"]["Vm"]:vv["iN"]["Vm"]]
    Pg = x[vv["i1"]["Pg"]:vv["iN"]["Pg"]]
    Qg = x[vv["i1"]["Qg"]:vv["iN"]["Qg"]]

    V = Vm * exp(1j * Va)

    ##-----  calculate return values  -----
    ## update voltages & generator outputs
    bus[:, VA] = Va * 180 / pi
    bus[:, VM] = Vm
    gen[:, PG] = Pg * baseMVA
    gen[:, QG] = Qg * baseMVA
    gen[:, VG] = Vm[gen[:, GEN_BUS].astype(int)]

    ## compute branch flows
    Sf = V[branch[:, F_BUS].astype(int)] * conj(Yf * V)  ## cplx pwr at "from" bus, p.u.
    St = V[branch[:, T_BUS].astype(int)] * conj(Yt * V)  ## cplx pwr at "to" bus, p.u.
    branch[:, PF] = Sf.real * baseMVA
    branch[:, QF] = Sf.imag * baseMVA
    branch[:, PT] = St.real * baseMVA
    branch[:, QT] = St.imag * baseMVA

    ## line constraint is actually on square of limit
    ## so we must fix multipliers
    muSf = zeros(nl)
    muSt = zeros(nl)
    if len(il) > 0:
        muSf[il] = \
            2 * lmbda["ineqnonlin"][:nl2] * branch[il, RATE_A] / baseMVA
        muSt[il] = \
            2 * lmbda["ineqnonlin"][nl2:nl2+nl2] * branch[il, RATE_A] / baseMVA

    ## update Lagrange multipliers
    bus[:, MU_VMAX] = lmbda["upper"][vv["i1"]["Vm"]:vv["iN"]["Vm"]]
    bus[:, MU_VMIN] = lmbda["lower"][vv["i1"]["Vm"]:vv["iN"]["Vm"]]
    gen[:, MU_PMAX] = lmbda["upper"][vv["i1"]["Pg"]:vv["iN"]["Pg"]] / baseMVA
    gen[:, MU_PMIN] = lmbda["lower"][vv["i1"]["Pg"]:vv["iN"]["Pg"]] / baseMVA
    gen[:, MU_QMAX] = lmbda["upper"][vv["i1"]["Qg"]:vv["iN"]["Qg"]] / baseMVA
    gen[:, MU_QMIN] = lmbda["lower"][vv["i1"]["Qg"]:vv["iN"]["Qg"]] / baseMVA

    bus[:, LAM_P] = \
        lmbda["eqnonlin"][nn["i1"]["Pmis"]:nn["iN"]["Pmis"]] / baseMVA
    bus[:, LAM_Q] = \
        lmbda["eqnonlin"][nn["i1"]["Qmis"]:nn["iN"]["Qmis"]] / baseMVA
    branch[:, MU_SF] = muSf / baseMVA
    branch[:, MU_ST] = muSt / baseMVA

    ## package up results
    nlnN = om.getN('nln')

    ## extract multipliers for nonlinear constraints
    kl = find(lmbda["eqnonlin"] < 0)
    ku = find(lmbda["eqnonlin"] > 0)
    nl_mu_l = zeros(nlnN)
    nl_mu_u = r_[zeros(2 * nb), muSf, muSt]
    nl_mu_l[kl] = -lmbda["eqnonlin"][kl]
    nl_mu_u[ku] = lmbda["eqnonlin"][ku]

    mu = {
        'var': {
            'l': lmbda["lower"],
            'u': lmbda["upper"]
        },
        'nln': {
            'l': nl_mu_l,
            'u': nl_mu_u
        },
        'lin': {
            'l': lmbda["mu_l"],
            'u': lmbda["mu_u"]
        }
    }

    results = ppc
    results["bus"], results["branch"], results["gen"], \
        results["om"], results["x"], results["mu"], results["f"] = \
            bus, branch, gen, om, x, mu, f

    pimul = r_[results["mu"]["nln"]["l"] - results["mu"]["nln"]["u"],
               results["mu"]["lin"]["l"] - results["mu"]["lin"]["u"],
               -ones(ny > 0),
               results["mu"]["var"]["l"] - results["mu"]["var"]["u"], ]
    raw = {'xr': x, 'pimul': pimul, 'info': info, 'output': output}

    return results, success, raw
Example #5
def qps_pips(H, c, A, l, u, xmin=None, xmax=None, x0=None, opt=None):
    """Uses the Python Interior Point Solver (PIPS) to solve the following
    QP (quadratic programming) problem::

            min 1/2 x'*H*x + C'*x
             x

    subject to::

            l <= A*x <= u       (linear constraints)
            xmin <= x <= xmax   (variable bounds)

    Note the calling syntax is almost identical to that of QUADPROG from
    MathWorks' Optimization Toolbox. The main difference is that the linear
    constraints are specified with C{A}, C{L}, C{U} instead of C{A}, C{B},
    C{Aeq}, C{Beq}.

    Example from U{http://www.uc.edu/sashtml/iml/chap8/sect12.htm}:

        >>> from numpy import array, zeros, Inf
        >>> from scipy.sparse import csr_matrix
        >>> H = csr_matrix(array([[1003.1,  4.3,     6.3,     5.9],
        ...                       [4.3,     2.2,     2.1,     3.9],
        ...                       [6.3,     2.1,     3.5,     4.8],
        ...                       [5.9,     3.9,     4.8,     10 ]]))
        >>> c = zeros(4)
        >>> A = csr_matrix(array([[1,       1,       1,       1   ],
        ...                       [0.17,    0.11,    0.10,    0.18]]))
        >>> l = array([1, 0.10])
        >>> u = array([1, Inf])
        >>> xmin = zeros(4)
        >>> xmax = None
        >>> x0 = array([1, 0, 0, 1])
        >>> solution = qps_pips(H, c, A, l, u, xmin, xmax, x0)
        >>> round(solution["f"], 11) == 1.09666678128
        True
        >>> solution["converged"]
        True
        >>> solution["output"]["iterations"]
        10

    All parameters are optional except C{H}, C{c}, C{A} and C{l} or C{u}.
    @param H: Quadratic cost coefficients.
    @type H: csr_matrix
    @param c: vector of linear cost coefficients
    @type c: array
    @param A: Optional linear constraints.
    @type A: csr_matrix
    @param l: Optional linear constraints. Default values are M{-Inf}.
    @type l: array
    @param u: Optional linear constraints. Default values are M{Inf}.
    @type u: array
    @param xmin: Optional lower bounds on the M{x} variables, defaults are
                 M{-Inf}.
    @type xmin: array
    @param xmax: Optional upper bounds on the M{x} variables, defaults are
                 M{Inf}.
    @type xmax: array
    @param x0: Starting value of optimization vector M{x}.
    @type x0: array
    @param opt: optional options dictionary with the following keys, all of
                which are also optional (default values shown in parentheses)
                  - C{verbose} (False) - Controls level of progress output
                    displayed
                  - C{feastol} (1e-6) - termination tolerance for feasibility
                    condition
                  - C{gradtol} (1e-6) - termination tolerance for gradient
                    condition
                  - C{comptol} (1e-6) - termination tolerance for
                    complementarity condition
                  - C{costtol} (1e-6) - termination tolerance for cost
                    condition
                  - C{max_it} (150) - maximum number of iterations
                  - C{step_control} (False) - set to True to enable step-size
                    control
                  - C{max_red} (20) - maximum number of step-size reductions if
                    step-control is on
                  - C{cost_mult} (1.0) - cost multiplier used to scale the
                    objective function for improved conditioning. Note: The
                    same value must also be passed to the Hessian evaluation
                    function so that it can appropriately scale the objective
                    function term in the Hessian of the Lagrangian.
    @type opt: dict

    @rtype: dict
    @return: The solution dictionary has the following keys:
               - C{x} - solution vector
               - C{f} - final objective function value
               - C{converged} - exit status
                   - True = first order optimality conditions satisfied
                   - False = maximum number of iterations reached
                   - None = numerically failed
               - C{output} - output dictionary with keys:
                   - C{iterations} - number of iterations performed
                   - C{hist} - dictionary of arrays with trajectories of the
                      following: feascond, gradcond, compcond, costcond, gamma,
                     stepsize, obj, alphap, alphad
                   - C{message} - exit message
               - C{lmbda} - dictionary containing the Lagrange and Kuhn-Tucker
                 multipliers on the constraints, with keys:
                   - C{eqnonlin} - nonlinear equality constraints
                   - C{ineqnonlin} - nonlinear inequality constraints
                   - C{mu_l} - lower (left-hand) limit on linear constraints
                   - C{mu_u} - upper (right-hand) limit on linear constraints
                   - C{lower} - lower bound on optimization variables
                   - C{upper} - upper bound on optimization variables

    @see: L{pips}

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Richard Lincoln
    """
    if isinstance(H, dict):
        p = H
    else:
        p = {'H': H, 'c': c, 'A': A, 'l': l, 'u': u}
        if xmin is not None: p['xmin'] = xmin
        if xmax is not None: p['xmax'] = xmax
        if x0 is not None: p['x0'] = x0
        if opt is not None: p['opt'] = opt

    if 'H' not in p or p['H'] is None:  # p['H'].nnz == 0:
        ## error only if there are neither linear constraints nor variable bounds
        if (p['A'] is None or p['A'].nnz == 0) and \
           'xmin' not in p and \
           'xmax' not in p:
#           'xmin' not in p or len(p['xmin']) == 0 and \
#           'xmax' not in p or len(p['xmax']) == 0:
            print('qps_pips: LP problem must include constraints or variable bounds')
            return
        else:
            if p['A'] is not None and p['A'].nnz >= 0:
                nx = p['A'].shape[1]
            elif 'xmin' in p and len(p['xmin']) > 0:
                nx = p['xmin'].shape[0]
            elif 'xmax' in p and len(p['xmax']) > 0:
                nx = p['xmax'].shape[0]
        p['H'] = sparse((nx, nx))
    else:
        nx = p['H'].shape[0]

    p['xmin'] = -Inf * ones(nx) if 'xmin' not in p else p['xmin']
    p['xmax'] =  Inf * ones(nx) if 'xmax' not in p else p['xmax']

    p['c'] = zeros(nx) if p['c'] is None else p['c']

    p['x0'] = zeros(nx) if 'x0' not in p else p['x0']

    def qp_f(x, return_hessian=False):
        f = 0.5 * dot(x * p['H'], x) + dot(p['c'], x)
        df = p['H'] * x + p['c']
        if not return_hessian:
            return f, df
        d2f = p['H']
        return f, df, d2f

    p['f_fcn'] = qp_f

    sol = pips(p)

    return sol["x"], sol["f"], sol["eflag"], sol["output"], sol["lmbda"]
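
Note that, although the docstring above describes a solution dictionary, the wrapper as written returns a plain tuple; pips itself returns the dict, which the other examples unpack by key. A hedged sketch of calling the wrapper, reusing the H, c, A, l, u, xmin, xmax, x0 arrays from the doctest above:

    x, f, eflag, output, lmbda = qps_pips(H, c, A, l, u, xmin, xmax, x0)
    print(eflag > 0, round(f, 6), output['iterations'])
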
Example #6
def qps_pips(H, c, A, l, u, xmin=None, xmax=None, x0=None, opt=None):
    """Uses the Python Interior Point Solver (PIPS) to solve the following
    QP (quadratic programming) problem::

            min 1/2 x'*H*x + C'*x
             x

    subject to::

            l <= A*x <= u       (linear constraints)
            xmin <= x <= xmax   (variable bounds)

    Note the calling syntax is almost identical to that of QUADPROG from
    MathWorks' Optimization Toolbox. The main difference is that the linear
    constraints are specified with C{A}, C{L}, C{U} instead of C{A}, C{B},
    C{Aeq}, C{Beq}.

    Example from U{http://www.uc.edu/sashtml/iml/chap8/sect12.htm}:

        >>> from numpy import array, zeros, Inf
        >>> from scipy.sparse import csr_matrix
        >>> H = csr_matrix(array([[1003.1,  4.3,     6.3,     5.9],
        ...                       [4.3,     2.2,     2.1,     3.9],
        ...                       [6.3,     2.1,     3.5,     4.8],
        ...                       [5.9,     3.9,     4.8,     10 ]]))
        >>> c = zeros(4)
        >>> A = csr_matrix(array([[1,       1,       1,       1   ],
        ...                       [0.17,    0.11,    0.10,    0.18]]))
        >>> l = array([1, 0.10])
        >>> u = array([1, Inf])
        >>> xmin = zeros(4)
        >>> xmax = None
        >>> x0 = array([1, 0, 0, 1])
        >>> solution = qps_pips(H, c, A, l, u, xmin, xmax, x0)
        >>> round(solution["f"], 11) == 1.09666678128
        True
        >>> solution["converged"]
        True
        >>> solution["output"]["iterations"]
        10

    All parameters are optional except C{H}, C{c}, C{A} and C{l} or C{u}.
    @param H: Quadratic cost coefficients.
    @type H: csr_matrix
    @param c: vector of linear cost coefficients
    @type c: array
    @param A: Optional linear constraints.
    @type A: csr_matrix
    @param l: Optional linear constraints. Default values are M{-Inf}.
    @type l: array
    @param u: Optional linear constraints. Default values are M{Inf}.
    @type u: array
    @param xmin: Optional lower bounds on the M{x} variables, defaults are
                 M{-Inf}.
    @type xmin: array
    @param xmax: Optional upper bounds on the M{x} variables, defaults are
                 M{Inf}.
    @type xmax: array
    @param x0: Starting value of optimization vector M{x}.
    @type x0: array
    @param opt: optional options dictionary with the following keys, all of
                which are also optional (default values shown in parentheses)
                  - C{verbose} (False) - Controls level of progress output
                    displayed
                  - C{feastol} (1e-6) - termination tolerance for feasibility
                    condition
                  - C{gradtol} (1e-6) - termination tolerance for gradient
                    condition
                  - C{comptol} (1e-6) - termination tolerance for
                    complementarity condition
                  - C{costtol} (1e-6) - termination tolerance for cost
                    condition
                  - C{max_it} (150) - maximum number of iterations
                  - C{step_control} (False) - set to True to enable step-size
                    control
                  - C{max_red} (20) - maximum number of step-size reductions if
                    step-control is on
                  - C{cost_mult} (1.0) - cost multiplier used to scale the
                    objective function for improved conditioning. Note: The
                    same value must also be passed to the Hessian evaluation
                    function so that it can appropriately scale the objective
                    function term in the Hessian of the Lagrangian.
    @type opt: dict

    @rtype: dict
    @return: The solution dictionary has the following keys:
               - C{x} - solution vector
               - C{f} - final objective function value
               - C{converged} - exit status
                   - True = first order optimality conditions satisfied
                   - False = maximum number of iterations reached
                   - None = numerically failed
               - C{output} - output dictionary with keys:
                   - C{iterations} - number of iterations performed
                   - C{hist} - dictionary of arrays with trajectories of the
                      following: feascond, gradcond, compcond, costcond, gamma,
                     stepsize, obj, alphap, alphad
                   - C{message} - exit message
               - C{lmbda} - dictionary containing the Lagrange and Kuhn-Tucker
                 multipliers on the constraints, with keys:
                   - C{eqnonlin} - nonlinear equality constraints
                   - C{ineqnonlin} - nonlinear inequality constraints
                   - C{mu_l} - lower (left-hand) limit on linear constraints
                   - C{mu_u} - upper (right-hand) limit on linear constraints
                   - C{lower} - lower bound on optimization variables
                   - C{upper} - upper bound on optimization variables

    @see: L{pips}

    @author: Ray Zimmerman (PSERC Cornell)
    """
    if isinstance(H, dict):
        p = H
    else:
        p = {'H': H, 'c': c, 'A': A, 'l': l, 'u': u}
        if xmin is not None: p['xmin'] = xmin
        if xmax is not None: p['xmax'] = xmax
        if x0 is not None: p['x0'] = x0
        if opt is not None: p['opt'] = opt

    if 'H' not in p or p['H'] is None:  # p['H'].nnz == 0:
        ## error only if there are neither linear constraints nor variable bounds
        if (p['A'] is None or p['A'].nnz == 0) and \
           'xmin' not in p and \
           'xmax' not in p:
            #           'xmin' not in p or len(p['xmin']) == 0 and \
            #           'xmax' not in p or len(p['xmax']) == 0:
            print('qps_pips: LP problem must include constraints or variable bounds')
            return
        else:
            if p['A'] is not None and p['A'].nnz >= 0:
                nx = p['A'].shape[1]
            elif 'xmin' in p and len(p['xmin']) > 0:
                nx = p['xmin'].shape[0]
            elif 'xmax' in p and len(p['xmax']) > 0:
                nx = p['xmax'].shape[0]
        p['H'] = sparse((nx, nx))
    else:
        nx = p['H'].shape[0]

    p['xmin'] = -Inf * ones(nx) if 'xmin' not in p else p['xmin']
    p['xmax'] = Inf * ones(nx) if 'xmax' not in p else p['xmax']

    p['c'] = zeros(nx) if p['c'] is None else p['c']

    p['x0'] = zeros(nx) if 'x0' not in p else p['x0']

    def qp_f(x, return_hessian=False):
        f = 0.5 * dot(x * p['H'], x) + dot(p['c'], x)
        df = p['H'] * x + p['c']
        if not return_hessian:
            return f, df
        d2f = p['H']
        return f, df, d2f

    p['f_fcn'] = qp_f

    sol = pips(p)

    return sol["x"], sol["f"], sol["eflag"], sol["output"], sol["lmbda"]
Example #7
def t_pips(quiet=False):
    """Tests of pips NLP solver.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    t_begin(60, quiet)

    t = 'unconstrained banana function : '
    ## from MATLAB Optimization Toolbox's bandem.m
    f_fcn = f2
    x0 = array([-1.9, 2])
    # solution = pips(f_fcn, x0, opt={'verbose': 2})
    solution = pips(f_fcn, x0)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [1, 1], 13, [t, 'x'])
    t_is(f, 0, 13, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], zeros(x.shape), 13, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])

    t = 'unconstrained 3-d quadratic : '
    ## from http://www.akiti.ca/QuadProgEx0Constr.html
    f_fcn = f3
    x0 = array([0, 0, 0], float)
    # solution = pips(f_fcn, x0, opt={'verbose': 2})
    solution = pips(f_fcn, x0)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [3, 5, 7], 13, [t, 'x'])
    t_is(f, -244, 13, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], zeros(x.shape), 13, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])

    t = 'constrained 4-d QP : '
    ## from http://www.jmu.edu/docs/sasdoc/sashtml/iml/chap8/sect12.htm
    f_fcn = f4
    x0 = array([1.0, 0.0, 0.0, 1.0])
    A = array([[1.0, 1.0, 1.0, 1.0], [0.17, 0.11, 0.10, 0.18]])
    l = array([1, 0.10])
    u = array([1.0, Inf])
    xmin = zeros(4)
    # solution = pips(f_fcn, x0, A, l, u, xmin, opt={'verbose': 2})
    solution = pips(f_fcn, x0, A, l, u, xmin)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, array([0, 2.8, 0.2, 0]) / 3, 6, [t, 'x'])
    t_is(f, 3.29 / 3, 6, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_is(lam['mu_l'], array([6.58, 0]) / 3, 6, [t, 'lam.mu_l'])
    t_is(lam['mu_u'], array([0, 0]), 13, [t, 'lam.mu_u'])
    t_is(lam['lower'], array([2.24, 0, 0, 1.7667]), 4, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])

    # H = array([
    #     [1003.1, 4.3, 6.3,  5.9],
    #     [   4.3, 2.2, 2.1,  3.9],
    #     [   6.3, 2.1, 3.5,  4.8],
    #     [   5.9, 3.9, 4.8, 10.0]
    # ])
    # c = zeros(4)
    # ## check with quadprog (for dev testing only)
    # x, f, s, out, lam = quadprog(H,c,-A(2,:), -0.10, A(1,:), 1, xmin)
    # t_is(s, 1, 13, [t, 'success'])
    # t_is(x, [0 2.8 0.2 0]/3, 6, [t, 'x'])
    # t_is(f, 3.29/3, 6, [t, 'f'])
    # t_is(lam['eqlin'], -6.58/3, 6, [t, 'lam.eqlin'])
    # t_is(lam['ineqlin'], 0, 13, [t, 'lam.ineqlin'])
    # t_is(lam['lower'], [2.24, 0, 0, 1.7667], 4, [t, 'lam[\'lower\']'])
    # t_is(lam['upper'], [0, 0, 0, 0], 13, [t, 'lam[\'upper\']'])

    t = 'constrained 2-d nonlinear : '
    ## from http://en.wikipedia.org/wiki/Nonlinear_programming#2-dimensional_example
    f_fcn = f5
    gh_fcn = gh5
    hess_fcn = hess5
    x0 = array([1.1, 0.0])
    xmin = zeros(2)
    # xmax = 3 * ones(2, 1)
    # solution = pips(f_fcn, x0, xmin=xmin, gh_fcn=gh_fcn, hess_fcn=hess_fcn, opt={'verbose': 2})
    solution = pips(f_fcn, x0, xmin=xmin, gh_fcn=gh_fcn, hess_fcn=hess_fcn)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [1, 1], 6, [t, 'x'])
    t_is(f, -2, 6, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_is(lam['ineqnonlin'], array([0, 0.5]), 6, [t, 'lam.ineqnonlin'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], zeros(x.shape), 13, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])
    # ## check with fmincon (for dev testing only)
    # # fmoptions = optimset('Algorithm', 'interior-point')
    # # [x, f, s, out, lam] = fmincon(f_fcn, x0, [], [], [], [], xmin, [], gh_fcn, fmoptions)
    # [x, f, s, out, lam] = fmincon(f_fcn, x0, [], [], [], [], [], [], gh_fcn)
    # t_is(s, 1, 13, [t, 'success'])
    # t_is(x, [1 1], 4, [t, 'x'])
    # t_is(f, -2, 6, [t, 'f'])
    # t_is(lam.ineqnonlin, [0, 0.5], 6, [t, 'lam.ineqnonlin'])

    t = 'constrained 3-d nonlinear : '
    ## from http://en.wikipedia.org/wiki/Nonlinear_programming#3-dimensional_example
    f_fcn = f6
    gh_fcn = gh6
    hess_fcn = hess6
    x0 = array([1.0, 1.0, 0.0])
    # solution = pips(f_fcn, x0, gh_fcn=gh_fcn, hess_fcn=hess_fcn, opt={'verbose': 2, 'comptol': 1e-9})
    solution = pips(f_fcn, x0, gh_fcn=gh_fcn, hess_fcn=hess_fcn)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [1.58113883, 2.23606798, 1.58113883], 6, [t, 'x'])
    t_is(f, -5 * sqrt(2), 6, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_is(lam['ineqnonlin'], array([0, sqrt(2) / 2]), 7, [t, 'lam.ineqnonlin'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], zeros(x.shape), 13, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])
    # ## check with fmincon (for dev testing only)
    # # fmoptions = optimset('Algorithm', 'interior-point')
    # # [x, f, s, out, lam] = fmincon(f_fcn, x0, [], [], [], [], xmin, [], gh_fcn, fmoptions)
    # [x, f, s, out, lam] = fmincon(f_fcn, x0, [], [], [], [], [], [], gh_fcn)
    # t_is(s, 1, 13, [t, 'success'])
    # t_is(x, [1.58113883 2.23606798 1.58113883], 4, [t, 'x'])
    # t_is(f, -5*sqrt(2), 8, [t, 'f'])
    # t_is(lam.ineqnonlin, [0, sqrt(2)/2], 8, [t, 'lam.ineqnonlin'])

    t = 'constrained 3-d nonlinear (dict) : '
    p = {'f_fcn': f_fcn, 'x0': x0, 'gh_fcn': gh_fcn, 'hess_fcn': hess_fcn}
    solution = pips(p)
    x, f, s, lam, out = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [1.58113883, 2.23606798, 1.58113883], 6, [t, 'x'])
    t_is(f, -5 * sqrt(2), 6, [t, 'f'])
    t_is(out['hist'][-1]['compcond'], 0, 6, [t, 'compcond'])
    t_is(lam['ineqnonlin'], [0, sqrt(2) / 2], 7, [t, 'lam.ineqnonlin'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], zeros(x.shape), 13, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 13, [t, 'lam[\'upper\']'])

    t = 'constrained 4-d nonlinear : '
    ## Hock & Schittkowski test problem #71
    f_fcn = f7
    gh_fcn = gh7
    hess_fcn = hess7
    x0 = array([1.0, 5.0, 5.0, 1.0])
    xmin = ones(4)
    xmax = 5 * xmin
    # solution = pips(f_fcn, x0, xmin=xmin, xmax=xmax, gh_fcn=gh_fcn, hess_fcn=hess_fcn, opt={'verbose': 2, 'comptol': 1e-9})
    solution = pips(f_fcn, x0, xmin=xmin, xmax=xmax,
                    gh_fcn=gh_fcn, hess_fcn=hess_fcn)
    x, f, s, lam, _ = solution["x"], solution["f"], solution["eflag"], \
            solution["lmbda"], solution["output"]
    t_is(s, 1, 13, [t, 'success'])
    t_is(x, [1, 4.7429994, 3.8211503, 1.3794082], 6, [t, 'x'])
    t_is(f, 17.0140173, 6, [t, 'f'])
    t_is(lam['eqnonlin'], 0.1614686, 5, [t, 'lam.eqnonlin'])
    t_is(lam['ineqnonlin'], 0.55229366, 5, [t, 'lam.ineqnonlin'])
    t_ok(len(lam['mu_l']) == 0, [t, 'lam.mu_l'])
    t_ok(len(lam['mu_u']) == 0, [t, 'lam.mu_u'])
    t_is(lam['lower'], [1.08787121024, 0, 0, 0], 5, [t, 'lam[\'lower\']'])
    t_is(lam['upper'], zeros(x.shape), 7, [t, 'lam[\'upper\']'])

    t_end()
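
A hedged sketch of a module-level entry point for running these tests directly (assuming the surrounding test module imports pips, the t_* harness helpers, and the f2 through hess7 callbacks the function relies on):

    if __name__ == '__main__':
        t_pips(quiet=False)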