Example 1
def solve(p):
    global P
    P = p
    t0 = time.time()

    # Create the problem for Ipopt to solve
    nlp = pyipopt.create(P.nvar, P.x_L, P.x_U, P.ncon, P.g_L, P.g_U, P.nnzj, P.nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g)

    # Set Ipopt solve options
    nlp.int_option("print_level",5)
    nlp.num_option("acceptable_tol",1000.0)
    nlp.num_option("tol",1000.0)
    nlp.num_option("dual_inf_tol",10000.0)
    nlp.num_option("compl_inf_tol",10000.0)
    nlp.int_option("max_iter", 1000)
    #nlp.str_option("derivative_test","first-order")

    stitching_constraint(0.0, 0)

    print "Calling solve"

    x, zl, zu, constrain_mu, obj, status = nlp.solve(P.x) # Solve the NLP problem with Ipopt
    nlp.close()
    if status == 0:
        solve_time = time.time() - t0
        print "Solution found in: ",solve_time," (sec)"
        P.x = x
        P.solved = True
    else:
        print "Failed to find solution!"
        P.x = x
        P.solved = False
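
Note: this example, like several below, references user-supplied callbacks (eval_f, eval_grad_f, eval_g, eval_jac_g) that are defined elsewhere in the source project. As a reference, here is a minimal sketch of the signatures pyipopt expects, written for a hypothetical two-variable problem with one linear constraint:

import numpy

def eval_f(x, user_data=None):
    # Objective value at x (returns a scalar).
    return x[0]**2 + x[1]**2

def eval_grad_f(x, user_data=None):
    # Objective gradient (array of length nvar).
    return numpy.array([2.0 * x[0], 2.0 * x[1]])

def eval_g(x, user_data=None):
    # Constraint values (array of length ncon).
    return numpy.array([x[0] + x[1]])

def eval_jac_g(x, flag, user_data=None):
    # With flag=True, return the sparsity structure as (rows, cols);
    # otherwise return the Jacobian values in the same order.
    if flag:
        return (numpy.array([0, 0]), numpy.array([0, 1]))
    return numpy.array([1.0, 1.0])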
Example 2
 def solve_init(self, eval_J, eval_dJ):
     nvar = int(2*self.num_cables)
     ncon = int(self.num_cables + self.num_cables*(self.num_cables-1)/2)
     low_var = -numpy.inf*numpy.ones(nvar,dtype=float)
     up_var = numpy.inf*numpy.ones(nvar, dtype=float)
     up_var[0], low_var[0] = 0, 0
     inf_con = numpy.inf*numpy.ones(ncon, dtype=float)
     zero_con = numpy.zeros(ncon, dtype=float)
     self.nlp = pyipopt.create(nvar,      # Number of controls
                               low_var,  # Lower bounds for Control
                               up_var,   # Upper bounds for Control
                               ncon,      # Number of constraints
                              -inf_con,  # Lower bounds for constraints
                              zero_con,   # Upper bounds for constraints
                               nvar*ncon, # Number of nonzeros in cons. Jac
                               0,         # Number of nonzeros in cons. Hes
                               lambda pos: eval_J(pos),  # Objective eval
                               lambda pos: eval_dJ(pos), # Obj. grad eval
                               self.eval_g,    # Constraint evaluation
                               self.eval_jac_g # Constraint Jacobian evaluation
     )
      # So it does not violate the boundary constraint
     self.nlp.num_option('bound_relax_factor', 0)
     self.nlp.int_option('max_iter', 16)
     self.nlp.int_option("print_level",6)
Example 3
def run(x0, eval_jac_g, eval_h):
      
    if x0.shape != (N*D,):
        raise "x is wrong dims!"      
        
    x_L, x_U = set_x_bounds(lowbnd, upbnd)

    start = time.time()

    print eval_h.nnz
    nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, eval_jac_g.nnz, eval_h.nnz, eval_f_adolc, eval_grad_f, eval_g_adolc, eval_jac_g, eval_h)
    #nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, eval_jac_g.nnz, 365, eval_f_adolc, eval_grad_f, eval_g_adolc, eval_jac_g)


    nlp.int_option('max_iter', maxits)
    nlp.num_option('constr_viol_tol', epsg)
    nlp.num_option('tol', epsf)
    nlp.num_option('acceptable_tol', 1e-3)
    nlp.str_option('linear_solver', linear_solver)
    nlp.str_option('mu_strategy', 'adaptive')
    nlp.num_option('bound_relax_factor', 0)
    nlp.str_option('adaptive_mu_globalization','never-monotone-mode')

    results = nlp.solve(x0)


    print "optimized: ", time.time()-start, "s"
    print "Exit flag = ", results[5]
    print "Action = ", results[4]
    return results[4], results[0]
Example 4

	def runOptimization(self):

		pyipopt.set_loglevel(0)
		self.nlp = pyipopt.create(
			self.optDict['nPrimal'],
			self.constraintDict['lowerBound'],
			self.constraintDict['upperBound'],
			self.optDict['nConstraint'],
			self.constraintDict['lowerConstraint'],
			self.constraintDict['upperConstraint'],
			self.optDict['nJacobianNonZero'],
			self.optDict['nHessianNonZero'],
			self.evalObjectiveFunction,
			self.evalObjectiveFunctionGradient,
			self.evalConstraints,
			self.evalConstraintsJacobian)

		self.nlp.int_option("max_iter", 150)
		# self.nlp.str_option('derivative_test', 'first-order')
		# self.nlp.num_option('derivative_test_tol', 1e-2)
		# self.nlp.num_option('derivative_test_perturbation', 1e-3)
		self.nlp.num_option('point_perturbation_radius', 1e-3)
		# self.nlp.int_option('max_iter', 100)
		# self.nlp.str_option('derivative_test_print_all', 'yes')
		# self.nlp.int_option('derivative_test_first_index', 330)
		# self.nlp.num_option('acceptable_constr_viol_tol', 1)

		x, zl, zu, constraint_multipliers, obj, status = self.nlp.solve(self.initialGuess)
		return x[self.sliceDict['varMu']], x[self.sliceDict['varTheta']], self.optX, self.optY
Example 5
def optimizePC(channel, noiseIfPower, rate, linkBandwidth, pMax, p0, m, verbosity=0):
    ''' Uses channel values, PHY parameters and power consumption characteristics to find minimal resource allocation. Returns resource allocation, objective value and IPOPT status. 
    Input:
        channel - 3d array. 0d users, 1d n_tx, 2d n_rx
        noiseIfPower - total noise power over the linkbandwidth
        rate - target rate in bps
        pMax - maximum allowed transmission power
        p0 - power consumption at zero transmission (not sleep)
        m - power consumption load factor
        verbosity - IPOPT verbosity level
    Output:
        obj - solution objective value
        solution - resource share per user
        status - IPOPT status '''

    # the channel dimensions tell some more parameters
    users = channel.shape[0]
    n_tx  = channel.shape[1]
    n_rx  = channel.shape[2]

    # preparing IPOPT parameters
    nvar  = users # for readability
    x_L = zeros(nvar, dtype=float_)
    x_U = ones(nvar, dtype=float_)
    ncon = nvar + 1 # transmit power constraints and the unit sum 
    g_L = zeros(1+nvar) # unit sum and all power constraints
    g_L[0] = 1.
    g_U = pMax * ones(1+nvar) # unit sum and all power constraints
    g_U[0] = 1.
    nnzj = nvar * (1+nvar)
    nnzh = 0 # unused: no Hessian callback is passed to create()
    x0 = repeat([1./(nvar+1)],nvar) # Starting point

    # IPOPT requires single parameter functions
    if n_tx == 2 and n_rx == 2:
        eval_f = lambda mus: optimMinPow2x2.eval_f(mus, noiseIfPower, channel, rate, linkBandwidth, p0, m)
        eval_grad_f = lambda mus: optimMinPow2x2.eval_grad_f(mus, noiseIfPower, channel, rate, linkBandwidth, p0, m)
        eval_g = lambda mus: optimMinPow2x2.eval_g(mus, noiseIfPower, channel, rate, linkBandwidth)
        eval_jac_g = lambda mus, flag: optimMinPow2x2.eval_jac_g(mus, noiseIfPower, channel, rate, linkBandwidth, flag)
    else:
        raise NotImplementedError # other combinations may be needed later

    # Call solve() 
    pyipopt.set_loglevel(min([2,verbosity])) # verbose
    nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g)
    #nlp.int_option("max_iter", 3000)
    #nlp.num_option("tol", 1e-8)
    #nlp.num_option("acceptable_tol", 1e-2)
    #nlp.int_option("acceptable_iter", 0)
    nlp.str_option("derivative_test", "first-order")
    nlp.str_option("derivative_test_print_all", "no")
    #nlp.str_option("print_options_documentation", "yes")
    nlp.int_option("print_level", min([verbosity,12])) # maximum is 12
    nlp.str_option("print_user_options", "yes")
    
    # Note: older pyipopt builds returned five values (without the
    # constraint multipliers); recent builds return six, as unpacked here.
    solution, zl, zu, constraint_multipliers, obj, status = nlp.solve(x0)
    nlp.close()

    return obj, solution, status
Example 6
    def ipopt_naive(self,
                    start=None,
                    iters=3000,
                    acc=1E-5,
                    callback=True,
                    verbose=False):
        """
        x - # of variables
        xl - lower bounds of variables
        xu - upper bounds of variables
        m - # of constraints
        gl - lower bounds of constraints
        gu - upper bounds of constraints
        nnzj - number of nonzero values in jacobian
        nnzh - number of nonzero values in hessian (set to 0 if eval_h is not used)
        eval_f - objective function
        eval_grad_f - calculates gradient of objective function
        eval_g - calculates constraint values
        eval_jac_g - calculates jacobian
        eval_h - calculates hessian (optional, if not used set nnzh to 0)
        """

        try:
            if not (hasattr(IPOPT, "create")):
                print "WARNING: IPOPT not correctly installed (not start)"
                return start
        except NameError:
            print "WARNING: IPOPT is not installed!"
            return start

        if start is None:
            start = self.defaultState.extract(excludeKeys=self.excludeKeys)
        if callback == True:
            callback = self.robot.cleanupCallback
        #start = N.array(start, subok=True)
        numConst, lCBounds, uCBounds = self.getConstData()
        numVar = start.size
        lVBounds = []
        uVBounds = []
        for i in range(start.size / self.robot.nvars):
            lVBounds += self.robot.xLBounds
            uVBounds += self.robot.xUBounds
        lVBounds = N.array(lVBounds, dtype=float)
        uVBounds = N.array(uVBounds, dtype=float)

        struct = self.eval_jac_g(start, False)
        numNonZero = struct.size

        ipprob = IPOPT.create(numVar, lVBounds, uVBounds, numConst, lCBounds,
                              uCBounds, numNonZero, 0, self.eval_f,
                              self.eval_grad_f, self.eval_g, self.eval_jac_g)
        ipprob.num_option('tol', acc)
        ipprob.int_option('max_iter', iters)
        # call solve with initial state (start)
        x, zl, zu, constraint_multipliers, obj, status = ipprob.solve(start)
        ipprob.close()
        return x
Example 7
  def solve(self):
    x0 = self._robot.configuration()
    nbVar = len(x0)

    xL = [-math.pi] * nbVar
    xU = [math.pi] * nbVar
    cL = []
    cU = []
    nCon = 0
    nnzH = 0
    ssR = []
    ssC = []

    for c in self._constraint:
      cL.append(c.lBounds())
      cU.append(c.uBounds())
      ss = c.sparseStruct()
      ssR += (np.array(ss[0]) + nCon).tolist()
      ssC += ss[1]
      nCon += c.constraintCount()

    ss = (np.array(ssR), np.array(ssC))
    nnzJ = len(ssC)

    def evalF(x, user=None):
      self._robot.configure(x)
      return 0.0

    def evalFJ(x, user=None):
      self._robot.configure(x)
      return np.zeros(nbVar)

    def evalC(x, user=None):
      self._robot.configure(x)
      l = []
      for c in self._constraint:
        l += c.eval(x)

      return np.array(l)

    def evalCJ(x, flag, user=None):
      if flag:
        return ss
      else:
        l = []
        for c in self._constraint:
          l += c.evalJ(x)

        return np.array(l)


    opt = pyipopt.create(nbVar, np.array(xL), np.array(xU),
                         nCon, np.array(cL), np.array(cU),
                         nnzJ, nnzH, evalF, evalFJ, evalC, evalCJ)

    x, zl, zu, constraint_multipliers, obj, status = opt.solve(np.array(x0))
    opt.close()
Example 8
    def start_iteration(self):
        """Perform initial setup before iteration loop begins."""
        
        self._config_ipopt()
        
        # get the initial values of the parameters
        for i, val in enumerate(self.get_parameters().values()):
            self.design_vals[i] = val.evaluate(self.parent)
            
        self.update_constraints()
        
        x_L = array( [ x.low for x in self.get_parameters().values() ] )
        x_U = array( [ x.high for x in self.get_parameters().values() ] )
        # Ipopt treats equality and inequality constraints together.
        # For equality constraints, just set the g_l and g_u to be
        # equal. For this driver, the inequality constraints come
        # first: g_l is set to 0.0 and g_u is set to the largest float.
        # For the equality constraints, both g_l and g_u are set to zero.
        g_L = zeros( self.num_constraints, 'd')
        g_U = zeros( self.num_constraints, 'd')
        for i in range( self.num_ineq_constraints ):
            g_U[i] = sys.float_info.max

        # number of nonzeros in the Jacobian:
        # num_params * num_constraints (assumed to be dense)
        nnzj = self.num_params * self.num_constraints
        # number of nonzeros in the Hessian (dense lower triangle)
        nnzh = self.num_params * ( self.num_params + 1 ) / 2

        try:
            self.nlp = pyipopt.create(
               self.num_params, x_L, x_U,
               self.num_constraints, g_L, g_U,
               nnzj, nnzh,
               eval_f, eval_grad_f,
               eval_g, eval_jac_g,
               intermediate_callback
               # f2py lets you pass extra args to
               # callback functions
               # http://cens.ioc.ee/projects/f2py2e/usersguide/
               #     index.html#call-back-arguments
               # We pass the driver itself to the callbacks
               ### not using them for now
               #             eval_f_extra_args = (self,),
               #             eval_grad_f_extra_args = (self,),
               #             eval_g_extra_args = (self,),
               #             eval_jac_g_extra_args = (self,),
               #             intermediate_cb_extra_args = (self,),
               )
            
            self.nlp.set_intermediate_callback( intermediate_callback )

        except Exception, err:
            self._logger.error(str(err))
            raise
Example 9
 def solve_constr(theta0, use_hess = False):
     pyipopt.set_loglevel(1)    
     n = theta0.size    
     x_L = np.array([pyipopt.NLP_LOWER_BOUND_INF]*n, dtype=float)
     x_U = np.array([pyipopt.NLP_UPPER_BOUND_INF]*n, dtype=float)        
     ncon = pstationtrue.size
     g_L = g_U = pstationtrue    
     nnzj = maskj.sum()
     nnzh = maskh.sum()    
     idxrj, idxcj = np.mgrid[:ncon, :n]
     idxrh, idxch = np.mgrid[:n, :n]    
     eval_c = lambda t: constr(t)
     eval_j = lambda t, f: (idxrj[maskj], idxcj[maskj]) if f else np.squeeze(jab(t))[maskj]
     if use_hess:
         eval_h = lambda t, l, o, f: (idxrh[maskh], idxch[maskh]) if f else np.squeeze(hess_constr(t,l,o))[maskh]
         nlp = pyipopt.create(theta0.size, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad, eval_c, eval_j, eval_h)
     else:
         nlp = pyipopt.create(theta0.size, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad, eval_c, eval_j)
     results = nlp.solve(theta0)
     nlp.close()
     return results
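
In Example 9 the sparsity masks maskj and maskh come from the enclosing scope. One way such a mask could be built, evaluating the dense Jacobian once at the starting point and treating its zero pattern as structural, is sketched here (an assumption about the setup, not code from the project):

import numpy as np

J0 = np.squeeze(jab(theta0))   # dense ncon-by-n Jacobian at theta0
maskj = J0 != 0.0              # boolean mask of structural nonzeros
nnzj = maskj.sum()
idxrj, idxcj = np.mgrid[:J0.shape[0], :J0.shape[1]]
rows, cols = idxrj[maskj], idxcj[maskj]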
Example 10
def main():

    # verbose
    pyipopt.set_loglevel(2)

    # define the parameters and their box constraints
    nvar = 2
    x_L = numpy.array([-3, -3], dtype=float)
    x_U = numpy.array([3, 3], dtype=float)

    # define the inequality constraints
    ncon = 0
    g_L = numpy.array([], dtype=float)
    g_U = numpy.array([], dtype=float)

    # define the number of nonzeros in the jacobian and in the hessian
    # there are no nonzeros in the constraint jacobian
    nnzj = 0

    # there are maximum nonzeros (nvar*(nvar+1))/2 in the lagrangian hessian
    nnzh = 3

    # create the nonlinear programming model
    nlp = pyipopt.create(
        nvar,
        x_L,
        x_U,
        ncon,
        g_L,
        g_U,
        nnzj,
        nnzh,
        eval_f,
        eval_grad_f,
        eval_g,
        eval_jac_g,
        eval_h,
        apply_new,
    )

    # define the initial guess
    x0 = numpy.array([-1.2, 1], dtype=float)

    # compute the results using ipopt
    results = nlp.solve(x0)

    # free the model
    nlp.close()

    # report the results
    print results
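
This example passes eval_h and apply_new, which are defined elsewhere. A sketch of the Hessian callback interface for a two-variable problem with nnzh = 3 (the dense lower triangle), assuming the objective f(x) = x[0]**2 + x[1]**2 and no constraints; this illustrates the flag protocol, it is not the original project's code:

import numpy

def eval_h(x, lagrange, obj_factor, flag, user_data=None):
    if flag:
        # Structure: lower triangle of the 2x2 Lagrangian Hessian.
        return (numpy.array([0, 1, 1]), numpy.array([0, 0, 1]))
    # Values of obj_factor * H(f) + sum_i lagrange[i] * H(g_i);
    # with ncon = 0 there is no constraint term.
    return obj_factor * numpy.array([2.0, 0.0, 2.0])

def apply_new(x):
    # Called when Ipopt moves to a new iterate; returning True accepts it.
    return True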
Example 12
    def __init__(self, ListStreams, ListUnits, CFlag=5, PrintOpt=0, Xtol=1e-6, iter=100):
        self.XFlag=[]
        self.X=[]
        self.Sigma=[]
        self.CFlag=CFlag
        self.ListStreams=ListStreams
        self.ListUnits = ListUnits
        self.ConstructXFlag()
        #self.ConstructX()
        self.Xmeas=self.X
        self.JacobianSize()
        if (self.CFlag==5 or self.CFlag==6 or self.CFlag==7):
            self.JacoNorm()
        self.NonZeroJacoRowCol()
        self.NonZeroHessRowCol()
        self.nnzh=len(self.NonZeroHessRow)
        self.nnzj=len(self.NonZeroJacoRow)
        self.ConstraintBounds()
        self.nlp = pyipopt.create(self.Xlen, self.XLB, self.XUB, self.Glen, self.GLB, self.GUB, self.nnzj, self.nnzh, self.Objective, self.obj_grad, self.Constraints, self.ConstructJaco, self.Hessian)
        self.nlp.int_option('print_level',PrintOpt)
        self.nlp.int_option('max_iter',iter)
        self.nlp.num_option('tol',Xtol)
#         self.nlp.num_option('dual_inf_tol',500000)
#         self.nlp.str_option('accept_every_trial_step','yes')
#         '''==============Derivative Check============================'''
#         self.nlp.str_option('derivative_test','only-second-order')
#         self.nlp.num_option('derivative_test_perturbation',1e-6)
#         self.nlp.num_option('derivative_test_tol',1e-5)
#         '''================================================================='''
        #self.nlp.str_option('hessian_approximation','limited-memory')
        #self.nlp.str_option('nlp_scaling_method','none')
        #self.nlp.num_option('nlp_scaling_min_value',0.01)
        #self.nlp.str_option('alpha_for_y','full')
        #self.nlp.num_option('constr_viol_tol',1e-4)
        #nlp.str_option('expect_infeasible_problem','yes')
        #nlp.str_option('start_with_resto','yes')
        self.X0 = asarray(self.Xmeas,dtype=float_)
        self.Validation()
        self.ExitFlag={}
        self.Xopt, self.zl, self.zu, self.constraint_multipliers, self.obj, self.status = self.nlp.solve(self.X0)
        self.ExitFlag[-1]=self.status
        # Retry loop (disabled: range(0) never iterates)
        for i in range(0):
            if (self.status !=0):
                self.Xopt, self.zl, self.zu, self.constraint_multipliers, self.obj, self.status = self.nlp.solve(self.Xopt)
                self.ExitFlag[i]=self.status
                if (self.status==0):
                    self.OptimIndex=i
                    print 'success ', i
                    break      
        self.nlp.close()
Example 13
def solve(p, warm_start):
    global P
    P = p
    t0 = time.time()



    # Create the problem for Ipopt to solve
    P.nlp = pyipopt.create(P.nvar, P.x_L, P.x_U, P.ncon, P.g_L, P.g_U, P.nnzj, P.nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g)

    # Set Ipopt solve options
    """
    P.nlp.int_option("print_level",1)
    P.nlp.num_option("acceptable_tol",10.0)
    P.nlp.num_option("tol",10.0)
    P.nlp.num_option("dual_inf_tol",1000.0)
    P.nlp.num_option("compl_inf_tol",1000.0)
    P.nlp.int_option("max_iter", 10000)
    P.nlp.str_option("linear_solver", "mumps")
    """

    P.nlp.str_option("linear_solver", "mumps")
    P.nlp.str_option("jacobian_approximation", "exact")
    P.nlp.str_option("hessian_approximation", "limited-memory")
    P.nlp.num_option("max_cpu_time", 40.0)
    P.nlp.num_option("tol", 10.0)
    P.nlp.str_option("print_timing_statistics", "no")
    P.nlp.str_option("print_user_options", "no")
    P.nlp.int_option("print_level",4)

    if warm_start:
        P.nlp.str_option('mu_strategy', 'adaptive')
        P.nlp.str_option("warm_start_init_point", "yes")
    #P.nlp.str_option("derivative_test","first-order")

    print "Calling solve"

    x, zl, zu, constrain_mu, obj, status = P.nlp.solve(P.x) # Solve the NLP problem with Ipopt
    #nlp.close()
    if status == 0:
        solve_time = time.time() - t0
        print "Solution found in: ",solve_time," (sec)"
        P.x = x
        P.solved = True
    else:
        print "Failed to find solution!"
        P.x = x
        P.solved = False
Example 14
    def __build_pyipopt_problem(self):
        """Build the pyipopt problem from the OptimizationProblem instance."""

        import pyipopt
        from functools import partial

        self.rfn = ReducedFunctionalNumPy(self.problem.reduced_functional)
        ncontrols = len(self.rfn.get_controls())

        (lb, ub) = self.__get_bounds()
        (nconstraints, fun_g, jac_g, clb, cub) = self.__get_constraints()
        constraints_nnz = nconstraints * ncontrols

        # A callback that evaluates the functional and derivative.
        J = self.rfn.__call__
        dJ = partial(self.rfn.derivative, forget=False)

        nlp = pyipopt.create(
            len(ub),  # length of control vector
            lb,  # lower bounds on control vector
            ub,  # upper bounds on control vector
            nconstraints,  # number of constraints
            clb,  # lower bounds on constraints,
            cub,  # upper bounds on constraints,
            constraints_nnz,  # number of nonzeros in the constraint Jacobian
            0,  # number of nonzeros in the Hessian
            J,  # to evaluate the functional
            dJ,  # to evaluate the gradient
            fun_g,  # to evaluate the constraints
            jac_g)  # to evaluate the constraint Jacobian

        pyipopt.set_loglevel(1)  # turn off annoying pyipopt logging

        if rank(self.problem.reduced_functional.mpi_comm()) > 0:
            nlp.int_option('print_level',
                           0)  # disable redundant IPOPT output in parallel
        else:
            nlp.int_option('print_level', 6)  # very useful IPOPT output

        if isinstance(self.problem, MaximizationProblem):
            # multiply objective function by -1 internally in
            # ipopt to maximise instead of minimise
            nlp.num_option('obj_scaling_factor', -1.0)

        self.pyipopt_problem = nlp
Example 16
    def solve_init(self, eval_J, eval_dJ):
        nvar = self.num_angles
        low_var = -numpy.infty * numpy.ones(nvar, dtype=float)
        up_var = numpy.infty * numpy.ones(nvar, dtype=float)

        self.nlp = pyipopt.create(
            nvar,  # Number of controls
            low_var,  # Lower bounds for Control
            up_var,  # Upper bounds for Control
            0,  # Number of constraints
            numpy.array([], dtype=float),
            numpy.array([], dtype=float),
            0,  # Number of nonzeros in cons. Jac
            0,  # Number of nonzeros in cons. Hes
            lambda angle: eval_J(angle),  # Objective eval
            lambda angle: eval_dJ(angle),  # Obj. grad eval
            self.func_g,
            self.jac_g)
Example 17
def estimate_alpha_beta(counts, X, ini=None, verbose=0,
                        use_empty_entries=True):
    """
    Estimate the parameters of g

    Parameters
    ----------
    counts: ndarray

    use_empty_entries: boolean, optional, default: True
        whether to use zeroes entries as information or not

    """
    m, n = X.shape
    nvar = 1
    ncon = 0
    nnzj = 0
    nnzh = 0

    x_L = np.array([- 10000.])
    x_U = np.array([10000000.])

    nlp = pyipopt.create(nvar, x_L, x_U, ncon, x_L,
                        x_U, nnzj, nnzh, eval_f,
                        eval_grad_f, eval_g, eval_jac_g)

    nlp.int_option('max_iter', 100)
    if ini is None:
        if verbose:
            print "Initial values not provided"
        ini = np.random.randint(1, 100, size=(1, )) + \
              np.random.random(size=(1, ))
    results = nlp.solve(ini, (m, n, counts, X, use_empty_entries))
    try:
        x, _, _, _, _ = results
    except ValueError:
        x, _, _, _, _, _ = results


    # Evaluate counts with new estimated model.
    d = euclidean_distances(X)
    mask = (~np.tri(m, dtype=bool)) & (counts != 0) & (d != 0)
    beta = counts[mask].sum() / (d[mask] ** x[0]).sum()
    return x[0], beta
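
Example 17 passes a user_data tuple as the second argument of nlp.solve; pyipopt forwards it to every callback (the same mechanism Example 19 uses). A sketch of how eval_f could unpack it (the body is a placeholder, not the project's actual likelihood code):

def eval_f(x, user_data=None):
    # user_data is the tuple given to nlp.solve(ini, user_data).
    m, n, counts, X, use_empty_entries = user_data
    return 0.0  # placeholder: the real objective measures model fit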
Example 18

def optimizegraph(opt, max_iter=300, max_cpu_time=100):
    x_L, x_U = opt.bound_variables()
    g_L, g_U = opt.bound_constraints()
    nlp = pyipopt.create(opt.n_var, x_L, x_U, opt.n_cons, g_L, g_U, opt.nnzj, opt.nnzh, \
     opt.eval_objective, opt.eval_grad_objective, opt.eval_constraints, opt.eval_jac_constraint)
    nlp.num_option('max_cpu_time', max_cpu_time)
    nlp.int_option('print_level', 0)
    nlp.num_option('tol', 1e-12)
    nlp.num_option('acceptable_tol', 1e-12)
    x, zl, zu, constraint_multipliers, obj, status = nlp.solve(
        initialize_flow(opt))
    nlp.close()
    # reinforce flow balance
    x = reinforce_flowbalance(opt, x)
    # scale x
    x = np.maximum(x, 0)
    s = np.sum(opt.weights) / np.dot(opt.efflen, x)
    opt.results = x * s
    return opt, status
Example 19
    def _solve(self, x0, A, l, u, xmin, xmax):
        """ Solves using the Interior Point OPTimizer.
        """
        # Indexes of constrained lines.
        il = [i for i,ln in enumerate(self._ln) if 0.0 < ln.rate_a < 1e10]
        nl2 = len(il)

        neqnln = 2 * self._nb # no. of non-linear equality constraints
        niqnln = 2 * len(il)  # no. of lines with constraints

        user_data = {"A": A, "neqnln": neqnln, "niqnln": niqnln}

        self._f(x0)
        Jdata = self._dg(x0, False, user_data)
#        Hdata = self._h(x0, ones(neqnln + niqnln), None, False, user_data)

        lmbda = {"eqnonlin": ones(neqnln),
                 "ineqnonlin": ones(niqnln)}
        H = tril(self._hessfcn(x0, lmbda), format="coo")
        self._Hrow, self._Hcol = H.row, H.col

        n = len(x0) # the number of variables
        xl = xmin
        xu = xmax
        gl = r_[zeros(2 * self._nb), -Inf * ones(2 * nl2), l]
        gu = r_[zeros(2 * self._nb),       zeros(2 * nl2), u]
        m = len(gl) # the number of constraints
        nnzj = len(Jdata) # the number of nonzeros in Jacobian matrix
        nnzh = 0  # len(H.data) would give the number of non-zeros in the Hessian matrix

        f_fcn, df_fcn, g_fcn, dg_fcn, h_fcn = \
            self._f, self._df, self._g, self._dg, self._h

        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh,
                             f_fcn, df_fcn, g_fcn, dg_fcn)#, h_fcn)

#        print dir(nlp)
#        nlp.str_option("print_options_documentation", "yes")
#        nlp.int_option("max_iter", 10)

#        x, zl, zu, obj = nlp.solve(x0)
        success = nlp.solve(x0, user_data)
        nlp.close()
Example 21
def improve_ipopt(x0, prob, *args, **kwargs):
    try:
        import pyipopt
    except ImportError:
        raise Exception("PyIpopt package is not installed.")

    lb = pyipopt.NLP_LOWER_BOUND_INF
    ub = pyipopt.NLP_UPPER_BOUND_INF
    g_L = np.zeros(prob.m)
    for i in range(prob.m):
        if prob.fs[i].relop == '<=':
            g_L[i] = lb
    g_U = np.zeros(prob.m)

    def eval_grad_f(x, user_data = None):
        return 2*prob.f0.P.dot(x) + prob.f0.qarray
    def eval_g(x, user_data = None):
        return np.array([f.eval(x) for f in prob.fs])

    jac_grid = np.indices((prob.m, prob.n))
    jac_r = jac_grid[0].ravel()
    jac_c = jac_grid[1].ravel()
    def eval_jac_g(x, flag, user_data = None):
        if flag:
            return (jac_r, jac_c)
        else:
            return np.vstack([2*f.P.dot(x)+f.qarray for f in prob.fs])

    nlp = pyipopt.create(
        prob.n, lb*np.ones(prob.n), ub*np.ones(prob.n),
        prob.m, g_L, g_U, prob.m*prob.n, 0,
        prob.f0.eval, eval_grad_f,
        eval_g, eval_jac_g
    )
    try:
        x, zl, zu, constraint_multipliers, obj, status = nlp.solve(x0)
    except Exception:
        # Fall back to the starting point if the solve fails.
        x = x0

    return x
Example 22
def OptSolver(bounds, OptObj, max_iter=10):
    """
    Function that returns a pyipopt object with OptObj attributes
    """

    # Number of Design Variables
    nvar = OptObj.nvars
    # Upper and lower bounds
    x_L = numpy.ones(nvar) * bounds[0]
    x_U = numpy.ones(nvar) * bounds[1]
    # Number of non-zeros gradients
    constraints_nnz = nvar*OptObj.cst_num
    acst_L = numpy.array(OptObj.cst_L)
    acst_U = numpy.array(OptObj.cst_U)

    PyIpOptObj = pyipopt.create(nvar,               # number of the design variables
                         x_L,                       # lower bounds of the design variables
                         x_U,                       # upper bounds of the design variables
                         OptObj.cst_num,            # number of constraints
                         acst_L,                    # lower bounds on constraints,
                         acst_U,                    # upper bounds on constraints,
                         constraints_nnz,           # number of nonzeros in the constraint Jacobian
                         0,                         # number of nonzeros in the Hessian
                         OptObj.obj_fun,            # objective function
                         OptObj.obj_dfun,           # gradient of the objective function
                         OptObj.cst_fval,           # constraint function
                         OptObj.jacobian )          # gradient of the constraint function

    #Parameters
    PyIpOptObj.num_option('acceptable_tol', 1.0e-10)
    PyIpOptObj.num_option('eta_phi', 1e-12)                 # eta_phi: Relaxation factor in the Armijo condition.
    PyIpOptObj.num_option('theta_max_fact', 30000)          # Determines upper bound for constraint violation in the filter.
    PyIpOptObj.int_option('max_soc', 20)
    PyIpOptObj.int_option('max_iter', max_iter)
    PyIpOptObj.int_option('watchdog_shortened_iter_trigger', 20)
    PyIpOptObj.int_option('accept_after_max_steps', 5)
    pyipopt.set_loglevel(1)                                 # turn off annoying pyipopt logging
    PyIpOptObj.int_option('print_level', 6)                 # very useful IPOPT output

    return PyIpOptObj
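
A hypothetical call pattern for OptSolver, assuming an OptObj instance with the attributes referenced above and a starting vector x0:

nlp = OptSolver((0.0, 1.0), OptObj, max_iter=50)
x, zl, zu, cst_mult, obj, status = nlp.solve(x0)
nlp.close()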
Example 23

def main():
    cable_positions_numpy = numpy.array(cable_positions)

    print(cable_positions)

    def eval_robust_J(cable_positions):
        if max(eval_g(cable_positions)) > 0:
            print("Warning: Inequality constraint is not satisfied")
            return numpy.inf
        # print eval_g(cable_positions).T
        try:
            j = eval_J(cable_positions)
            # c1 = numpy.array([cable_positions[0],cable_positions[1]])
            # c2 = numpy.array([cable_positions[2],cable_positions[3]])
            # c3 = numpy.array([cable_positions[4],cable_positions[5]])
            # print("Angles between the three cables: %.2f, %.2f, %.2f"
            #       % (numpy.arccos(numpy.dot(c1-c2,c1-c3)/
            #                       (numpy.sqrt(numpy.dot(c1-c2,c1-c2)
            #                                   *numpy.dot(c1-c3,c1-c3))))/(2*numpy.pi)*360,
            #          numpy.arccos(numpy.dot(c1-c2,c3-c2)/
            #                       (numpy.sqrt(numpy.dot(c1-c2,c1-c2)
            #                                   *numpy.dot(c3-c2,c3-c2))))/(2*numpy.pi)*360,
            #          numpy.arccos(numpy.dot(c3-c1,c3-c2)/
            #                       (numpy.sqrt(numpy.dot(c3-c1,c3-c1)
            #                                   *numpy.dot(c3-c2,c3-c2))))/(2*numpy.pi)*360))
        except:
            print("ERROR: Forward model failed, returning infinity")
            j = numpy.inf
            exit(1)

        #print( "J = ", j)
        return j

    def eval_robust_dJ(cable_positions):
        if max(eval_g(cable_positions)) > 0:
            print("Warning: Inequality constraint is not satisfied")
            return numpy.inf
        dj = eval_dJ(cable_positions)
        print("|dj| = ", numpy.dot(dj, dj)**0.5)
        return dj

    def test_J(cable_positions):
        """ Test of optimization without state eq"""
        j = 0.5 * numpy.dot(cable_positions, cable_positions)
        if max(eval_g(cable_positions)) > 0:
            print("Warning: Inequality constraint is not satisfied: ")
            print(eval_g(cable_positions))
            print(cable_positions)
            print("*" * 25)
        print("J = ", j)
        return j

    def test_dJ(cable_positions):
        """ Test of optimization without state eq"""
        dj = numpy.array(cable_positions)
        print("|dj| = ", numpy.dot(dj, dj)**0.5)
        return dj

    # Check that input data match input variables
    nvar = int(2 * num_cables)  # Number of controls
    ncon = int(num_cables + num_cables *
               (num_cables - 1) / 2)  # Number of inequality constraints

    # Create the NLP model
    #pyipopt.set_loglevel(2)         # Set verbosity
    nlp = pyipopt.create(
        nvar,  # Number of controls
        -numpy.inf * numpy.ones(nvar, dtype=float),  # Lower bounds of controls
        numpy.inf * numpy.ones(nvar, dtype=float),  # Upper bounds of controls
        ncon,  # Number of inequality constraints
        -numpy.inf * numpy.ones(
            ncon, dtype=float),  # Lower bounds of inequality constraints
        numpy.zeros(ncon,
                    dtype=float),  # Upper bounds of inequality constraints
        nvar * ncon,  # Number of nonzeros in the constraint Jacobian
        0,  # Number of nonzeros in the Hessian
        lambda pos: eval_robust_J(pos),  # Objective evaluation
        lambda pos: eval_robust_dJ(pos),  # Objective gradient evaluation
        # lambda pos: test_J(pos),                      # Objective evaluation
        # lambda pos: test_dJ(pos),                     # Objective gradient evaluation
        eval_g,  # Constraint evaluation
        eval_jac_g,  # Constraint Jacobian evaluation
    )
    nlp.num_option('obj_scaling_factor',
                   1e-1)  # 1e-2 for isoceles example as gradient is bigger
    # 1e-1 for 3 cables as gradient is smaller
    nlp.int_option('max_iter', 100)
    nlp.num_option('acceptable_tol', 1e-2)
    nlp.num_option('tol', 1e-2)
    nlp.num_option('bound_relax_factor',
                   0)  # So it does not violate the boundary constraint

    # nlp.str_option('mu_strategy',"adaptive")
    #nlp.str_option('nlp_scaling_method',"gradient-based")
    # nlp.num_option("mu_max", 0.1)
    # nlp.num_option('mu_init', 0.1)#1e25)
    #nlp.num_option('bound_relax_factor', 0) # So it does not violate the boundary constraint
    # nlp.num_option('acceptable_tol', 1e-5)
    # Solve the optimisation problem
    opt_cable_pos = nlp.solve(cable_positions_numpy)[0]
    nlp.close()

    # Report the results
    d = opt_cable_pos.reshape(-1, 2)
    for i in range(num_cables):
        max_radius = outer_cable_mesh_radius - inner_cable_mesh_radius - distance_from_outer_cable_mesh
        print("Cable %d distance from center: %.3e" %
              (i, numpy.sqrt(d[i, 0]**2 + d[i, 1]**2)))
        for j in range(i + 1, num_cables):
            print("Distance between Cable %d and %d: %.3e" %
                  (i, j, numpy.sqrt((d[i, 0] - d[j, 0])**2 +
                                    (d[i, 1] - d[j, 1])**2)))
    print("Cable_positions:")
    for i in range(num_cables):
        print("%10.3e, %10.3e" % (d[i, 0], d[i, 1]))
    update_mesh(opt_cable_pos)

    # dolfin.plot(update_mesh(opt_cable_pos))
    TempFiles = [
        dolfin.File("output/T_ipopt_part%d.pvd" % i)
        for i in range(num_cables + 1)
    ]
    eval_J(cable_positions)
    for i in range(num_cables + 1):
        TempFiles[i] << T.part(i)

    eval_J(opt_cable_pos)
    for i in range(num_cables + 1):
        TempFiles[i] << T.part(i)
    c1 = d[0, :]
    c2 = d[1, :]
    c3 = d[2, :]
    print(
        numpy.arccos(
            numpy.dot(c1 - c2, c1 - c3) / (numpy.sqrt(
                numpy.dot(c1 - c2, c1 - c2) * numpy.dot(c1 - c3, c1 - c3)))) /
        (2 * numpy.pi) * 360)
    print(
        numpy.arccos(
            numpy.dot(c1 - c2, c3 - c2) / (numpy.sqrt(
                numpy.dot(c1 - c2, c1 - c2) * numpy.dot(c3 - c2, c3 - c2)))) /
        (2 * numpy.pi) * 360)
    print(
        numpy.arccos(
            numpy.dot(c3 - c1, c3 - c2) / (numpy.sqrt(
                numpy.dot(c3 - c1, c3 - c1) * numpy.dot(c3 - c2, c3 - c2)))) /
        (2 * numpy.pi) * 360)
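
The three arccos expressions above repeat one pattern: the angle at a vertex between the directions to the other two cables. A small helper (hypothetical, not part of the original script) states the intent directly:

import numpy

def angle_deg(v, p, q):
    # Angle at vertex v between the rays v->p and v->q, in degrees.
    u, w = p - v, q - v
    return numpy.degrees(numpy.arccos(
        numpy.dot(u, w) / numpy.sqrt(numpy.dot(u, u) * numpy.dot(w, w))))

print(angle_deg(c1, c2, c3))  # angle at c1
print(angle_deg(c2, c1, c3))  # angle at c2
print(angle_deg(c3, c1, c2))  # angle at c3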
Example 24
    def __solve__(
        self,
        opt_problem={},
        sens_type="FD",
        store_sol=True,
        disp_opts=False,
        store_hst=False,
        hot_start=False,
        sens_mode="",
        sens_step={},
        *args,
        **kwargs
    ):
        """
        Run Optimizer (Optimize Routine)

        **Keyword arguments:**

        - opt_problem -> INST: Optimization instance
        - sens_type -> STR/FUNC: Gradient type, *Default* = 'FD'
        - store_sol -> BOOL: Store solution in Optimization class flag,
          *Default* = True
        - disp_opts -> BOOL: Flag to display options in solution text, *Default*
          = False
        - store_hst -> BOOL/STR: Flag/filename to store optimization history,
          *Default* = False
        - hot_start -> BOOL/STR: Flag/filename to read optimization history,
          *Default* = False
        - sens_mode -> STR: Flag for parallel gradient calculation, *Default* =
          ''
        - sens_step -> FLOAT: Sensitivity step size, *Default* = {} [corresponds
          to 1e-6 (FD), 1e-20 (CS)]

        Documentation last updated:  Feb. 2, 2011 - Peter W. Jansen
        """

        self.pll = False
        self.myrank = 0

        myrank = self.myrank

        tmp_file = False
        def_fname = self.options["output_file"][1].split(".")[0]
        if isinstance(store_hst, str):
            if isinstance(hot_start, str):
                if myrank == 0:
                    if store_hst == hot_start:
                        hos_file = History(hot_start, "r", self)
                        log_file = History(store_hst + "_tmp", "w", self, opt_problem.name)
                        tmp_file = True
                    else:
                        hos_file = History(hot_start, "r", self)
                        log_file = History(store_hst, "w", self, opt_problem.name)
                    # end
                # end
                self.sto_hst = True
                self.h_start = True
            elif hot_start:
                if myrank == 0:
                    hos_file = History(store_hst, "r", self)
                    log_file = History(store_hst + "_tmp", "w", self, opt_problem.name)
                    tmp_file = True
                # end
                self.sto_hst = True
                self.h_start = True
            else:
                if myrank == 0:
                    log_file = History(store_hst, "w", self, opt_problem.name)
                # end
                self.sto_hst = True
                self.h_start = False
            # end
        elif store_hst:
            if isinstance(hot_start, str):
                if hot_start == def_fname:
                    if myrank == 0:
                        hos_file = History(hot_start, "r", self)
                        log_file = History(def_fname + "_tmp", "w", self, opt_problem.name)
                        tmp_file = True
                    # end
                else:
                    if myrank == 0:
                        hos_file = History(hot_start, "r", self)
                        log_file = History(def_fname, "w", self, opt_problem.name)
                    # end
                # end
                self.sto_hst = True
                self.h_start = True
            elif hot_start:
                if myrank == 0:
                    hos_file = History(def_fname, "r", self)
                    log_file = History(def_fname + "_tmp", "w", self, opt_problem.name)
                    tmp_file = True
                # end
                self.sto_hst = True
                self.h_start = True
            else:
                if myrank == 0:
                    log_file = History(def_fname, "w", self, opt_problem.name)
                # end
                self.sto_hst = True
                self.h_start = False
            # end
        else:
            self.sto_hst = False
            self.h_start = False
        # end

        gradient = Gradient(opt_problem, sens_type, sens_mode, sens_step, *args, **kwargs)

        def eval_f(x, user_data=None):
            """IPOPT - Objective Value Function."""
            # Variables Groups Handling
            if opt_problem.use_groups:
                xg = {}
                for group in group_ids.keys():
                    if group_ids[group][1] - group_ids[group][0] == 1:
                        xg[group] = x[group_ids[group][0]]
                    else:
                        xg[group] = x[group_ids[group][0] : group_ids[group][1]]
                    # end
                # end
                xn = xg
            else:
                xn = x
            # end

            # Flush Output Files
            self.flushFiles()

            # Evaluate User Function
            fail = 0
            # if (myrank == 0):
            #    if self.h_start:
            #        [vals,hist_end] = hos_file.read(ident=['obj', 'con', 'fail'])
            #        if hist_end:
            #            self.h_start = False
            #            hos_file.close()
            #        else:
            #            [ff,gg,fail] = [vals['obj'][0][0],vals['con'][0],int(vals['fail'][0][0])]
            #        #end
            #    #end
            # end

            # if self.pll:
            #    self.h_start = Bcast(self.h_start,root=0)
            # end
            # if self.h_start and self.pll:
            #    [ff,gg,fail] = Bcast([ff,gg,fail],root=0)
            # else:
            [ff, gg, fail] = opt_problem.obj_fun(xn, *args, **kwargs)
            # end

            # Store History
            if myrank == 0:
                if self.sto_hst:
                    log_file.write(x, "x")
                    log_file.write(ff, "obj")
                    log_file.write(gg, "con")
                    log_file.write(fail, "fail")
                # end
            # end

            # Objective Assignment
            if isinstance(ff, complex):
                f = ff.astype(float)
            else:
                f = ff
            # end

            # Constraints Assignment
            g = numpy.zeros(len(opt_problem._constraints.keys()))
            for i in xrange(len(opt_problem._constraints.keys())):
                if isinstance(gg[i], complex):
                    g[i] = gg[i].astype(float)
                else:
                    g[i] = gg[i]
                # end
            # end

            return f

        def eval_g(x, user_data=None):

            # Variables Groups Handling
            if opt_problem.use_groups:
                xg = {}
                for group in group_ids.keys():
                    if group_ids[group][1] - group_ids[group][0] == 1:
                        xg[group] = x[group_ids[group][0]]
                    else:
                        xg[group] = x[group_ids[group][0] : group_ids[group][1]]
                    # end
                # end
                xn = xg
            else:
                xn = x
            # end

            # Flush Output Files
            self.flushFiles()

            # Evaluate User Function
            fail = 0
            #            if (myrank == 0):
            #                if self.h_start:
            #                    [vals,hist_end] = hos_file.read(ident=['obj', 'con', 'fail'])
            #                    if hist_end:
            #                        self.h_start = False
            #                        hos_file.close()
            #                    else:
            #                        [ff,gg,fail] = [vals['obj'][0][0],vals['con'][0],int(vals['fail'][0][0])]
            # end
            # end
            # end

            # if self.pll:
            #   self.h_start = Bcast(self.h_start,root=0)
            # end
            # if self.h_start and self.pll:
            #    [ff,gg,fail] = Bcast([ff,gg,fail],root=0)
            # else:
            [ff, gg, fail] = opt_problem.obj_fun(xn, *args, **kwargs)
            # end

            # Store History
            if myrank == 0:
                if self.sto_hst:
                    log_file.write(x, "x")
                    log_file.write(ff, "obj")
                    log_file.write(gg, "con")
                    log_file.write(fail, "fail")
                # end
            # end

            # Objective Assignment
            if isinstance(ff, complex):
                f = ff.astype(float)
            else:
                f = ff
            # end

            # Constraints Assignment
            g = numpy.zeros(len(opt_problem._constraints.keys()))
            for i in xrange(len(opt_problem._constraints.keys())):
                if isinstance(gg[i], complex):
                    g[i] = gg[i].astype(float)
                else:
                    g[i] = gg[i]
                # end
            # end

            return g

        def eval_grad_f(x, user_data=None):
            """IPOPT - Objective/Constraint Gradients Function."""
            # if self.h_start:
            #    if (myrank == 0):
            #        [vals,hist_end] = hos_file.read(ident=['grad_obj','grad_con'])
            #        if hist_end:
            #            self.h_start = False
            #            hos_file.close()
            #        else:
            #            dff = vals['grad_obj'][0].reshape((len(opt_problem._objectives.keys()),len(opt_problem._variables.keys())))
            #            dgg = vals['grad_con'][0].reshape((len(opt_problem._constraints.keys()),len(opt_problem._variables.keys())))
            #        #end
            #    #end
            #    if self.pll:
            #        self.h_start = Bcast(self.h_start,root=0)
            #    #end
            #    if self.h_start and self.pll:
            #        [dff,dgg] = Bcast([dff,dgg],root=0)
            #    #end
            # end

            # if not self.h_start:

            [f, g, fail] = opt_problem.obj_fun(x, *args, **kwargs)
            dff, dgg = gradient.getGrad(x, group_ids, [f], g, *args, **kwargs)

            # Store History
            if self.sto_hst and (myrank == 0):
                log_file.write(dff, "grad_obj")
                log_file.write(dgg, "grad_con")
            # end

            # Gradient Assignment
            df = numpy.zeros(len(opt_problem._variables.keys()))

            for i in xrange(len(opt_problem._variables.keys())):
                df[i] = dff[0, i]
            # end

            return df

        def eval_grad_g(x, flag, user_data=None):

            # if self.h_start:
            #    if (myrank == 0):
            #        [vals,hist_end] = hos_file.read(ident=['grad_obj','grad_con'])
            #        if hist_end:
            #            self.h_start = False
            #            hos_file.close()
            #        else:
            #            dff = vals['grad_obj'][0].reshape((len(opt_problem._objectives.keys()),len(opt_problem._variables.keys())))
            #            dgg = vals['grad_con'][0].reshape((len(opt_problem._constraints.keys()),len(opt_problem._variables.keys())))
            #        #end
            #    #end
            #    if self.pll:
            #        self.h_start = Bcast(self.h_start,root=0)
            #    #end
            #    if self.h_start and self.pll:
            #        [dff,dgg] = Bcast([dff,dgg],root=0)
            #    #end
            # end

            # if not self.h_start:

            if flag:
                a = numpy.zeros(len(opt_problem._variables.keys()) * len(opt_problem._constraints.keys()), int)
                b = numpy.zeros(len(opt_problem._variables.keys()) * len(opt_problem._constraints.keys()), int)

                for i in xrange(len(opt_problem._constraints.keys())):
                    for j in xrange(len(opt_problem._variables.keys())):
                        a[i * len(opt_problem._variables.keys()) + j] = i
                        b[i * len(opt_problem._variables.keys()) + j] = j
                return (a, b)

            else:
                [f, g, fail] = opt_problem.obj_fun(x, *args, **kwargs)
                dff, dgg = gradient.getGrad(x, group_ids, [f], g, *args, **kwargs)

                # Store History
                if self.sto_hst and (myrank == 0):
                    log_file.write(dff, "grad_obj")
                    log_file.write(dgg, "grad_con")
                # end

                # Gradient Assignment
                a = numpy.zeros([len(opt_problem._variables.keys()) * len(opt_problem._constraints.keys())])
                for i in xrange(len(opt_problem._constraints.keys())):
                    for j in xrange(len(opt_problem._variables.keys())):
                        a[i * len(opt_problem._variables.keys()) + j] = dgg[i, j]
                    # end
                # end

                return a

        # Variables Handling
        nvar = len(opt_problem._variables.keys())
        xl = []
        xu = []
        xx = []
        for key in opt_problem._variables.keys():
            if opt_problem._variables[key].type == "c":
                xl.append(opt_problem._variables[key].lower)
                xu.append(opt_problem._variables[key].upper)
                xx.append(opt_problem._variables[key].value)
            elif opt_problem._variables[key].type == "i":
                raise IOError("IPOPT cannot handle integer design variables")
            elif opt_problem._variables[key].type == "d":
                raise IOError("IPOPT cannot handle discrete design variables")
            # end
        # end
        xl = numpy.array(xl)
        xu = numpy.array(xu)
        xx = numpy.array(xx)

        # Variables Groups Handling
        group_ids = {}
        if opt_problem.use_groups:
            k = 0
            for key in opt_problem._vargroups.keys():
                group_len = len(opt_problem._vargroups[key]["ids"])
                group_ids[opt_problem._vargroups[key]["name"]] = [k, k + group_len]
                k += group_len
            # end
        # end

        # Constraints Handling
        ncon = len(opt_problem._constraints.keys())
        blc = []
        buc = []
        if ncon > 0:
            for key in opt_problem._constraints.keys():
                if opt_problem._constraints[key].type == "e":
                    blc.append(opt_problem._constraints[key].equal)
                    buc.append(opt_problem._constraints[key].equal)
                elif opt_problem._constraints[key].type == "i":
                    blc.append(opt_problem._constraints[key].lower)
                    buc.append(opt_problem._constraints[key].upper)
                # end
            # end
        else:
            if (store_sol) and (myrank == 0):
                print "Optimization Problem Does Not Have Constraints\n"
                print "Unconstrained Optimization Initiated\n"
            # end
            ncon = 1
            blc.append(-inf)
            buc.append(inf)
        # end
        blc = numpy.array(blc)
        buc = numpy.array(buc)

        # Objective Handling
        objfunc = opt_problem.obj_fun
        nobj = len(opt_problem._objectives.keys())
        ff = []
        for key in opt_problem._objectives.keys():
            ff.append(opt_problem._objectives[key].value)
        # end
        ff = numpy.array(ff)

        # Create an IPOPT instance problem
        nnzj = nvar * ncon
        nnzh = nvar * nvar
        ipopt = pyipopt.create(nvar, xl, xu, ncon, blc, buc, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_grad_g)

        # Setup Options
        optionss = self.options.copy()
        del optionss["defaults"]

        for i in optionss:
            if not self.options["defaults"][i][1] == optionss[i][1]:
                if self.options[i][0].__name__ == "int":
                    ipopt.int_option(i, self.options[i][1])

                if self.options[i][0].__name__ == "float":
                    ipopt.num_option(i, self.options[i][1])

                if self.options[i][0].__name__ == "str":
                    ipopt.str_option(i, self.options[i][1])

        # Run IPOPT

        t0 = time.time()
        r = ipopt.solve(xx)
        sol_time = time.time() - t0

        if myrank == 0:
            if self.sto_hst:
                log_file.close()
                if tmp_file:
                    hos_file.close()
                    name = hos_file.filename
                    os.remove(name + ".cue")
                    os.remove(name + ".bin")
                    os.rename(name + "_tmp.cue", name + ".cue")
                    os.rename(name + "_tmp.bin", name + ".bin")
                # end
            # end
        # end

        ipopt.close()

        # Store Results
        sol_inform = {}
        print r
        sol_inform["value"] = r[-1]  # ifail[0]
        sol_inform["text"] = self.getInform(r[-1])  # self.getInform(ifail[0])

        if store_sol:
            sol_name = "IPOPT Solution to " + opt_problem.name

            sol_options = copy.copy(self.options)
            if "default" in sol_options:
                del sol_options["defaults"]
            # end

            sol_evals = 0

            sol_vars = copy.deepcopy(opt_problem._variables)
            i = 0
            x = r[0]
            for key in sol_vars.keys():
                sol_vars[key].value = x[i]
                i += 1
            # end

            sol_objs = copy.deepcopy(opt_problem._objectives)
            sol_objs[0].value = r[4]

            sol_cons = {}

            if ncon > 0:
                sol_lambda = r[3]
            else:
                sol_lambda = {}
            # end

            opt_problem.addSol(
                self.__class__.__name__,
                sol_name,
                objfunc,
                sol_time,
                sol_evals,
                sol_inform,
                sol_vars,
                sol_objs,
                sol_cons,
                sol_options,
                display_opts=disp_opts,
                Lambda=sol_lambda,
                Sensitivities=sens_type,
                myrank=myrank,
                arguments=args,
                **kwargs
            )

        # end

        return ff, xx, sol_inform  # ifail[0]
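
The wrapper above dispatches each non-default option to the matching typed setter. A minimal hedged sketch of the same pattern in isolation, assuming an already-created pyipopt problem object and an illustrative dict of options:

def apply_options(nlp, opts):
    # dispatch each option to int_option/num_option/str_option by value type
    for name, value in opts.items():
        if isinstance(value, int):
            nlp.int_option(name, value)
        elif isinstance(value, float):
            nlp.num_option(name, value)
        else:
            nlp.str_option(name, value)

apply_options(nlp, {"max_iter": 500, "tol": 1e-8, "mu_strategy": "adaptive"})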
Esempio n. 25
0
def ipoptopf_solver(om, ppopt):
    """Solves AC optimal power flow using IPOPT.

    Inputs are an OPF model object and a PYPOWER options vector.

    Outputs are a C{results} dict, C{success} flag and C{raw} output dict.

    C{results} is a PYPOWER case dict (ppc) with the usual C{baseMVA}, C{bus}
    C{branch}, C{gen}, C{gencost} fields, along with the following additional
    fields:
        - C{order}      see 'help ext2int' for details of this field
        - C{x}          final value of optimization variables (internal order)
        - C{f}          final objective function value
        - C{mu}         shadow prices on ...
            - C{var}
                - C{l}  lower bounds on variables
                - C{u}  upper bounds on variables
            - C{nln}
                - C{l}  lower bounds on nonlinear constraints
                - C{u}  upper bounds on nonlinear constraints
            - C{lin}
                - C{l}  lower bounds on linear constraints
                - C{u}  upper bounds on linear constraints

    C{success} is C{True} if solver converged successfully, C{False} otherwise

    C{raw} is a raw output dict in form returned by MINOS
        - C{xr}     final value of optimization variables
        - C{pimul}  constraint multipliers
        - C{info}   solver specific termination code
        - C{output} solver specific output information

    @see: L{opf}, L{pips}

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
    Autonoma de Manizales)
    @author: Richard Lincoln
    """
    import pyipopt

    ## unpack data
    ppc = om.get_ppc()
    baseMVA, bus, gen, branch, gencost = \
        ppc['baseMVA'], ppc['bus'], ppc['gen'], ppc['branch'], ppc['gencost']
    vv, _, nn, _ = om.get_idx()

    ## problem dimensions
    nb = shape(bus)[0]  ## number of buses
    ng = shape(gen)[0]  ## number of gens
    nl = shape(branch)[0]  ## number of branches
    ny = om.getN('var', 'y')  ## number of piece-wise linear costs

    ## linear constraints
    A, l, u = om.linear_constraints()

    ## bounds on optimization vars
    _, xmin, xmax = om.getv()

    ## build admittance matrices
    Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)

    ## try to select an interior initial point
    ll = xmin.copy()
    uu = xmax.copy()
    ll[xmin == -Inf] = -2e19  ## replace Inf with numerical proxies
    uu[xmax == Inf] = 2e19
    x0 = (ll + uu) / 2
    Varefs = bus[bus[:, BUS_TYPE] == REF, VA] * (pi / 180)
    x0[vv['i1']['Va']:vv['iN']['Va']] = Varefs[0]  ## angles set to first reference angle
    if ny > 0:
        ipwl = find(gencost[:, MODEL] == PW_LINEAR)
        #        PQ = r_[gen[:, PMAX], gen[:, QMAX]]
        #        c = totcost(gencost[ipwl, :], PQ[ipwl])
        ## largest y-value in CCV data
        c = gencost.flatten('F')[sub2ind(shape(gencost), ipwl,
                                         NCOST + 2 * gencost[ipwl, NCOST])]
        x0[vv['i1']['y']:vv['iN']['y']] = max(c) + 0.1 * abs(max(c))


#        x0[vv['i1']['y']:vv['iN']['y']) = c + 0.1 * abs(c)

    ## find branches with flow limits
    il = find((branch[:, RATE_A] != 0) & (branch[:, RATE_A] < 1e10))
    nl2 = len(il)  ## number of constrained lines

    ##-----  run opf  -----
    ## build Jacobian and Hessian structure
    if A is not None and issparse(A):
        nA = A.shape[0]  ## number of original linear constraints
    else:
        nA = 0
    nx = len(x0)
    f = branch[:, F_BUS]  ## list of "from" buses
    t = branch[:, T_BUS]  ## list of "to" buses
    Cf = sparse((ones(nl), (arange(nl), f)),
                (nl, nb))  ## connection matrix for line & from buses
    Ct = sparse((ones(nl), (arange(nl), t)),
                (nl, nb))  ## connection matrix for line & to buses
    Cl = Cf + Ct
    Cb = Cl.T * Cl + speye(nb, nb)
    Cl2 = Cl[il, :]
    Cg = sparse((ones(ng), (gen[:, GEN_BUS], arange(ng))), (nb, ng))
    nz = nx - 2 * (nb + ng)
    nxtra = nx - 2 * nb
    if nz > 0:
        Js = vstack([
            hstack([Cb, Cb, Cg, sparse(
                (nb, ng)), sparse((nb, nz))]),
            hstack([Cb, Cb, sparse(
                (nb, ng)), Cg, sparse((nb, nz))]),
            hstack([Cl2, Cl2,
                    sparse((nl2, 2 * ng)),
                    sparse((nl2, nz))]),
            hstack([Cl2, Cl2,
                    sparse((nl2, 2 * ng)),
                    sparse((nl2, nz))])
        ], 'coo')
    else:
        Js = vstack([
            hstack([Cb, Cb, Cg, sparse((nb, ng))]),
            hstack([
                Cb,
                Cb,
                sparse((nb, ng)),
                Cg,
            ]),
            hstack([
                Cl2,
                Cl2,
                sparse((nl2, 2 * ng)),
            ]),
            hstack([
                Cl2,
                Cl2,
                sparse((nl2, 2 * ng)),
            ])
        ], 'coo')

    if A is not None and issparse(A):
        Js = vstack([Js, A], 'coo')

    f, _, d2f = opf_costfcn(x0, om, True)
    Hs = tril(d2f + vstack([
        hstack([Cb, Cb, sparse((nb, nxtra))]),
        hstack([Cb, Cb, sparse((nb, nxtra))]),
        sparse((nxtra, nx))
    ]),
              format='coo')

    ## set options struct for IPOPT
    #    options = {}
    #    options['ipopt'] = ipopt_options([], ppopt)

    ## extra data to pass to functions
    userdata = {
        'om': om,
        'Ybus': Ybus,
        'Yf': Yf[il, :],
        'Yt': Yt[il, :],
        'ppopt': ppopt,
        'il': il,
        'A': A,
        'nA': nA,
        'neqnln': 2 * nb,
        'niqnln': 2 * nl2,
        'Js': Js,
        'Hs': Hs
    }

    ## check Jacobian and Hessian structure
    #xr                  = rand(x0.shape)
    #lmbda               = rand( 2 * nb + 2 * nl2)
    #Js1 = eval_jac_g(x, flag, userdata) #(xr, options.auxdata)
    #Hs1  = eval_h(xr, 1, lmbda, userdata)
    #i1, j1, s = find(Js)
    #i2, j2, s = find(Js1)
    #if (len(i1) != len(i2)) | (norm(i1 - i2) != 0) | (norm(j1 - j2) != 0):
    #    raise ValueError, 'something''s wrong with the Jacobian structure'
    #
    #i1, j1, s = find(Hs)
    #i2, j2, s = find(Hs1)
    #if (len(i1) != len(i2)) | (norm(i1 - i2) != 0) | (norm(j1 - j2) != 0):
    #    raise ValueError, 'something''s wrong with the Hessian structure'

    ## define variable and constraint bounds
    # n is the number of variables
    n = x0.shape[0]
    # xl is the lower bound of x as bounded constraints
    xl = xmin
    # xu is the upper bound of x as bounded constraints
    xu = xmax

    neqnln = 2 * nb
    niqnln = 2 * nl2

    # number of constraints
    m = neqnln + niqnln + nA
    # lower bound of constraint
    gl = r_[zeros(neqnln), -Inf * ones(niqnln), l]
    # upper bound of constraints
    gu = r_[zeros(neqnln), zeros(niqnln), u]

    # number of nonzeros in Jacobi matrix
    nnzj = Js.nnz
    # number of non-zeros in Hessian matrix, you can set it to 0
    nnzh = Hs.nnz

    eval_hessian = True
    if eval_hessian:
        hessian = lambda x, lagrange, obj_factor, flag, user_data=None: \
                eval_h(x, lagrange, obj_factor, flag, userdata)

        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh, eval_f,
                             eval_grad_f, eval_g, eval_jac_g, hessian)
    else:
        nnzh = 0
        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh, eval_f,
                             eval_grad_f, eval_g, eval_jac_g)

    nlp.int_option('print_level', 5)
    nlp.num_option('tol', 1.0000e-12)
    nlp.int_option('max_iter', 250)
    nlp.num_option('dual_inf_tol', 0.10000)
    nlp.num_option('constr_viol_tol', 1.0000e-06)
    nlp.num_option('compl_inf_tol', 1.0000e-05)
    nlp.num_option('acceptable_tol', 1.0000e-08)
    nlp.num_option('acceptable_constr_viol_tol', 1.0000e-04)
    nlp.num_option('acceptable_compl_inf_tol', 0.0010000)
    nlp.str_option('mu_strategy', 'adaptive')

    n_iter = [0]  # mutable holder so the callback below can update it (no 'nonlocal' in Python 2)

    def intermediate_callback(algmod,
                              iter_count,
                              obj_value,
                              inf_pr,
                              inf_du,
                              mu,
                              d_norm,
                              regularization_size,
                              alpha_du,
                              alpha_pr,
                              ls_trials,
                              user_data=None):
        n_iter[0] = iter_count
        return True

    nlp.set_intermediate_callback(intermediate_callback)

    ## run the optimization
    # returns final solution x, upper and lower bound for multiplier, final
    # objective function obj and the return status of ipopt
    x, zl, zu, obj, status, zg = nlp.solve(x0, m, userdata)

    info = {
        'x': x,
        'zl': zl,
        'zu': zu,
        'obj': obj,
        'status': status,
        'lmbda': zg
    }

    nlp.close()

    success = (status == 0) | (status == 1)

    output = {'iterations': n_iter[0]}

    f, _ = opf_costfcn(x, om)

    ## update solution data
    Va = x[vv['i1']['Va']:vv['iN']['Va']]
    Vm = x[vv['i1']['Vm']:vv['iN']['Vm']]
    Pg = x[vv['i1']['Pg']:vv['iN']['Pg']]
    Qg = x[vv['i1']['Qg']:vv['iN']['Qg']]
    V = Vm * exp(1j * Va)

    ##-----  calculate return values  -----
    ## update voltages & generator outputs
    bus[:, VA] = Va * 180 / pi
    bus[:, VM] = Vm
    gen[:, PG] = Pg * baseMVA
    gen[:, QG] = Qg * baseMVA
    gen[:, VG] = Vm[gen[:, GEN_BUS].astype(int)]

    ## compute branch flows
    f_br = branch[:, F_BUS].astype(int)
    t_br = branch[:, T_BUS].astype(int)
    Sf = V[f_br] * conj(Yf * V)  ## cplx pwr at "from" bus, p.u.
    St = V[t_br] * conj(Yt * V)  ## cplx pwr at "to" bus, p.u.
    branch[:, PF] = Sf.real * baseMVA
    branch[:, QF] = Sf.imag * baseMVA
    branch[:, PT] = St.real * baseMVA
    branch[:, QT] = St.imag * baseMVA

    ## line constraint is actually on square of limit
    ## so we must fix multipliers
    muSf = zeros(nl)
    muSt = zeros(nl)
    if len(il) > 0:
        muSf[il] = 2 * info['lmbda'][2 * nb +
                                     arange(nl2)] * branch[il,
                                                           RATE_A] / baseMVA
        muSt[il] = 2 * info['lmbda'][2 * nb + nl2 +
                                     arange(nl2)] * branch[il,
                                                           RATE_A] / baseMVA

    ## update Lagrange multipliers
    bus[:, MU_VMAX] = info['zu'][vv['i1']['Vm']:vv['iN']['Vm']]
    bus[:, MU_VMIN] = info['zl'][vv['i1']['Vm']:vv['iN']['Vm']]
    gen[:, MU_PMAX] = info['zu'][vv['i1']['Pg']:vv['iN']['Pg']] / baseMVA
    gen[:, MU_PMIN] = info['zl'][vv['i1']['Pg']:vv['iN']['Pg']] / baseMVA
    gen[:, MU_QMAX] = info['zu'][vv['i1']['Qg']:vv['iN']['Qg']] / baseMVA
    gen[:, MU_QMIN] = info['zl'][vv['i1']['Qg']:vv['iN']['Qg']] / baseMVA
    bus[:, LAM_P] = info['lmbda'][nn['i1']['Pmis']:nn['iN']['Pmis']] / baseMVA
    bus[:, LAM_Q] = info['lmbda'][nn['i1']['Qmis']:nn['iN']['Qmis']] / baseMVA
    branch[:, MU_SF] = muSf / baseMVA
    branch[:, MU_ST] = muSt / baseMVA

    ## package up results
    nlnN = om.getN('nln')

    ## extract multipliers for nonlinear constraints
    kl = find(info['lmbda'][:2 * nb] < 0)
    ku = find(info['lmbda'][:2 * nb] > 0)
    nl_mu_l = zeros(nlnN)
    nl_mu_u = r_[zeros(2 * nb), muSf, muSt]
    nl_mu_l[kl] = -info['lmbda'][kl]
    nl_mu_u[ku] = info['lmbda'][ku]

    ## extract multipliers for linear constraints
    lam_lin = info['lmbda'][2 * nb + 2 * nl2 +
                            arange(nA)]  ## lmbda for linear constraints
    kl = find(lam_lin < 0)  ## lower bound binding
    ku = find(lam_lin > 0)  ## upper bound binding
    mu_l = zeros(nA)
    mu_l[kl] = -lam_lin[kl]
    mu_u = zeros(nA)
    mu_u[ku] = lam_lin[ku]

    mu = {
      'var': {'l': info['zl'], 'u': info['zu']},
      'nln': {'l': nl_mu_l, 'u': nl_mu_u},
      'lin': {'l': mu_l, 'u': mu_u}
    }

    results = ppc
    results['bus'], results['branch'], results['gen'], \
        results['om'], results['x'], results['mu'], results['f'] = \
            bus, branch, gen, om, x, mu, f

    pimul = r_[results['mu']['nln']['l'] - results['mu']['nln']['u'],
               results['mu']['lin']['l'] - results['mu']['lin']['u'],
               -ones(ny > 0),
               results['mu']['var']['l'] - results['mu']['var']['u']]
    raw = {'xr': x, 'pimul': pimul, 'info': info['status'], 'output': output}

    return results, success, raw
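
The OPF example above precomputes the Jacobian structure Js as a COO matrix but does not show eval_jac_g itself. A hedged sketch of pyipopt's two-phase flag protocol built from such a structure matrix, where compute_jacobian_values is a hypothetical helper standing in for the actual derivative evaluation:

import numpy as np

def eval_jac_g(x, flag, user_data=None):
    if flag:
        # structure phase: IPOPT asks once for the fixed (row, col) pattern
        return (Js.row, Js.col)
    # value phase: return the nonzero values in the same (row, col) order
    return compute_jacobian_values(x)  # hypothetical helper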
Esempio n. 26
0
    def __solver__(self, p):
        if not pyipoptInstalled:
            p.err('you should have pyipopt installed')
            
#        try:
#            os.close(1); os.close(2) # may not work for non-Unix OS
#        except:
#            pass
            
        nvar = p.n
        x_L = p.lb
        x_U = p.ub

        ncon = p.nc + p.nh + p.b.size + p.beq.size

        g_L, g_U = zeros(ncon), zeros(ncon)
        g_L[:p.nc] = -inf
        g_L[p.nc+p.nh:p.nc+p.nh+p.b.size] = -inf

        
        # IPOPT non-linear constraints, both eq and ineq
        if p.isFDmodel:
            r = []
            if p.nc != 0: r.append(p._getPattern(p.user.c))
            if p.nh != 0: r.append(p._getPattern(p.user.h))
            if p.nb != 0: r.append(p.A)
            if p.nbeq != 0: r.append(p.Aeq)
            if len(r)>0:
                if all([isinstance(elem, ndarray) for elem in r]):
                    r = vstack(r)
                else:
                    r = Vstack(r)
                    if isspmatrix(r):
                        from scipy import __version__
                        if __version__.startswith('0.7.3') or __version__.startswith('0.7.2') or __version__.startswith('0.7.1') or __version__.startswith('0.7.0'):
                            p.pWarn('updating scipy to version >= 0.7.4 is strongly recommended for this problem with the solver IPOPT')
            else:
                r = array([])
            
            if isspmatrix(r):
                I, J, _ = Find(r)
                # DON'T remove it!
                I, J = array(I, int64), array(J, int64)
            
            elif isinstance(r, ndarray):
                if r.size == 0:
                    I, J= array([], dtype=int64),array([], dtype=int64)
                else:
                    I, J = where(r)
            
            else:
                p.disp('unimplemented type:%s' % str(type(r))) # dense matrix? 
                
            
            nnzj = len(I)
        else:
            I, J = where(ones((ncon, p.n)))
            #I, J = None, None
            nnzj = ncon * p.n #TODO: reduce it
        

        def eval_g(x):
            r = array(())
            if p.userProvided.c: r = p.c(x)
            if p.userProvided.h: r = hstack((r, p.h(x)))
            r = hstack((r, p._get_AX_Less_B_residuals(x), p._get_AeqX_eq_Beq_residuals(x)))
            return r

        def eval_jac_g(x, flag, userdata = (I, J)):
            (I, J) = userdata
            if  flag and p.isFDmodel: 
                return (I, J) 
            r = []
            if p.userProvided.c: r.append(p.dc(x))
            if p.userProvided.h: r.append(p.dh(x))
            if p.nb != 0: r.append(p.A)
            if p.nbeq != 0: r.append(p.Aeq)
            # TODO: fix it!
            if any([isspmatrix(elem) for elem in r]):
                r = Vstack([(atleast_2d(elem) if elem.ndim < 2 else elem) for elem in r])
            elif len(r)!=0:
                r = vstack(r)
            
            if p.isFDmodel: 
                # TODO: make it more properly
                R = (r.tocsr() if isspmatrix(r) else r)[I, J]
                if isspmatrix(R): 
                    return R.A
                elif isinstance(R, ndarray): 
                    return R
                else: p.err('bug in OpenOpt-ipopt connection, inform OpenOpt developers, type(R) = %s' % type(R))
            if flag:
                return (I, J)
            else:
                if isspmatrix(r): r = r.A
                return r.flatten()


        """ This function might be buggy, """ # // comment by Eric
        nnzh = 0
        def eval_h(lagrange, obj_factor, flag):
            return None



#        def apply_new(x):
#            return True

        nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, p.f, p.df, eval_g, eval_jac_g)

        if self.optFile == 'auto':
            lines = ['# generated automatically by OpenOpt\n','print_level 0\n']
            lines.append('tol ' + str(p.ftol)+ '\n')
            lines.append('constr_viol_tol ' + str(p.contol)+ '\n')
            lines.append('max_iter ' + str(min(15000, p.maxIter))+ '\n')
            if self.options != '' :
                for s in re.split(',|;', self.options):
                    lines.append(s.strip().replace('=', ' ',  1) + '\n')
            if p.nc == 0:
                lines.append('jac_d_constant yes\n')
            if p.nh == 0:
                lines.append('jac_c_constant yes\n')
            if p.castFrom.lower() in ('lp', 'qp', 'llsp'):
                lines.append('hessian_constant yes\n')


            ipopt_opt_file = open('ipopt.opt', 'w')
            ipopt_opt_file.writelines(lines)
            ipopt_opt_file.close()


        try:
            x, zl, zu, obj  = nlp.solve(p.x0)[:4]
            if p.point(p.xk).betterThan(p.point(x)):
                obj = p.fk
                p.xk = p.xk.copy() # for more safety
            else:
                p.xk, p.fk = x.copy(), obj
            if p.istop == 0: p.istop = 1000
        finally:
            nlp.close()
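
When no sparsity information is available, the solver above falls back to declaring a fully dense Jacobian via where(ones((ncon, p.n))). A hedged sketch of that fallback in isolation, with illustrative sizes:

from numpy import ones, where

ncon, nvar = 3, 5                 # illustrative sizes
I, J = where(ones((ncon, nvar)))  # every (row, col) pair is a structural nonzero
nnzj = ncon * nvar                # == len(I)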
Esempio n. 27
0
 def test_example1(self):
     nvar = 4
     x_L = np.ones((nvar), dtype=np.float_) * 1.0
     x_U = np.ones((nvar), dtype=np.float_) * 5.0
     
     ncon = 2
     
     g_L = np.array([25.0, 40.0])
     g_U = np.array([2.0*pow(10.0, 19), 40.0]) 
     
     def eval_f(x, user_data = None):
         assert len(x) == 4
         return x[0] * x[3] * (x[0] + x[1] + x[2]) + x[2]
     
     def eval_grad_f(x, user_data = None):
         assert len(x) == 4
         grad_f = np.array([
             x[0] * x[3] + x[3] * (x[0] + x[1] + x[2]) , 
             x[0] * x[3],
             x[0] * x[3] + 1.0,
             x[0] * (x[0] + x[1] + x[2])
             ], np.float_)
         return grad_f
         
     def eval_g(x, user_data= None):
         assert len(x) == 4
         return np.array([
             x[0] * x[1] * x[2] * x[3], 
             x[0]*x[0] + x[1]*x[1] + x[2]*x[2] + x[3]*x[3]
         ], np.float_)
     
     nnzj = 8
     def eval_jac_g(x, flag, user_data = None):
         if flag:
             return (np.array([0, 0, 0, 0, 1, 1, 1, 1]), 
                 np.array([0, 1, 2, 3, 0, 1, 2, 3]))
         else:
             assert len(x) == 4
             return np.array([ x[1]*x[2]*x[3], 
                         x[0]*x[2]*x[3], 
                         x[0]*x[1]*x[3], 
                         x[0]*x[1]*x[2],
                         2.0*x[0], 
                         2.0*x[1], 
                         2.0*x[2], 
                         2.0*x[3] ])
             
     nnzh = 10
     def eval_h(x, lagrange, obj_factor, flag, user_data = None):
         if flag:
             hrow = [0, 1, 1, 2, 2, 2, 3, 3, 3, 3]
             hcol = [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]
             return (np.array(hcol), np.array(hrow))
         else:
             values = np.zeros(10, np.float_)
             values[0] = obj_factor * (2*x[3])
             values[1] = obj_factor * (x[3])
             values[2] = 0
             values[3] = obj_factor * (x[3])
             values[4] = 0
             values[5] = 0
             values[6] = obj_factor * (2*x[0] + x[1] + x[2])
             values[7] = obj_factor * (x[0])
             values[8] = obj_factor * (x[0])
             values[9] = 0
             values[1] += lagrange[0] * (x[2] * x[3])
     
             values[3] += lagrange[0] * (x[1] * x[3])
             values[4] += lagrange[0] * (x[0] * x[3])
     
             values[6] += lagrange[0] * (x[1] * x[2])
             values[7] += lagrange[0] * (x[0] * x[2])
             values[8] += lagrange[0] * (x[0] * x[1])
             values[0] += lagrange[1] * 2
             values[2] += lagrange[1] * 2
             values[5] += lagrange[1] * 2
             values[9] += lagrange[1] * 2
             return values
     
     def apply_new(x):
         return True
         
     nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g)
     
     x0 = np.array([1.0, 5.0, 5.0, 1.0])
     pi0 = np.array([1.0, 1.0])
     
     """
     print x0
     print nvar, ncon, nnzj
     print x_L,  x_U
     print g_L, g_U
     print eval_f(x0)
     print eval_grad_f(x0)
     print eval_g(x0)
     a =  eval_jac_g(x0, True)
     print "a = ", a[1], a[0]
     print eval_jac_g(x0, False)
     print eval_h(x0, pi0, 1.0, False)
     print eval_h(x0, pi0, 1.0, True)
     """
     
     r = nlp.solve(x0)
     nlp.close()
     
     # print "Solution of the primal variables, x"
     # print r["x"]
     
     # print "Solution of the bound multipliers, z_L and z_U"
     # print r["mult_xL"], r["mult_xU"]
     
     # print "Solution of the constraint multiplier, lambda"
     # print r["mult_g"]
     
     # print "Objective value"
     # print "f(x*) =", r["f"]
     
     # print "Constraint value"
     # print "g(x*) =", r["g"]
     
     nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g, eval_h)
     r = nlp.solve(x0)
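
Note that nlp.solve returns differently across pyipopt builds: most examples here unpack a tuple, while the commented prints above index a dict. A hedged sketch of handling both conventions, using only the keys shown in those prints:

r = nlp.solve(x0)
if isinstance(r, dict):       # dict-returning pyipopt builds
    x, obj = r["x"], r["f"]   # keys as in the commented prints above
else:                         # tuple-returning builds
    x, zl, zu, constraint_multipliers, obj, status = r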
Esempio n. 28
0
        values[6] += lagrange[0] * (x[1] * x[2])
        values[7] += lagrange[0] * (x[0] * x[2])
        values[8] += lagrange[0] * (x[0] * x[1])
        values[0] += lagrange[1] * 2
        values[2] += lagrange[1] * 2
        values[5] += lagrange[1] * 2
        values[9] += lagrange[1] * 2
        return values


def apply_new(x):
    return True


nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g, eval_h)

x0 = array([1.0, 5.0, 5.0, 1.0])
pi0 = array([1.0, 1.0])

"""
print x0
print nvar, ncon, nnzj
print x_L,  x_U
print g_L, g_U
print eval_f(x0)
print eval_grad_f(x0)
print eval_g(x0)
a =  eval_jac_g(x0, True)
print "a = ", a[1], a[0]
print eval_jac_g(x0, False)
"""
Esempio n. 29
0
def pyipopt_qp(H, c, A, gl, gu, x0=None, xl=None, xu=None,
               hessian=False, **kw_args):
    """IPOPT based quadratic program solver.

    Solves the quadratic programming problem:

          min 0.5*x'Hx + c'x
           x

    subject to:
            gl <= Ax <= gu
            xl <=  x <= xu

    @param H: quadratic cost coefficient matrix
    @param c: linear cost coefficients
    @param A: linear constraint matrix
    @param gl: lower bound of constraints
    @param gu: upper bound of constraints
    @param x0: initial starting point
    @param xl: lower bound of x as bounded constraints
    @param xu: upper bound of x as bounded constraints
    @param hessian: if True, pass the exact Hessian of the quadratic cost to IPOPT
    @param kw_args: IPOPT options
    """
    userdata = {'H': H, 'c': c, 'A': A}

    if x0 is None:
        x0 = zeros(A.shape[1])

    # n is the number of variables
    n = x0.shape[0]
    # number of linear constraints (zero nln)
    m = A.shape[0]
    # number of nonzeros in Jacobi matrix
    nnzj = A.nnz

    if gl is None:
        gl = -Inf * ones(m)
    if gu is None:
        gu =  Inf * ones(m)

    if (xl is None) or (len(xl) == 0):
        xl = -Inf * ones(n)
    if (xu is None) or (len(xu) == 0):
        xu =  Inf * ones(n)

    if not hessian:
        nnzh = 0
        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh,
                             qp_f, qp_grad_f, qp_g, qp_jac_g)
    else:
        Hl = tril(H, format='coo')

        # number of non-zeros in Hessian matrix
        nnzh = Hl.nnz

        eval_h = lambda x, lagrange, obj_factor, flag, \
                userdata=None: qp_h(x, lagrange, obj_factor, flag, Hl)

        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh,
                             qp_f, qp_grad_f, qp_g, qp_jac_g, eval_h)

    for k, v in kw_args.iteritems():
        if isinstance(v, int):
            nlp.int_option(k, v)
        elif isinstance(v, basestring):
            nlp.str_option(k, v)
        else:
            nlp.num_option(k, v)


    # returns  x, upper and lower bound for multiplier, final
    # objective function obj and the return status of IPOPT
    result = nlp.solve(x0, m, userdata)
    ## final values for the primal variables
    x = result[0]
    ## final values for the lower bound multipliers
    zl = result[1]
    ## final values for the upper bound multipliers
    zu = result[2]
    ## final value of the objective
    obj = result[3]
    ## status of the algorithm
    status = result[4]
    ## final values for the constraint multipliers
    zg = result[5]

    nlp.close()

    return x, zl, zu, obj, status, zg
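
A hedged usage sketch for pyipopt_qp above, minimizing 0.5*(x0^2 + x1^2) + x0 subject to the single equality x0 + x1 = 1; the data are illustrative only:

from numpy import array
from scipy.sparse import csr_matrix

H = csr_matrix(array([[1.0, 0.0], [0.0, 1.0]]))  # quadratic cost matrix
c = array([1.0, 0.0])                            # linear cost coefficients
A = csr_matrix(array([[1.0, 1.0]]))              # one linear constraint row
gl = array([1.0])                                # gl == gu makes it an equality
gu = array([1.0])

x, zl, zu, obj, status, zg = pyipopt_qp(H, c, A, gl, gu, hessian=True)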
Esempio n. 30
0
        values[6] += lagrange[0] * (x[1] * x[2])
        values[7] += lagrange[0] * (x[0] * x[2])
        values[8] += lagrange[0] * (x[0] * x[1])
        values[0] += lagrange[1] * 2
        values[2] += lagrange[1] * 2
        values[5] += lagrange[1] * 2
        values[9] += lagrange[1] * 2
        return values


def apply_new(x):
    return True


nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f,
                     eval_grad_f, eval_g, eval_jac_g)

x0 = array([1.0, 5.0, 5.0, 1.0])
pi0 = array([1.0, 1.0])
"""
print x0
print nvar, ncon, nnzj
print x_L,  x_U
print g_L, g_U
print eval_f(x0)
print eval_grad_f(x0)
print eval_g(x0)
a =  eval_jac_g(x0, True)
print "a = ", a[1], a[0]
print eval_jac_g(x0, False)
print eval_h(x0, pi0, 1.0, False)
"""

def main():
    prob = {}
    prob["n"] = 20
    prob["qdim"] = 2
    prob["udim"] = 2
    prob["dt"] = 1.0 / (prob["n"] - 1)

    qdim = prob['qdim']
    start = np.array([0] * qdim + [0] * qdim)
    end = np.array([10] * qdim + [0] * qdim)

    n = prob["n"]
    q_v_arr_lst = [
        np.linspace(start[i], end[i], n) for i in range(2 * prob['qdim'])
    ]
    u_arr = np.ones((prob["udim"], n)) * 2
    X_2d = np.vstack([q_v_arr_lst, u_arr])

    X_init = X_2d.T.flatten()
    x_L = np.ones(X_init.size) * -100
    x_U = np.ones(X_init.size) * 100

    X_sample = np.random.uniform(x_L, x_U)

    #set the cost and the gradient of the cost
    ctrl_cost = cost.Control_Cost(prob)
    eval_f_adolc = aa.Func_Adolc(ctrl_cost, X_sample, scaler=True)
    eval_grad_f_adolc = aa.Eval_Grad_F_Adolc(eval_f_adolc.id)
    # eval_grad_f_adolc = aa.Func_Adolc(ctrl_cost.eval_grad_f, X_sample)

    # set the constraint function for points at specific times
    points = [(0, start), (n - 1, end)]
    # p_index, p_g_func = [constraint.get_point_constriant(prob, t, g)
    #                      for (t, g) in points]
    p_index_g_pair = [
        constraint.get_point_constriant(prob, t, g) for (t, g) in points
    ]
    p_index_iter, p_g_func_iter = zip(*p_index_g_pair)
    p_index_lst = list(p_index_iter)
    p_g_lst = list(p_g_func_iter)

    # p_g_adolc_lst = [aa.Func_Adolc(g, X_sample[i])
    #                       for (i, g) in p_index_g_piar]

    # set the dynamic constraints
    block = dy.Block(prob["qdim"], prob["udim"])
    dynamics = block.dynamics
    d_index, d_g_func = constraint.get_dynamic_constriants(
        prob, dynamics, range(0, n - 1))
    # d_g_adolc = aa.Func_Adolc(d_g_func, X_sample[d_index[0]])

    # all dynamics share the same approximation function
    # d_g_adolc_lst = [d_g_adolc for i in d_index]
    d_g_lst = [d_g_func for i in d_index]

    index_lst = p_index_lst + d_index
    # eval_g_adolc_lst =  p_g_adolc_lst + d_g_adolc_lst
    eval_g_lst = p_g_lst + d_g_lst

    # X_sample_lst = [X_sample[i] for i in index_lst]
    #
    # g_adolc_x_pair = zip(eval_g_adolc_lst, X_sample_lst)
    #
    # eval_jac_adolc_lst = [aa.Eval_Jac_G_Adolc(g.id, x)
    #                 for (g, x) in g_adolc_x_pair]

    eval_g = constraint.Stacked_Constriants(eval_g_lst, index_lst)

    eval_g_adolc = aa.Func_Adolc(eval_g, X_sample)
    eval_jac_g_adolc = aa.Eval_Jac_G_Adolc(eval_g_adolc.id, X_sample)
    # eval_g_adolc = aa.Func_Adolc(eval_g, X_sample)

    # eval_jac_g = constraint.Stacked_Constriants_Jacobian(eval_g_lst   ,
    #                                                      eval_jac_lst,
    #                                                      index_lst)
    nvar = X_init.size
    ncon = eval_g(X_init).size

    eval_lagrangian = constraint.Eval_Lagrangian(ctrl_cost, eval_g)
    #x, lagrangian, obj_factor
    x_lag_lst = [X_sample, np.ones(ncon), 1]
    x_lag_arr = np.hstack(x_lag_lst)
    eval_lagrangian_adolc = aa.Func_Adolc(eval_lagrangian, x_lag_lst)

    eval_h_adolc = aa.Eval_h_adolc(eval_lagrangian_adolc.id, x_lag_arr)
    mask = eval_h_adolc(X_init, np.ones(ncon), 1, True)
    H = eval_h_adolc(X_init, np.ones(ncon), 1, False)
    g_L = np.zeros(ncon)
    g_U = np.zeros(ncon)

    nnzj = eval_jac_g_adolc.nnzj
    nnzh = eval_h_adolc.nnzh

    # nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, 0, eval_f_adolc ,
    #                     eval_grad_f_adolc, eval_g_adolc, eval_jac_g_adolc)

    nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh,
                         eval_f_adolc, eval_grad_f_adolc, eval_g_adolc,
                         eval_jac_g_adolc, eval_h_adolc)

    output, zl, zu, constraint_multipliers, obj, status = nlp.solve(X_init)
    output_2d = output.reshape(n, -1)
    return output_2d, prob
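
IPOPT expects only the lower triangle of the Lagrangian Hessian, which is why the 4-variable examples above declare nnzh = 10 (= 4*5/2 entries) and why the QP and OPF examples call tril() before counting nonzeros. A minimal hedged sketch of a Hessian callback for f(x) = 0.5*(x0^2 + x1^2) with no constraint curvature, following the same flag protocol as eval_h_adolc above:

import numpy as np

def eval_h(x, lagrange, obj_factor, flag, user_data=None):
    if flag:
        # structure phase: indices of the lower-triangular nonzeros (the diagonal here)
        return (np.array([0, 1]), np.array([0, 1]))
    # value phase: entries in the same order as the structure above
    return obj_factor * np.ones(2)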
Esempio n. 32
0

print
print "Calling solver with set of vars"
print "nvar = ", nvar 
print "x_L  = ", x_L 
print "x_U  = ", x_U 
print "g_L  = ", g_L 
print "g_U  = ", g_U 
print "ncon = ", ncon 
print "nnzj, nnzh = ", nnzj, ", ", nnzh
print "x0   = ", x0

# Call solve() 
#pyipopt.set_loglevel(2) # verbose
nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, anon_eval_f, anon_eval_grad_f, anon_eval_g, anon_eval_jac_g)
#nlp.int_option("max_iter", 3000)
#nlp.num_option("tol", 1e-8)
#nlp.num_option("acceptable_tol", 1e-2)
#nlp.int_option("acceptable_iter", 0)
nlp.str_option("derivative_test", "first-order")
nlp.str_option("derivative_test_print_all", "no")
#nlp.str_option("print_options_documentation", "yes")
nlp.str_option("print_user_options", "yes")
#nlp.int_option("print_level", 12)
x, zl, zu, obj, status = nlp.solve(x0)
nlp.close()

# Print results
print
print "Solution of the primal variables, x"
Esempio n. 33
0
                    ineq_constr_jac(x),
                    axis=0)
    i_s = []
    j_s = []
    if flag:
        for i in range(len(x0)):
            for j in range(len(ret)):
                i_s.append(i)
                j_s.append(j)
        return (np.array(i_s), np.array(j_s))
    else:
        return ret.flatten()


nnzj = len(eval_jac_g(x0, False))
g_L = np.zeros((len(eval_g(x0))))
g_U = np.zeros_like(g_L)
g_U[neq:] = 1e10
nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, avg_cost,
                     avg_cost_grad, eval_g, eval_jac_g)

nlp.num_option('bound_relax_factor', 0.1)
nlp.str_option("mu_strategy", "adaptive")
nlp.str_option("derivative_test", "first-order")
nlp.str_option('warm_start_init_point', 'yes')
nlp.str_option('linear_solver', 'mumps')
print(datetime.datetime.now(), ": Going to call solve")
x, zl, zu, constraint_multipliers, obj, status = nlp.solve(x0)
nlp.close()
print(x)
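
The 'derivative_test' option used here (and in several other examples) asks IPOPT to compare the user-supplied first derivatives against finite differences before the first iteration, which is a cheap way to catch a wrong eval_grad_f or eval_jac_g. A minimal sketch of enabling it, assuming an existing nlp object:

nlp.str_option("derivative_test", "first-order")
nlp.str_option("derivative_test_print_all", "yes")  # report every entry, not only failures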
Esempio n. 34
0
    def get_ipopt_miu(self):
        """Optimized miu

        Calculate optimal miu. Called when opt=True is passed to loop().
        ...
        Args:
            None

        Returns:
            np.ndarray: Array of optimal miu, n = params.tmax

        """
        try:
            import pyipopt
        except ImportError:
            pyipopt = None
            print('OPTIMIZATION ERROR: It appears that you do not have '
                  'pyipopt installed. Please install it before running '
                  'optimization.')
        x0 = np.concatenate(
            (np.linspace(0, 1, 40) ** (1 - np.linspace(0, 1, 40)), np.ones(20))
        )
        M = 0
        nnzj = 0
        nnzh = 0
        xl = np.zeros(self.params.tmax)
        xu = np.ones(self.params.tmax)
        xl[0] = .005
        xu[0] = .005
        xl[-20:] = 1
        gl = np.zeros(M)
        gu = np.ones(M) * 4.0
        def eval_f(_x0):
            if (_x0 == self.opt_x).all() and self.opt_obj is not None:
                return self.opt_obj
            else:
                self.opt_x = _x0.copy()
                return self.obj_loop(_x0)
        def eval_grad_f(_x0):
            if (_x0 == self.opt_x).all() and self.opt_grad_f is not None:
                return self.opt_grad_f
            else:
                self.opt_x = _x0.copy()
                return self.grad_loop(_x0)
        def eval_g(x):
            return np.zeros(M)
        def eval_jac_g(x, flag):
            if flag:
                return [], []
            else:
                return np.empty(M)
        pyipopt.set_loglevel(1)
        nlp = pyipopt.create(
            self.opt_vars, xl, xu, M, gl, gu, nnzj, nnzh, eval_f,
            eval_grad_f, eval_g, eval_jac_g,
        )
        nlp.num_option('constr_viol_tol', 8e-7)
        nlp.int_option('max_iter', 30)
        nlp.num_option('max_cpu_time', 60)
        nlp.num_option('tol', self.opt_tol)
        # nlp.num_option('acceptable_tol', 1e-4)
        # nlp.int_option('acceptable_iter', 4)
        nlp.num_option('obj_scaling_factor', -1e+0)
        nlp.int_option('print_level', 0)
        nlp.str_option('linear_solver', 'ma57')
        # nlp.str_option('derivative_test', 'first-order')
        x = nlp.solve(x0)[0]
        nlp.close()
        return x
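
The obj_scaling_factor of -1 above is the usual way to maximize with IPOPT, which always minimizes: scaling the objective by a negative factor flips the problem's sense. A minimal sketch:

# maximize f(x)  ==  minimize -f(x); IPOPT applies the factor internally,
# so eval_f and eval_grad_f still return the unscaled f and its gradient.
nlp.num_option('obj_scaling_factor', -1.0)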
Esempio n. 35
0
def test_ipopt_optimization():
    """
    This test checks
    1. the sparse functionality of pyadolc
    2. the execution speed compared to the direct sparse computation
    3. run the optimization with the derivatives provided by pyadolc
    
    IPOPT is an interior point algorithm to solve
    
    min     f(x)
        x in R^n
    s.t.       g_L <= g(x) <= g_U
                x_L <=  x   <= x_U
    
    this test probably fails because of a bug in pyipopt

    """

    try:
        import pyipopt
    except:
        #print '"pyipopt is not installed, skipping test'
        #return
        raise NotImplementedError("pyipopt is not installed, skipping test")

    try:
        import scipy.sparse as sparse
    except:
        #print '"pyipopt is not installed, skipping test'
        #return
        raise NotImplementedError("scipy is not installed, skipping test")

    import time

    nvar = 4
    x_L = numpy.ones((nvar), dtype=numpy.float_) * 1.0
    x_U = numpy.ones((nvar), dtype=numpy.float_) * 5.0

    ncon = 2
    g_L = numpy.array([25.0, 40.0])
    g_U = numpy.array([2.0 * pow(10.0, 19), 40.0])

    def eval_f(x, user_data=None):
        assert len(x) == 4
        return x[0] * x[3] * (x[0] + x[1] + x[2]) + x[2]

    def eval_grad_f(x, user_data=None):
        assert len(x) == 4
        grad_f = numpy.array([
            x[0] * x[3] + x[3] * (x[0] + x[1] + x[2]), x[0] * x[3],
            x[0] * x[3] + 1.0, x[0] * (x[0] + x[1] + x[2])
        ])
        return grad_f

    def eval_g(x, user_data=None):
        assert len(x) == 4
        return numpy.array([
            x[0] * x[1] * x[2] * x[3],
            x[0] * x[0] + x[1] * x[1] + x[2] * x[2] + x[3] * x[3]
        ])

    nnzj = 8

    def eval_jac_g(x, flag, user_data=None):
        if flag:
            return (numpy.array([0, 0, 0, 0, 1, 1, 1,
                                 1]), numpy.array([0, 1, 2, 3, 0, 1, 2, 3]))
        else:
            assert len(x) == 4
            return numpy.array([
                x[1] * x[2] * x[3], x[0] * x[2] * x[3], x[0] * x[1] * x[3],
                x[0] * x[1] * x[2], 2.0 * x[0], 2.0 * x[1], 2.0 * x[2],
                2.0 * x[3]
            ])

    nnzh = 10

    def eval_h(x, lagrange, obj_factor, flag, user_data=None):
        if flag:
            hrow = [0, 1, 1, 2, 2, 2, 3, 3, 3, 3]
            hcol = [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]
            return (numpy.array(hcol, dtype=int), numpy.array(hrow, dtype=int))
        else:
            values = numpy.zeros((10), numpy.float_)
            values[0] = obj_factor * (2 * x[3])
            values[1] = obj_factor * (x[3])
            values[2] = 0
            values[3] = obj_factor * (x[3])
            values[4] = 0
            values[5] = 0
            values[6] = obj_factor * (2 * x[0] + x[1] + x[2])
            values[7] = obj_factor * (x[0])
            values[8] = obj_factor * (x[0])
            values[9] = 0
            values[1] += lagrange[0] * (x[2] * x[3])

            values[3] += lagrange[0] * (x[1] * x[3])
            values[4] += lagrange[0] * (x[0] * x[3])

            values[6] += lagrange[0] * (x[1] * x[2])
            values[7] += lagrange[0] * (x[0] * x[2])
            values[8] += lagrange[0] * (x[0] * x[1])
            values[0] += lagrange[1] * 2
            values[2] += lagrange[1] * 2
            values[5] += lagrange[1] * 2
            values[9] += lagrange[1] * 2
            return values

    def apply_new(x):
        return True

    x0 = numpy.array([1.0, 5.0, 5.0, 1.0])
    pi0 = numpy.array([1.0, 1.0])

    # check that adolc gives the same answers as derivatives calculated by hand
    trace_on(1)
    ax = adouble(x0)
    independent(ax)
    ay = eval_f(ax)
    dependent(ay)
    trace_off()

    trace_on(2)
    ax = adouble(x0)
    independent(ax)
    ay = eval_g(ax)
    dependent(ay)
    trace_off()

    trace_on(3)
    ax = adouble(x0)
    independent(ax)
    ay = eval_g(ax)
    dependent(ay[0])
    trace_off()

    trace_on(4)
    ax = adouble(x0)
    independent(ax)
    ay = eval_g(ax)
    dependent(ay[1])
    trace_off()

    def eval_f_adolc(x, user_data=None):
        return function(1, x)[0]

    def eval_grad_f_adolc(x, user_data=None):
        return gradient(1, x)

    def eval_g_adolc(x, user_data=None):
        return function(2, x)

    def eval_jac_g_adolc(x, flag, user_data=None):
        options = numpy.array([1, 1, 0, 0], dtype=int)
        result = colpack.sparse_jac_no_repeat(2, x, options)
        if flag:
            return (numpy.asarray(result[1], dtype=int),
                    numpy.asarray(result[2], dtype=int))
        else:
            return result[3]

    def eval_h_adolc(x, lagrange, obj_factor, flag, user_data=None):

        if flag:
            # return sparsity pattern of the hessian

            if eval_h_adolc.firstrun:

                # this can be done more elegantly based directly on
                # the sparsity pattern returned by adolc.sparse.hess_pat
                options = numpy.array([0, 0], dtype=int)
                result_f = colpack.sparse_hess_no_repeat(1, x, options)
                result_g0 = colpack.sparse_hess_no_repeat(3, x, options)
                result_g1 = colpack.sparse_hess_no_repeat(4, x, options)
                Hf = sparse.coo_matrix(
                    (result_f[3], (result_f[1], result_f[2])), shape=(4, 4))
                Hg0 = sparse.coo_matrix(
                    (result_g0[3], (result_g0[1], result_g0[2])), shape=(4, 4))
                Hg1 = sparse.coo_matrix(
                    (result_g1[3], (result_g1[1], result_g1[2])), shape=(4, 4))

                H = Hf + Hg0 + Hg1
                H = H.tocoo()
                hrow = H.row
                hcol = H.col
                hpat = (numpy.array(hcol,
                                    dtype=int), numpy.array(hrow, dtype=int))

                eval_h_adolc.hpat = hpat
                eval_h_adolc.firstrun = False

            return eval_h_adolc.hpat

        else:
            # compute sparse hessian
            assert numpy.ndim(x) == 1
            assert numpy.size(x) == 4

            options = numpy.array([0, 0], dtype=int)
            result_f = colpack.sparse_hess_no_repeat(1, x, options)
            result_g0 = colpack.sparse_hess_no_repeat(3, x, options)
            result_g1 = colpack.sparse_hess_no_repeat(4, x, options)
            Hf = sparse.coo_matrix((result_f[3], (result_f[1], result_f[2])),
                                   shape=(4, 4))
            Hg0 = sparse.coo_matrix(
                (result_g0[3], (result_g0[1], result_g0[2])), shape=(4, 4))
            Hg1 = sparse.coo_matrix(
                (result_g1[3], (result_g1[1], result_g1[2])), shape=(4, 4))

            H = Hf + Hg0 + Hg1
            H = H.tocoo()

            values = numpy.zeros((10), float)
            values[:] = H.data
            return values

    eval_h_adolc.firstrun = True

    # function of f
    assert_almost_equal(eval_f(x0), eval_f_adolc(x0))

    # gradient of f
    assert_array_almost_equal(eval_grad_f(x0), eval_grad_f_adolc(x0))

    # function of g
    assert_array_almost_equal(eval_g(x0), function(2, x0))

    # sparse jacobian of g
    assert_array_equal(eval_jac_g_adolc(x0, True)[0], eval_jac_g(x0, True)[0])
    assert_array_equal(eval_jac_g_adolc(x0, True)[1], eval_jac_g(x0, True)[1])
    assert_array_equal(eval_jac_g_adolc(x0, False), eval_jac_g(x0, False))

    # sparse hessian of the lagrangian
    lagrange = numpy.ones(2, dtype=float)
    obj_factor = 1.
    x0 = numpy.random.rand(4)
    result = (eval_h(x0, lagrange, obj_factor,
                     False), eval_h(x0, lagrange, obj_factor, True))
    result_adolc = (eval_h_adolc(x0, lagrange, obj_factor, False),
                    eval_h_adolc(x0, lagrange, obj_factor, True))
    H = sparse.coo_matrix(result, shape=(4, 4))
    H_adolc = sparse.coo_matrix(result_adolc, shape=(4, 4))
    H = H.todense()
    H_adolc = H_adolc.todense()
    assert_array_almost_equal(H, H_adolc.T)

    # test optimization with PYIPOPT
    nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f,
                         eval_grad_f, eval_g, eval_jac_g, eval_h)
    start_time = time.time()
    result = nlp.solve(x0)
    end_time = time.time()
    nlp.close()
    pure_python_optimization_time = end_time - start_time

    nlp_adolc = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh,
                               eval_f_adolc, eval_grad_f_adolc, eval_g_adolc,
                               eval_jac_g_adolc, eval_h_adolc)

    start_time = time.time()
    result_adolc = nlp_adolc.solve(x0)
    end_time = time.time()
    nlp_adolc.close()

    adolc_optimization_time = end_time - start_time
    print('optimization time with derivatives computed by adolc = ',
          adolc_optimization_time)
    print('optimization time with derivatives computed by hand = ',
          pure_python_optimization_time)
    assert adolc_optimization_time / pure_python_optimization_time < 10

    # this works with the pyipopt version from code.google.com
    assert_array_almost_equal(result[0], result_adolc[0])
    assert_array_almost_equal(result[1], result_adolc[1])
    assert_array_almost_equal(result[2], result_adolc[2])
    assert_array_almost_equal(result[3], result_adolc[3])
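
A hedged sketch of the pyadolc tracing pattern used above, assuming the package exposes the same names as in the test (tape numbers are illustrative):

import numpy
from adolc import adouble, trace_on, trace_off, independent, dependent, function, gradient

def f(x):
    return x[0] * x[1]

x0 = numpy.array([1.0, 2.0])
trace_on(0)      # record operations on tape 0
ax = adouble(x0)
independent(ax)  # mark inputs
ay = f(ax)
dependent(ay)    # mark outputs
trace_off()

print(function(0, x0))  # re-evaluate f from the tape
print(gradient(0, x0))  # gradient via automatic differentiation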
Esempio n. 36
0
def iterate(k_init, n_agents, gp_old):

    # IPOPT parameters below
    N = 3 * n_agents  # number of vars
    M = 3 * n_agents + 1  # number of constraints
    NELE_JAC = N * M
    NELE_HESS = (N**2 - N) // 2 + N  # number of nonzero entries of the Hessian (lower triangle)

    # Vector of variables -> solution of non-linear equation system
    X = np.empty(N)

    LAM = np.empty(M)  # multipliers
    G = np.empty(M)  # (in-)equality constraints

    # Vector of lower and upper bounds
    G_L = np.empty(M)
    G_U = np.empty(M)

    X_L = np.empty(N)
    X_U = np.empty(N)

    Z_L = np.empty(N)
    Z_U = np.empty(N)

    # get the coordinates of an individual grid point
    grid_pt_box = k_init

    X_L[:n_agents] = c_bar
    X_U[:n_agents] = c_up

    X_L[n_agents:2 * n_agents] = l_bar
    X_U[n_agents:2 * n_agents] = l_up

    X_L[2 * n_agents:3 * n_agents] = inv_bar
    X_U[2 * n_agents:3 * n_agents] = inv_up

    # Set bounds for the constraints
    G_L[:n_agents] = c_bar
    G_U[:n_agents] = c_up

    G_L[n_agents:2 * n_agents] = l_bar
    G_U[n_agents:2 * n_agents] = l_up

    G_L[2 * n_agents:3 * n_agents] = inv_bar
    G_U[2 * n_agents:3 * n_agents] = inv_up

    G_L[3 * n_agents] = 0.0  # both values set to 0 for equality constraints
    G_U[3 * n_agents] = 0.0

    # initial guesses for first iteration
    cons_init = 0.5 * (X_U[:n_agents] - X_L[:n_agents]) + X_L[:n_agents]
    lab_init = 0.5 * (X_U[n_agents:2 * n_agents] -
                      X_L[n_agents:2 * n_agents]) + X_L[n_agents:2 * n_agents]
    inv_init = 0.5 * (X_U[2 * n_agents:3 * n_agents] -
                      X_L[2 * n_agents:3 * n_agents]) + X_L[2 * n_agents:3 *
                                                            n_agents]

    X[:n_agents] = cons_init
    X[n_agents:2 * n_agents] = lab_init
    X[2 * n_agents:3 * n_agents] = inv_init

    # Create eval_f, eval_grad_f, eval_g, eval_jac_g for the given k_init and n_agents

    def eval_f(X):
        return EV_F_ITER(X, k_init, n_agents, gp_old)

    def eval_grad_f(X):
        return EV_GRAD_F_ITER(X, k_init, n_agents, gp_old)

    def eval_g(X):
        return EV_G_ITER(X, k_init, n_agents)

    def eval_jac_g(X, flag):
        return EV_JAC_G_ITER(X, flag, k_init, n_agents)

    # First create a handle for the Ipopt problem
    nlp = pyipopt.create(N, X_L, X_U, M, G_L, G_U, NELE_JAC, NELE_HESS, eval_f,
                         eval_grad_f, eval_g, eval_jac_g)
    nlp.num_option("obj_scaling_factor", -1.00)
    nlp.num_option("tol", 1e-6)
    nlp.num_option("acceptable_tol", 1e-5)
    nlp.str_option("derivative_test", "first-order")
    nlp.str_option("hessian_approximation", "limited-memory")
    nlp.int_option("print_level", 0)

    x, z_l, z_u, constraint_multipliers, obj, status = nlp.solve(X)
    nlp.close()
    # x: Solution of the primal variables
    # z_l, z_u: Solution of the bound multipliers
    # constraint_multipliers: Solution of the constraint multipliers
    # obj: Objective value
    # status: Exit Status

    # Unpack Consumption, Labor, and Investment
    c = x[:n_agents]
    l = x[n_agents:2 * n_agents]
    inv = x[2 * n_agents:3 * n_agents]
    to_print = np.hstack((obj, x))

    # === debug
    #f=open("results.txt", 'a')
    #np.savetxt(f, np.transpose(to_print) #, fmt=len(x)*'%10.10f ')
    #for num in to_print:
    #    f.write(str(num)+"\t")
    #f.write("\n")
    #f.close()

    return obj, c, l, inv
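
Because iterate() sets 'hessian_approximation' to 'limited-memory', IPOPT approximates second derivatives internally and never calls a Hessian callback; as one of the other examples notes, the Hessian nonzero count can then be set to 0. A hedged sketch of that setup, reusing the names from the function above:

nlp = pyipopt.create(N, X_L, X_U, M, G_L, G_U, NELE_JAC, 0,  # nnzh = 0: Hessian never requested
                     eval_f, eval_grad_f, eval_g, eval_jac_g)
nlp.str_option("hessian_approximation", "limited-memory")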
def ipoptopf_solver(om, ppopt):
    """Solves AC optimal power flow using IPOPT.

    Inputs are an OPF model object and a PYPOWER options vector.

    Outputs are a C{results} dict, C{success} flag and C{raw} output dict.

    C{results} is a PYPOWER case dict (ppc) with the usual C{baseMVA}, C{bus}
    C{branch}, C{gen}, C{gencost} fields, along with the following additional
    fields:
        - C{order}      see 'help ext2int' for details of this field
        - C{x}          final value of optimization variables (internal order)
        - C{f}          final objective function value
        - C{mu}         shadow prices on ...
            - C{var}
                - C{l}  lower bounds on variables
                - C{u}  upper bounds on variables
            - C{nln}
                - C{l}  lower bounds on nonlinear constraints
                - C{u}  upper bounds on nonlinear constraints
            - C{lin}
                - C{l}  lower bounds on linear constraints
                - C{u}  upper bounds on linear constraints

    C{success} is C{True} if solver converged successfully, C{False} otherwise

    C{raw} is a raw output dict in form returned by MINOS
        - C{xr}     final value of optimization variables
        - C{pimul}  constraint multipliers
        - C{info}   solver specific termination code
        - C{output} solver specific output information

    @see: L{opf}, L{pips}

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
    Autonoma de Manizales)
    @author: Richard Lincoln
    """
    import pyipopt

    ## unpack data
    ppc = om.get_ppc()
    baseMVA, bus, gen, branch, gencost = \
        ppc['baseMVA'], ppc['bus'], ppc['gen'], ppc['branch'], ppc['gencost']
    vv, _, nn, _ = om.get_idx()

    ## problem dimensions
    nb = shape(bus)[0]          ## number of buses
    ng = shape(gen)[0]          ## number of gens
    nl = shape(branch)[0]       ## number of branches
    ny = om.getN('var', 'y')    ## number of piece-wise linear costs

    ## linear constraints
    A, l, u = om.linear_constraints()

    ## bounds on optimization vars
    _, xmin, xmax = om.getv()

    ## build admittance matrices
    Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)

    ## try to select an interior initial point
    ll = xmin.copy(); uu = xmax.copy()
    ll[xmin == -Inf] = -2e19   ## replace Inf with numerical proxies
    uu[xmax ==  Inf] =  2e19
    x0 = (ll + uu) / 2
    Varefs = bus[bus[:, BUS_TYPE] == REF, VA] * (pi / 180)
    x0[vv['i1']['Va']:vv['iN']['Va']] = Varefs[0]  ## angles set to first reference angle
    if ny > 0:
        ipwl = find(gencost[:, MODEL] == PW_LINEAR)
#        PQ = r_[gen[:, PMAX], gen[:, QMAX]]
#        c = totcost(gencost[ipwl, :], PQ[ipwl])
        ## largest y-value in CCV data
        c = gencost.flatten('F')[sub2ind(shape(gencost), ipwl, NCOST + 2 * gencost[ipwl, NCOST])]
        x0[vv['i1']['y']:vv['iN']['y']] = max(c) + 0.1 * abs(max(c))
#        x0[vv['i1']['y']:vv['iN']['y']) = c + 0.1 * abs(c)

    ## find branches with flow limits
    il = find((branch[:, RATE_A] != 0) & (branch[:, RATE_A] < 1e10))
    nl2 = len(il)           ## number of constrained lines

    ##-----  run opf  -----
    ## build Jacobian and Hessian structure
    if A is not None and issparse(A):
        nA = A.shape[0]                ## number of original linear constraints
    else:
        nA = 0
    nx = len(x0)
    f = branch[:, F_BUS]                           ## list of "from" buses
    t = branch[:, T_BUS]                           ## list of "to" buses
    Cf = sparse((ones(nl), (arange(nl), f)), (nl, nb))      ## connection matrix for line & from buses
    Ct = sparse((ones(nl), (arange(nl), t)), (nl, nb))      ## connection matrix for line & to buses
    Cl = Cf + Ct
    Cb = Cl.T * Cl + speye(nb, nb)
    Cl2 = Cl[il, :]
    Cg = sparse((ones(ng), (gen[:, GEN_BUS], arange(ng))), (nb, ng))
    nz = nx - 2 * (nb + ng)
    nxtra = nx - 2 * nb
    if nz > 0:
        Js = vstack([
            hstack([Cb,      Cb,      Cg,              sparse((nb, ng)),   sparse((nb,  nz))]),
            hstack([Cb,      Cb,      sparse((nb, ng)),   Cg,              sparse((nb,  nz))]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),               sparse((nl2, nz))]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),               sparse((nl2, nz))])
        ], 'coo')
    else:
        Js = vstack([
            hstack([Cb,      Cb,      Cg,              sparse((nb, ng))]),
            hstack([Cb,      Cb,      sparse((nb, ng)),   Cg,          ]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),           ]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),           ])
        ], 'coo')

    if A is not None and issparse(A):
        Js = vstack([Js, A], 'coo')

    f, _, d2f = opf_costfcn(x0, om, True)
    Hs = tril(d2f + vstack([
        hstack([Cb,  Cb,  sparse((nb, nxtra))]),
        hstack([Cb,  Cb,  sparse((nb, nxtra))]),
        sparse((nxtra, nx))
    ]), format='coo')

    ## set options struct for IPOPT
#    options = {}
#    options['ipopt'] = ipopt_options([], ppopt)

    ## extra data to pass to functions
    userdata = {
        'om':       om,
        'Ybus':     Ybus,
        'Yf':       Yf[il, :],
        'Yt':       Yt[il, :],
        'ppopt':    ppopt,
        'il':       il,
        'A':        A,
        'nA':       nA,
        'neqnln':   2 * nb,
        'niqnln':   2 * nl2,
        'Js':       Js,
        'Hs':       Hs
    }

    ## check Jacobian and Hessian structure
    #xr                  = rand(x0.shape)
    #lmbda               = rand( 2 * nb + 2 * nl2)
    #Js1 = eval_jac_g(x, flag, userdata) #(xr, options.auxdata)
    #Hs1  = eval_h(xr, 1, lmbda, userdata)
    #i1, j1, s = find(Js)
    #i2, j2, s = find(Js1)
    #if (len(i1) != len(i2)) | (norm(i1 - i2) != 0) | (norm(j1 - j2) != 0):
    #    raise ValueError, 'something''s wrong with the Jacobian structure'
    #
    #i1, j1, s = find(Hs)
    #i2, j2, s = find(Hs1)
    #if (len(i1) != len(i2)) | (norm(i1 - i2) != 0) | (norm(j1 - j2) != 0):
    #    raise ValueError, 'something''s wrong with the Hessian structure'

    ## define variable and constraint bounds
    # n is the number of variables
    n = x0.shape[0]
    # xl is the lower bound of x as bounded constraints
    xl = xmin
    # xu is the upper bound of x as bounded constraints
    xu = xmax

    neqnln = 2 * nb
    niqnln = 2 * nl2

    # number of constraints
    m = neqnln + niqnln + nA
    # lower bound of constraint
    gl = r_[zeros(neqnln), -Inf * ones(niqnln), l]
    # upper bound of constraints
    gu = r_[zeros(neqnln),       zeros(niqnln), u]

    # number of nonzeros in Jacobi matrix
    nnzj = Js.nnz
    # number of non-zeros in Hessian matrix, you can set it to 0
    nnzh = Hs.nnz

    eval_hessian = True
    if eval_hessian:
        hessian = lambda x, lagrange, obj_factor, flag, user_data=None: \
                eval_h(x, lagrange, obj_factor, flag, userdata)

        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh,
                             eval_f, eval_grad_f, eval_g, eval_jac_g, hessian)
    else:
        nnzh = 0
        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh,
                             eval_f, eval_grad_f, eval_g, eval_jac_g)

    nlp.int_option('print_level', 5)
    nlp.num_option('tol', 1.0000e-12)
    nlp.int_option('max_iter', 250)
    nlp.num_option('dual_inf_tol', 0.10000)
    nlp.num_option('constr_viol_tol', 1.0000e-06)
    nlp.num_option('compl_inf_tol', 1.0000e-05)
    nlp.num_option('acceptable_tol', 1.0000e-08)
    nlp.num_option('acceptable_constr_viol_tol', 1.0000e-04)
    nlp.num_option('acceptable_compl_inf_tol', 0.0010000)
    nlp.str_option('mu_strategy', 'adaptive')

    iter = 0
    def intermediate_callback(algmod, iter_count, obj_value, inf_pr, inf_du,
            mu, d_norm, regularization_size, alpha_du, alpha_pr, ls_trials,
            user_data=None):
        iter = iter_count
        return True

    nlp.set_intermediate_callback(intermediate_callback)

    ## run the optimization
    # returns final solution x, upper and lower bound for multiplier, final
    # objective function obj and the return status of ipopt
    x, zl, zu, obj, status, zg = nlp.solve(x0, m, userdata)

    info = {'x': x, 'zl': zl, 'zu': zu, 'obj': obj, 'status': status, 'lmbda': zg}

    nlp.close()

    success = (status == 0) | (status == 1)

    output = {'iterations': iter}

    f, _ = opf_costfcn(x, om)

    ## update solution data
    Va = x[vv['i1']['Va']:vv['iN']['Va']]
    Vm = x[vv['i1']['Vm']:vv['iN']['Vm']]
    Pg = x[vv['i1']['Pg']:vv['iN']['Pg']]
    Qg = x[vv['i1']['Qg']:vv['iN']['Qg']]
    V = Vm * exp(1j * Va)

    ##-----  calculate return values  -----
    ## update voltages & generator outputs
    bus[:, VA] = Va * 180 / pi
    bus[:, VM] = Vm
    gen[:, PG] = Pg * baseMVA
    gen[:, QG] = Qg * baseMVA
    gen[:, VG] = Vm[gen[:, GEN_BUS].astype(int)]

    ## compute branch flows
    f_br = branch[:, F_BUS].astype(int)
    t_br = branch[:, T_BUS].astype(int)
    Sf = V[f_br] * conj(Yf * V)  ## cplx pwr at "from" bus, p.u.
    St = V[t_br] * conj(Yt * V)  ## cplx pwr at "to" bus, p.u.
    branch[:, PF] = Sf.real * baseMVA
    branch[:, QF] = Sf.imag * baseMVA
    branch[:, PT] = St.real * baseMVA
    branch[:, QT] = St.imag * baseMVA

    ## line constraint is actually on square of limit
    ## so we must fix multipliers
    muSf = zeros(nl)
    muSt = zeros(nl)
    if len(il) > 0:
        muSf[il] = 2 * info['lmbda'][2 * nb +       arange(nl2)] * branch[il, RATE_A] / baseMVA
        muSt[il] = 2 * info['lmbda'][2 * nb + nl2 + arange(nl2)] * branch[il, RATE_A] / baseMVA

    ## update Lagrange multipliers
    bus[:, MU_VMAX]  = info['zu'][vv['i1']['Vm']:vv['iN']['Vm']]
    bus[:, MU_VMIN]  = info['zl'][vv['i1']['Vm']:vv['iN']['Vm']]
    gen[:, MU_PMAX]  = info['zu'][vv['i1']['Pg']:vv['iN']['Pg']] / baseMVA
    gen[:, MU_PMIN]  = info['zl'][vv['i1']['Pg']:vv['iN']['Pg']] / baseMVA
    gen[:, MU_QMAX]  = info['zu'][vv['i1']['Qg']:vv['iN']['Qg']] / baseMVA
    gen[:, MU_QMIN]  = info['zl'][vv['i1']['Qg']:vv['iN']['Qg']] / baseMVA
    bus[:, LAM_P]    = info['lmbda'][nn['i1']['Pmis']:nn['iN']['Pmis']] / baseMVA
    bus[:, LAM_Q]    = info['lmbda'][nn['i1']['Qmis']:nn['iN']['Qmis']] / baseMVA
    branch[:, MU_SF] = muSf / baseMVA
    branch[:, MU_ST] = muSt / baseMVA

    ## package up results
    nlnN = om.getN('nln')

    ## extract multipliers for nonlinear constraints
    kl = find(info['lmbda'][:2 * nb] < 0)
    ku = find(info['lmbda'][:2 * nb] > 0)
    nl_mu_l = zeros(nlnN)
    nl_mu_u = r_[zeros(2 * nb), muSf, muSt]
    nl_mu_l[kl] = -info['lmbda'][kl]
    nl_mu_u[ku] =  info['lmbda'][ku]

    ## extract multipliers for linear constraints
    lam_lin = info['lmbda'][2 * nb + 2 * nl2 + arange(nA)]   ## lmbda for linear constraints
    kl = find(lam_lin < 0)                     ## lower bound binding
    ku = find(lam_lin > 0)                     ## upper bound binding
    mu_l = zeros(nA)
    mu_l[kl] = -lam_lin[kl]
    mu_u = zeros(nA)
    mu_u[ku] = lam_lin[ku]

    mu = {
      'var': {'l': info['zl'], 'u': info['zu']},
      'nln': {'l': nl_mu_l, 'u': nl_mu_u}, \
      'lin': {'l': mu_l, 'u': mu_u}
    }

    results = ppc
    results['bus'], results['branch'], results['gen'], \
        results['om'], results['x'], results['mu'], results['f'] = \
            bus, branch, gen, om, x, mu, f

    pimul = r_[
        results['mu']['nln']['l'] - results['mu']['nln']['u'],
        results['mu']['lin']['l'] - results['mu']['lin']['u'],
        -ones(ny > 0),
        results['mu']['var']['l'] - results['mu']['var']['u']
    ]
    raw = {'xr': x, 'pimul': pimul, 'info': info['status'], 'output': output}

    return results, success, raw
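A side note on the multiplier handling in the example above: Ipopt returns one signed multiplier per constraint, and the code splits it by sign into lower- and upper-bound multipliers. A minimal numpy sketch of the same idea, with made-up values:

import numpy as np

lam = np.array([-0.5, 0.0, 1.2])     # signed multipliers from the solver
mu_l = np.where(lam < 0, -lam, 0.0)  # lower bound binding where lam < 0
mu_u = np.where(lam > 0,  lam, 0.0)  # upper bound binding where lam > 0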
Example n. 38
def main(traj_filename):
    prob = {}
    prob["n"] = 10
    prob["qdim"] = 2
    prob["udim"] = 1
    # prob["dt"] = 0.1/( prob["n"]-1)
    prob["dt"] = 0.1

    p_L = [-2, -1*np.pi, -2, -np.pi, -3]
    p_U = [2, 1*np.pi, 2, np.pi,  3]

    x_L =  np.tile(p_L, prob["n"])
    x_U =  np.tile(p_U, prob["n"])

    qdim = prob['qdim']
    start = np.array([0]*qdim+[0]*qdim)
    # start[1] = -np.pi  # alternative initial pole angle
    start[1] = 0
    end = np.array([0]*qdim+[0]*qdim)

    n = prob["n"]
    q_v_arr_lst = [np.linspace(start[i], end[i], n) for i in range(2*prob['qdim'])]
    u_arr = np.ones((prob["udim"], n))*0.001
    X_2d = np.vstack([q_v_arr_lst, u_arr])

    X_init = X_2d.T.flatten()
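    # layout note: X_2d has one row per q/v/u component and one column per
    # knot point, so transposing before flattening stores the decision vector
    # time-major: [q_0, v_0, u_0, q_1, v_1, u_1, ...]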
    # random sample point used to tape the ADOL-C function objects below
    X_sample = np.random.uniform(x_L, x_U)

    #set the cost and the gradient of the cost
    ctrl_cost = cost.Control_Cost(prob)
    eval_f_adolc = aa.Func_Adolc(ctrl_cost, X_sample, scaler=True)
    eval_grad_f_adolc = aa.Eval_Grad_F_Adolc(eval_f_adolc.id)
    # eval_grad_f_adolc = aa.Func_Adolc(ctrl_cost.eval_grad_f, X_sample)


    # set the constraint functions for points at specific times
    g1 = np.array([-np.pi, 0])
    g2 = np.array([0, 0])
    points = [(0, g1), (n-1, g2)]
    # points = [(n-1, end)]
    # points = [(0, start)]
    # p_index, p_g_func = [constraint.get_point_constriant(prob, t, g)
    #                      for (t, g) in points]
    # q and v of the pole
    dims = np.array([1,1+prob["qdim"]])
    p_index_g_piar = [constraint.get_point_constriant(prob, t, g, dims)
                         for (t, g) in points]
    p_index_iter, p_g_func_iter = zip(*p_index_g_piar)
    p_index_lst = list(p_index_iter)
    p_g_lst = list(p_g_func_iter)

    # p_g_adolc_lst = [aa.Func_Adolc(g, X_sample[i])
    #                       for (i, g) in p_index_g_piar]

    D_factory = constraint.Dynamics_constriant
    model_path = "/home/tao/src/gym/gym/envs/mujoco/assets/inverted_pendulum.xml"

    model = dy.make_model(model_path)
    sim = mujoco_py.MjSim(model)
    qdim = model.nq
    udim = model.nu

    cart = dy.Mujoco_Dynamics(model, sim, qdim, udim)
    dynamics = cart.dynamics

    d_index, d_g_func = constraint.get_dynamic_constriants(prob,
                                                           dynamics,
                                                           range(0, n-1))
    # d_g_adolc = aa.Func_Adolc(d_g_func, X_sample[d_index[0]])

    # all dynamics constraints share the same approximation function
    # d_g_adolc_lst = [d_g_adolc for i in d_index]
    d_g_lst = [d_g_func for i in d_index]

    index_lst = p_index_lst + d_index
    eval_g_lst = p_g_lst + d_g_lst

    # X_sample_lst = [X_sample[i] for i in index_lst]
    #
    # g_adolc_x_pair = zip(eval_g_adolc_lst, X_sample_lst)
    #
    # eval_jac_adolc_lst = [aa.Eval_Jac_G_Adolc(g.id, x)
    #                 for (g, x) in g_adolc_x_pair]


    eval_g = constraint.Stacked_Constriants(eval_g_lst, index_lst)

    eval_g_adolc = aa.Func_Adolc(eval_g, X_sample)
    eval_jac_g_adolc = aa.Eval_Jac_G_Adolc(eval_g_adolc.id, X_sample)

    # eval_jac_g = constraint.Stacked_Constriants_Jacobian(eval_g_lst   ,
    #                                                      eval_jac_lst,
    #                                                      index_lst)
    nvar = X_init.size
    ncon = eval_g(X_init).size

    eval_lagrangian = constraint.Eval_Lagrangian(ctrl_cost, eval_g)
    #x, lagrangian, obj_factor
    x_lag_lst = [X_sample, np.ones(ncon), 1]
    x_lag_arr = np.hstack(x_lag_lst)
    eval_lagrangian_adolc = aa.Func_Adolc(eval_lagrangian, x_lag_lst)

    eval_h_adolc = aa.Eval_h_adolc(eval_lagrangian_adolc.id, x_lag_arr)
    mask = eval_h_adolc(X_init, np.ones(ncon), 1, True)
    H = eval_h_adolc(X_init, np.ones(ncon), 1, False)
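    # the two calls above: flag=True returns the sparsity pattern (row/col
    # indices), flag=False the Hessian values at the given point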
    g_L = np.zeros(ncon)
    g_U = np.zeros(ncon)

    nnzj = eval_jac_g_adolc.nnzj
    nnzh = eval_h_adolc.nnzh
    nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, 0, eval_f_adolc ,
                        eval_grad_f_adolc, eval_g_adolc, eval_jac_g_adolc)
    # nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f_adolc ,
    #                     eval_grad_f_adolc, eval_g_adolc, eval_jac_g_adolc, eval_h_adolc)

    output, zl, zu, constraint_multipliers, obj, status = nlp.solve(X_init)
    nlp.close()
    output_2d = output.reshape(n, -1)
    output.dump(traj_filename)
    return output_2d, prob
Example n. 39
def cons_opt(obj, cons, Vars, x0):
    """nlinprog

    Parameters
    ----------
    obj :
    cons :

    Returns
    -------

    Notes
    ------
    """
    nvars = len(Vars)

    x_L = np.array((pyipopt.NLP_LOWER_BOUND_INF,)*nvars)
    x_U = np.array((pyipopt.NLP_UPPER_BOUND_INF,)*nvars)
    #x_L = -20.*np.ones(nvars)
    #x_U = 20.*np.ones(nvars)

    g_L, g_U, g = [], [], []
    for gc in group_cons_by_ub_lb(cons):
        g_L.append(gc.lb)
        g_U.append(gc.ub)
        g.append(gc.expr)
    ncon = len(g)

    g_L, g_U = np.array(g_L), np.array(g_U)
    eval_g = ft.partial(list2array_wrap, sym.lambdify(Vars, g))

    js = jac(Vars, g)
    jrow, jcol, jdata = np.asarray(js.row, dtype=int), np.asarray(js.col, dtype=int), js.data
    eval_jac_g = ft.partial(eval_jac_cons, (jrow, jcol, sym.lambdify(Vars, jdata.tolist())))
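    # the Jacobian sparsity (jrow, jcol) is fixed at symbolic differentiation
    # time; only the numeric values are re-evaluated at each iterate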

    eval_f = ft.partial(eval_expr, sym.lambdify(Vars, obj))
    eval_grad_f = ft.partial(eval_grad_obj, sym.lambdify(Vars, grad(Vars, obj)))
    #eval_hessian_f = ft.partial(eval_expr, sym.lambdify(Vars, sym.hessian(obj, Vars)))

    nnzj = js.nnz
    nnzh = 0

    if debug:
        for gi, lb, ub in zip(g, g_L, g_U):
            print('{} \in [{}, {}]'.format(gi, lb, ub))

    nlp = pyipopt.create(nvars, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g)
    # Verbosity level \in [0, 12]
    nlp.int_option('print_level', print_level)
    nlp.num_option('constr_viol_tol', constr_viol_tol)
    res_x, zl, zu, constraint_multipliers, res_obj, status = nlp.solve(x0)
    nlp.close()

    if debug:

        def print_variable(variable_name, value):
            for i in range(len(value)):
                print(variable_name + "["+str(i)+"] =", value[i])

        print()
        print("Solution of the primal variables, x")
        print_variable("x", res_x)
        print()
        print("Solution of the bound multipliers, z_L and z_U")
        print_variable("z_L", zl)
        print_variable("z_U", zu)
        print()
        print("Solution of the constraint multipliers, lambda")
        print_variable("lambda", constraint_multipliers)
        print()
        print("Objective value")
        print("f(x*) =", res_obj)

    # Return codes in IpReturnCodes_inc.h
    print('status:', status)
    return spec.OPTRES(res_obj, res_x, 'OK', status in (0, 1))
Example n. 40
    
## IPOPT SOLUTION

start = time.time()
nvar = data.numX
xl = np.zeros(data.numX)
xu = 2e19*np.ones(data.numX)
m = 0
g_L = np.array([], dtype=float)
g_U = np.array([], dtype=float)
nnzj = 0
nnzh = int(data.numX * (data.numX + 1) / 2)

nlp = pyipopt.create(nvar, xl, xu, m, g_L, g_U, nnzj, nnzh, evaluateFunction, evaluateGradient, eval_g, eval_jac_g, evaluateHessian)
nlp.num_option('tol', 1e-5)
nlp.int_option("print_level", 5)
nlp.str_option('hessian_approximation', 'limited-memory')
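# with hessian_approximation set to 'limited-memory', Ipopt does not call the
# supplied Hessian callback, so the nnzh computed above goes unused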
nlp.str_option('mu_strategy', 'adaptive')
nlp.str_option('mu_oracle', 'probing')
nlp.str_option('linear_solver', 'ma97')
nlp.num_option('acceptable_tol', 1e-2)
nlp.int_option("acceptable_iter", 5)
nlp.num_option('acceptable_obj_change_tol', 5e-1)
with open('/home/wilmer/Dropbox/IpOptSolver/currentIntensities.txt') as f:
    mylines = [float(line.rstrip('\n')) for line in f]
data.currentIntensities = np.array(mylines)
x, zl, zu, constraint_multipliers, obj, status = nlp.solve(data.currentIntensities)
print('solved in ' + str(time.time()-start) + ' seconds')

# PYTHON scipy.optimize solution
Example n. 41
    def min_ipopt(self, XP0, xtrace=None):
        """
        Minimize f starting from XP0 using IPOPT.
        Returns the minimizing state, the minimum function value, and the
        termination information.
        """
        if not self.taped:
            self.tape_A(xtrace)

        # Note: this creates a new instance of the class on every call, which
        # may be wasteful; it could be moved outside this function or
        # replaced with a plain function.
        eval_h_adolc = self.Eval_h_adolc(XP0)
        nnzh = eval_h_adolc.nnz
        #Includes NPest
        nvar = len(XP0)

        #Use bounds - Includes parameters. Does this work for time dependent parameters?
        x_L = np.asarray(self.bounds)[:, 0]
        x_U = np.asarray(self.bounds)[:, 1]

        ncon = 0
        g_L = np.array([])
        g_U = np.array([])
        nnzj = 0
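        # ncon = 0 with empty bound arrays sets up an unconstrained problem;
        # the constraint callbacks are then expected to return empty arrays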

        nlp_adolc = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh,
                                   A_taped, gradA_taped, eval_g, eval_jac_g,
                                   eval_h_adolc)

        #Setting default settings to match minAone with adjustments
        nlp_adolc.num_option('tol', 1e-6)
        nlp_adolc.str_option('mu_strategy', 'adaptive')
        nlp_adolc.str_option('adaptive_mu_globalization',
                             'never-monotone-mode')
        nlp_adolc.int_option('max_iter', 1000)
        nlp_adolc.str_option('linear_solver', 'ma97')
        nlp_adolc.num_option('bound_relax_factor', 0)

        # IPOPT distinguishes between num, int, and str options.
        # Only three options are supported here for now; a loop over
        # (name, value) pairs with type checks would generalize this.
        if self.opt_args is not None:
            if 'max_iter' in self.opt_args:
                nlp_adolc.int_option('max_iter', self.opt_args.get('max_iter'))

            if 'tol' in self.opt_args:
                nlp_adolc.num_option('tol', self.opt_args.get('tol'))

            if 'linear_solver' in self.opt_args:
                nlp_adolc.str_option('linear_solver',
                                     self.opt_args.get('linear_solver'))

        # start the optimization
        print("Beginning optimization...")
        tstart = time.time()

        XPmin, _, _, _, Amin, status = nlp_adolc.solve(XP0)

        #deleting objects
        nlp_adolc.close()
        del eval_h_adolc

        print("Optimization complete!")
        print("Time = {0} s".format(time.time() - tstart))
        #print("Exit flag = {0}".format(status))
        #print("Exit message: {0}".format(res.message))
        #print("Iterations = {0}".format(res.nit))
        print("Obj. function value = {0}\n".format(Amin))

        return XPmin, Amin, status
Example n. 42
def get_ipopt_options(rd, lb, ub, tol, max_iter, **kwargs):
    """Get options for IPOPT module (interior point algorithm)

    See `<https://projects.coin-or.org/Ipopt>`_

    rd : :py:class:`dolfin_adjoint.ReducedFunctional`
        The reduced functional
    lb : list
        Lower bound on the control
    ub : list
        Upper bound on the control
    tol : float
        Tolerance (not applied in this helper)
    max_iter : int
        Maximum number of iterations (not applied in this helper)

    *Returns*

    nlp : ipopt instance
        A nonlinear ipopt problem
    """

    ncontrols = len(ub)
    nconstraints = 0
    empty = np.array([], dtype=float)
    clb = empty
    cub = empty
    constraints_nnz = nconstraints * ncontrols
    # The constraint function, should do nothing
    def fun_g(x, user_data=None):
        return empty

    # The constraint Jacobian
    def jac_g(x, flag, user_data=None):
        if flag:
            rows = np.array([], dtype=int)
            cols = np.array([], dtype=int)
            return (rows, cols)
        else:
            return empty

    J = rd.__call__
    dJ = rd.derivative

    nlp = pyipopt.create(
        ncontrols,  # length of control vector
        lb,  # lower bounds on control vector
        ub,  # upper bounds on control vector
        0,  # number of constraints
        clb,  # lower bounds on constraints,
        cub,  # upper bounds on constraints,
        0,  # number of nonzeros in the constraint Jacobian
        0,  # number of nonzeros in the Hessian
        J,  # to evaluate the functional
        dJ,  # to evaluate the gradient
        fun_g,  # to evaluate the constraints
        jac_g,
    )  # to evaluate the constraint Jacobian

    pyipopt.set_loglevel(1)
    return nlp
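A minimal, hypothetical usage of the helper above (rd, lb, ub and an initial control vector m0 are assumed to already exist; the solve call follows the six-value pyipopt return convention seen in the other examples):

nlp = get_ipopt_options(rd, lb, ub, tol=1e-6, max_iter=100)
x_opt, zl, zu, mult, J_opt, status = nlp.solve(m0)
nlp.close()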
Example n. 43
		return j.tolist()

dummyh, hrow, hcol = _amplpy.eval_H( nlp.x0, nlp.pi0, 1, 1.0 )
print hrow, hcol
dummyh, hrow, hcol = nlp.hess( nlp.x0, nlp.pi0, 1.0 )

def eval_h(x, lagrange, obj_factor, flag):
	if flag:
		return (hrow.tolist(), hcol.tolist())
	else:
		temph, dummyr, dummyc = nlp.hess(array(x), array(lagrange), obj_factor)
		assert len(temph) == nnzh
		return temph.tolist()

print n, m
print xl, xu
print nnzj
print gl, gu
print x0
print eval_f(x0)
print eval_grad_f(x0)
print eval_g(x0)
print eval_jac_g(x0, False)
print eval_jac_g(x0, True)

nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, 0, eval_f, eval_grad_f, eval_g, eval_jac_g)

print nlp

#nlp.solve(x0)
Example n. 44
def MCdo(b, i):
    user = os.path.expanduser("~")

    tempOut = robj.r("rm(list=ls())")
    tempOut = robj.r("i <- %i"%(i+1))
    tempOut = robj.r("b <- %i"%(i*(1000)+(b+1) ))
    tempOut = robj.r("source('CMLE_unstable_support.r')")
    regrMat = np.array((robj.r("do.call(cbind, regr)")))
    regr = OrderedDict([('SA',np.ones((regrMat.shape[0],0))),('VA',regrMat[:,0:1]),('CB',np.ones((regrMat.shape[0],0))),('barWA',regrMat[:,1:2]),('barWB',regrMat[:,2:4]),('bara',regrMat[:,4:5]),('VB',regrMat[:,5:6])])
    Y = np.array((robj.r("Y")))
    x0 = np.array((robj.r("unname(x0)")))
    xL = np.array((robj.r("unname(xL)")))
    tPL = np.array((robj.r("unname(out.2step$time )")))
    
    def fll(x):
        return LL_jo(x, Y, regr)
    
    def heq(x):
        return  const_cmle(x, Y, regr)
    np.random.seed(b)
    while np.isinf(fll(adouble(x0)).val):
        x0 = np.random.uniform(size=len(x0))
    
    ccd = os.getcwd()
    if not os.path.exists(user + '/Documents/adolc%i_%i'%(i, b)):
        os.makedirs(user + '/Documents/adolc%i_%i'%(i, b))
    
    os.chdir(user + '/Documents/adolc%i_%i'%(i, b))
    
    
    
    adolc.trace_on(1)
    ax = adolc.adouble(np.zeros(len(x0)))
    adolc.independent(ax)
    ay = fll(ax)
    adolc.dependent(ay)
    adolc.trace_off()
    
    # trace constraint function
    adolc.trace_on(2)
    ax = adolc.adouble(x0)
    adolc.independent(ax)
    ay = heq(ax)
    adolc.dependent(ay)
    adolc.trace_off()
    
    npar = len(x0)
    
    def adFun(x):
        return adolc.function(1, x)
    
    def grFun(x): 
        return adolc.gradient(1, x)
    
    def const_adolc(x):
        return adolc.function(2,x)
    
    def jac_adolc(x):
        return adolc.jacobian(2,x)
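    # ADOL-C tape ids: 1 = objective (function/gradient), 2 = constraints
    # (function/jacobian); tape 3, the Lagrangian, is traced further below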
    
    
    def lagrangian(x, lagrange, obj_factor):
        return  obj_factor*fll(x) + np.dot(lagrange, heq(x))

    #Jacobian
    
    
    #### initialize it
    class jac_c_adolc:
        
        def __init__(self, x):
            options = None
            result = adolc.colpack.sparse_jac_no_repeat(2,x,options)
            
            self.nnz  = result[0]     
            self.rind = np.asarray(result[1],dtype=int)
            self.cind = np.asarray(result[2],dtype=int)
            self.values = np.asarray(result[3],dtype=float)
            
        def __call__(self, x, flag, user_data=None):
            if flag:
                return (self.rind, self.cind)
            else:
                result = adolc.colpack.sparse_jac_repeat(2, x, self.nnz, self.rind,
                    self.cind, self.values)
                return result[3]
    
    ##### create the function
    Jac_c_adolc = jac_c_adolc(x0)
    
    
    ###Hessian
  
        
    # trace lagrangian function
    adolc.trace_on(3)
    ax = adolc.adouble(x0)
    adolc.independent(ax)
    ay = lagrangian(ax, xL, np.array([1.0]))
    adolc.dependent(ay)
    adolc.trace_off()
    

    M = Y.shape[1]
    nreal = npar-M
    given = {'rind': np.concatenate((np.kron(np.arange(nreal), np.ones(npar,dtype='int')), np.arange(nreal, npar))),
             'cind': np.concatenate((np.tile(np.arange(npar), nreal), np.arange(nreal, npar)))}
    mask = np.where(given['rind'] <= given['cind'])
    given['rind'] = given['rind'][mask]
    given['cind'] = given['cind'][mask]
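    # keep only entries with rind <= cind, i.e. one triangle of the symmetric
    # Hessian, so each off-diagonal entry is supplied exactly once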
    
    
    def hessLag_adolc(x, lagrange, obj_factor, flag, user_data=None):
        if flag:
            result = (given['rind'], given['cind'])
        else:
             result = np.ravel(adolc.hessian(3, x)[given['rind'],given['cind']], order="C")
        return result
    
    
    H2 = hessLag_adolc(x0, xL, 1.0, False)
    H2a = hessLag_adolc(x0, xL, 1.0, True)
    nnzh = len(given['rind'])
   

    ##Optimization
    #PRELIMS: other things to pass to IPOPT
    nvar = len(x0) #number of variables in the problem
    x_L = np.array([-np.inf]*nvar, dtype=float) #box contraints on variables (none)
    x_U = np.array([np.inf]*nvar, dtype=float)
     
    #PRELIMS:define the (in)equality constraints
    ncon = heq(ax).shape[0] #number of constraints
    g_L = np.array([0]*ncon, dtype=float) #constraints are to equal 0
    g_U = np.array([0]*ncon, dtype=float) #constraints are to equal 0
    
    
    #PRELIMS: define the number of nonzeros in the jacobian 
    val = Jac_c_adolc(x0, False) 
    nnzj = len(val)            
    
      
    # create the nonlinear programming model
    nlp2 = pyipopt.create(
    nvar, 
    x_L,
    x_U,
    ncon,
    g_L,
    g_U,
    nnzj,
    nnzh,
    adFun,
    grFun,
    const_adolc,
    Jac_c_adolc,
    hessLag_adolc
    )
    
    nlp2.num_option('expect_infeasible_problem_ctol', 1e-15)
    nlp2.int_option('max_iter', 100)
    nlp2.num_option('dual_inf_tol', 1e-3)
    nlp2.num_option('constr_viol_tol', 1e-3)
    nlp2.num_option('tol', 1e-6)
    nlp2.int_option('print_level', 0)
    
    t1 = time()
    out = nlp2.solve(x0)
    t1 = time()-t1
    t1 = t1 + tPL
    
    # free the model
    nlp2.close()

    
    os.chdir(ccd)
    shutil.rmtree(user + '/Documents/adolc%i_%i'%(i, b)) 
    output = np.concatenate((out[0][:6], t1, np.array([out[5]])))
    return output
Example n. 45
    def _minimize(self, initial_val, loss_grad_func, equality_funcs,
                  equality_grad_funcs, inequality_funcs, inequality_grad_funcs,
                  packed_bounds, step_callback, optimizer_kwargs):

        # initial value should be float64
        initial_val = np.array(initial_val, dtype=np.float_)

        # objective function
        def eval_f(x, user_data=None):
            loss, _ = loss_grad_func(x)
            return np.array(loss, dtype=np.float_)

        # gradient of objective function
        def eval_grad_f(x, user_data=None):
            _, grad_f = loss_grad_func(x)
            return np.array(grad_f, dtype=np.float_)

        # constraint function (first inequalities, then equalities)
        def eval_g(x, user_data=None):
            inequalities = [inequality_funcs[i](x) for i in range(nineqcon)]
            equalities = [equality_funcs[i](x) for i in range(neqcon)]
            return np.array(inequalities + equalities, dtype=np.float_).reshape(ncon, )

        # hessian of the lagrangian (first inequalities then equalities)
        def eval_h(x, lagrange, obj_factor, flag, user_data=None):
            rows, cols = np.tril_indices(nvar)
            if flag:
                return (np.array(rows, dtype=np.int_), np.array(cols, dtype=np.int_))
            else:
                loss = [loss_hessian_func(x)]
                inequalities = [inequality_hessian_funcs[i](x) for i in range(nineqcon)]
                equalities = [equality_hessian_funcs[i](x) for i in range(neqcon)]
                values = np.zeros([nvar, nvar])
                values += obj_factor * loss[0][0]
                for idc in range(nineqcon):
                    values += lagrange[idc] * inequalities[idc][0]
                for idc in range(neqcon):
                    values += lagrange[idc + nineqcon] * equalities[idc][0]
                return np.array(values.reshape(nvar, nvar)[rows, cols], dtype=np.float_)

        # constraint Jacobian (first inequalities, then equalities)
        def eval_jac_g(x, flag, user_data=None):
            rows, cols = np.indices((ncon, nvar))
            if flag:
                return (np.array(rows.reshape(-1, 1), dtype=np.int_), np.array(cols.reshape(-1, 1), dtype=np.int_))
            else:
                inequalities = [inequality_grad_funcs[i](x) for i in range(nineqcon)]
                equalities = [equality_grad_funcs[i](x) for i in range(neqcon)]
                values = np.empty([ncon, nvar])
                for idc in range(nineqcon):
                    values[idc, :] = inequalities[idc][0]
                for idc in range(neqcon):
                    values[idc + nineqcon, :] = equalities[idc][0]
                return np.array(values.reshape(ncon * nvar, ), dtype=np.float_)

        # box constraints on the variables
        nvar = int(np.sum([np.prod(self._vars[i].get_shape().as_list()) for i in range(len(self._vars))]))
        if self._packed_bounds is None:
            x_L = -np.ones((nvar), dtype=np.float_) * np.inf
            x_U = np.ones((nvar), dtype=np.float_) * np.inf
        else:
            x_L, x_U = zip(*self._packed_bounds)
            x_L = np.array(x_L, dtype=np.float_)
            x_U = np.array(x_U, dtype=np.float_)

        # inequality constraints as g(x)>=0 and equality constraints as h(x)=0
        nineqcon = len(self._inequalities)
        neqcon = len(self._equalities)
        ncon = nineqcon + neqcon
        g_L_ineq = np.zeros((nineqcon), dtype=np.float_)
        g_U_ineq = np.ones((nineqcon), dtype=np.float_) * 2.0 * pow(10.0, 19)
        g_L_eq = np.zeros((neqcon), dtype=np.float_)
        g_U_eq = np.zeros((neqcon), dtype=np.float_)
        g_L = np.concatenate((g_L_ineq, g_L_eq), axis=0)
        g_U = np.concatenate((g_U_ineq, g_U_eq), axis=0)
        nnzj = nvar * ncon
        nnzh = int(nvar * (nvar + 1) / 2)
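        # dense sparsity: the Jacobian uses all ncon*nvar entries and the
        # Hessian all nvar*(nvar+1)/2 entries of its lower triangle, matching
        # the patterns produced by eval_jac_g and eval_h above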

        minimize_args = [nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g]

        # create nlp in ipopt
        import pyipopt

        # log_level decides if logs from pyipopt are desired -- these are logs on
        # top of what is returned from ipopt set by "print_level"; see below
        if "log_level" in optimizer_kwargs["options"]:
            pyipopt.set_loglevel(optimizer_kwargs["options"]["log_level"])

        nlp = pyipopt.create(*minimize_args)

        # check https://www.coin-or.org/Ipopt/documentation/node40.html
        # for more options and default settings
        # default print_level=5
        # default max_iter=3000
        # default tol=1e-8

        for optvar in optimizer_kwargs["options"]:
            if optvar == "log_level":
                pass  # already handled above via pyipopt.set_loglevel
            elif isinstance(optimizer_kwargs["options"][optvar], str):
                nlp.str_option(optvar, optimizer_kwargs["options"][optvar])
            elif isinstance(optimizer_kwargs["options"][optvar], int):
                nlp.int_option(optvar, optimizer_kwargs["options"][optvar])
            else:
                nlp.num_option(optvar, optimizer_kwargs["options"][optvar])

        result_x, zl, zu, constraint_multipliers, result_f, status = nlp.solve(initial_val)
        nlp.close()

        message_lines = [
            'Optimization terminated with:',
            '  Message: %s',
            '  Objective function value: %f',
        ]
        message_args = [status, result_f]
        logging.info('\n'.join(message_lines), *message_args)
        print("Optimization terminated with message: {}".format(status))
        return result_x
Example n. 46
def test_ipopt_optimization():
	"""
	This test checks
	  1. the sparse functionality of pyadolc
	  2. the execution speed compared to the direct sparse computation
	  3. run the optimization with the derivatives provided by pyadolc
	
	IPOPT is an interior point algorithm to solve

	   min      f(x)    over x in R^n
	   s.t.     g_L <= g(x) <= g_U
	            x_L <=  x   <= x_U

	"""
	
	try:
		import pyipopt
	except ImportError:
		#print 'pyipopt is not installed, skipping test'
		#return
		raise NotImplementedError("pyipopt is not installed, skipping test")
	import time

	nvar = 4
	x_L = numpy.ones((nvar), dtype=numpy.float_) * 1.0
	x_U = numpy.ones((nvar), dtype=numpy.float_) * 5.0

	ncon = 2
	g_L = numpy.array([25.0, 40.0])
	g_U = numpy.array([2.0*pow(10.0, 19), 40.0]) 

	def eval_f(x, user_data = None):
		assert len(x) == 4
		return x[0] * x[3] * (x[0] + x[1] + x[2]) + x[2]

	def eval_grad_f(x, user_data = None):
		assert len(x) == 4
		grad_f = numpy.array([
			x[0] * x[3] + x[3] * (x[0] + x[1] + x[2]) ,
			x[0] * x[3],
			x[0] * x[3] + 1.0,
			x[0] * (x[0] + x[1] + x[2])
			])
		return grad_f
		
	def eval_g(x, user_data= None):
		assert len(x) == 4
		return numpy.array([
			x[0] * x[1] * x[2] * x[3], 
			x[0]*x[0] + x[1]*x[1] + x[2]*x[2] + x[3]*x[3]
		])

	nnzj = 8
	def eval_jac_g(x, flag, user_data = None):
		if flag:
			return (numpy.array([0, 0, 0, 0, 1, 1, 1, 1]), 
				numpy.array([0, 1, 2, 3, 0, 1, 2, 3]))
		else:
			assert len(x) == 4
			return numpy.array([ x[1]*x[2]*x[3], 
						x[0]*x[2]*x[3], 
						x[0]*x[1]*x[3], 
						x[0]*x[1]*x[2],
						2.0*x[0], 
						2.0*x[1], 
						2.0*x[2], 
						2.0*x[3] ])
			
	nnzh = 10
	def eval_h(x, lagrange, obj_factor, flag, user_data = None):
		if flag:
			hrow = [0, 1, 1, 2, 2, 2, 3, 3, 3, 3]
			hcol = [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]
			return (numpy.array(hcol,dtype=int), numpy.array(hrow,dtype=int))
		else:
			values = numpy.zeros((10), numpy.float_)
			values[0] = obj_factor * (2*x[3])
			values[1] = obj_factor * (x[3])
			values[2] = 0
			values[3] = obj_factor * (x[3])
			values[4] = 0
			values[5] = 0
			values[6] = obj_factor * (2*x[0] + x[1] + x[2])
			values[7] = obj_factor * (x[0])
			values[8] = obj_factor * (x[0])
			values[9] = 0
			values[1] += lagrange[0] * (x[2] * x[3])

			values[3] += lagrange[0] * (x[1] * x[3])
			values[4] += lagrange[0] * (x[0] * x[3])

			values[6] += lagrange[0] * (x[1] * x[2])
			values[7] += lagrange[0] * (x[0] * x[2])
			values[8] += lagrange[0] * (x[0] * x[1])
			values[0] += lagrange[1] * 2
			values[2] += lagrange[1] * 2
			values[5] += lagrange[1] * 2
			values[9] += lagrange[1] * 2
			return values



	def apply_new(x):
		return True

	x0 = numpy.array([1.0, 5.0, 5.0, 1.0])
	pi0 = numpy.array([1.0, 1.0])

	# check that adolc gives the same answers as derivatives calculated by hand
	trace_on(1)
	ax = adouble(x0)
	independent(ax)
	ay = eval_f(ax)
	dependent(ay)
	trace_off()

	trace_on(2)
	ax = adouble(x0)
	independent(ax)
	ay = eval_g(ax)
	dependent(ay)
	trace_off()
	
	trace_on(3)
	ax = adouble(x0)
	independent(ax)
	ay = eval_g(ax)
	dependent(ay[0])
	trace_off()
	
	trace_on(4)
	ax = adouble(x0)
	independent(ax)
	ay = eval_g(ax)
	dependent(ay[1])
	trace_off()
	

	def eval_f_adolc(x, user_data = None):
		 return function(1,x)[0]

	def eval_grad_f_adolc(x, user_data = None):
		 return gradient(1,x)

	def eval_g_adolc(x, user_data= None):
		return function(2,x)

	def eval_jac_g_adolc(x, flag, user_data = None):
		options = numpy.array([1,1,0,0],dtype=int)
		result = sparse.sparse_jac_no_repeat(2,x,options)
		if flag:
			return (numpy.asarray(result[1],dtype=int), numpy.asarray(result[2],dtype=int))
		else:
			return result[3]
			
	def eval_h_adolc(x, lagrange, obj_factor, flag, user_data = None):
		options = numpy.array([0,0],dtype=int)
		assert numpy.ndim(x) == 1
		assert numpy.size(x) == 4
		result_f = sparse.sparse_hess_no_repeat(1, x, options)
		result_g0 = sparse.sparse_hess_no_repeat(3, x,options)
		result_g1 = sparse.sparse_hess_no_repeat(4, x,options)
		Hf  = scipy.sparse.coo_matrix( (result_f[3], (result_f[1], result_f[2])), shape=(4, 4))
		Hg0 = scipy.sparse.coo_matrix( (result_g0[3], (result_g0[1], result_g0[2])), shape=(4, 4))
		Hg1 = scipy.sparse.coo_matrix( (result_g1[3], (result_g1[1], result_g1[2])), shape=(4, 4))
		
		H = Hf + Hg0 + Hg1
		H = H.tocoo()
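		# the assertions below call this with obj_factor = 1 and unit
		# multipliers, so the unweighted sum Hf + Hg0 + Hg1 is the correct
		# Lagrangian Hessian in that case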
		
		if flag:
			hrow = H.row
			hcol = H.col
			return (numpy.array(hcol,dtype=int), numpy.array(hrow,dtype=int))

		else:
			values = numpy.zeros((10), float)
			values[:] = H.data
			return values

	# function of f
	assert_almost_equal(eval_f(x0), eval_f_adolc(x0))
	
	# gradient of f
	assert_array_almost_equal(eval_grad_f(x0), eval_grad_f_adolc(x0))

	# function of g
	assert_array_almost_equal(eval_g(x0), function(2,x0))

	# sparse jacobian of g
	assert_array_equal(eval_jac_g_adolc(x0,True)[0], eval_jac_g(x0,True)[0])
	assert_array_equal(eval_jac_g_adolc(x0,True)[1], eval_jac_g(x0,True)[1])
	assert_array_equal(eval_jac_g_adolc(x0,False),  eval_jac_g(x0,False))
	
	# sparse hessian of the lagrangian
	lagrange = numpy.ones(2,dtype=float)
	obj_factor = 1.
	x0 = numpy.random.rand(4)
	result       = (eval_h(x0, lagrange, obj_factor, False), eval_h(x0, lagrange, obj_factor, True))
	result_adolc = (eval_h_adolc(x0, lagrange, obj_factor, False), eval_h_adolc(x0, lagrange, obj_factor, True))
	H       = scipy.sparse.coo_matrix( result, shape=(4, 4))
	H_adolc = scipy.sparse.coo_matrix( result_adolc, shape=(4, 4))
	H = H.todense()
	H_adolc = H_adolc.todense()
	assert_array_almost_equal( H, H_adolc.T)


	# test optimization with PYIPOPT
	nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g, eval_h)
	start_time = time.time()
	result =  nlp.solve(x0)
	end_time = time.time()
	nlp.close()
	pure_python_optimization_time = end_time - start_time


	nlp_adolc = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f_adolc, eval_grad_f_adolc, eval_g_adolc, eval_jac_g_adolc, eval_h_adolc)

	start_time = time.time()
	result_adolc = nlp_adolc.solve(x0)
	end_time = time.time()
	nlp_adolc.close()
	
	adolc_optimization_time = end_time - start_time
	print 'optimization time with derivatives computed by adolc = ', adolc_optimization_time
	print 'optimization time with derivatives computed by hand  = ', pure_python_optimization_time
	assert adolc_optimization_time / pure_python_optimization_time < 10
	
	# this works with the pyipopt version from code.google.com
	assert_array_almost_equal(result[0], result_adolc[0])
	assert_array_almost_equal(result[1], result_adolc[1])
	assert_array_almost_equal(result[2], result_adolc[2])
	assert_array_almost_equal(result[3], result_adolc[3])
Example n. 47
print eval_f(x0)
assert len(eval_grad_f(x0)) == n
print eval_grad_f(x0)

assert len(eval_g(x0)) == m
a =  eval_jac_g(x0, True)
print "row, col ", a[0], a[1]
print eval_jac_g(x0, False)

print eval_h(True, True, True, True)

print eval_h(nlp.x0, nlp.pi0, 1.0, True)

def gf2(x, data = None):
	return array([0.0 for i in xrange(m)])

def f2(x, data = None):
	return 0
"""
	
print "Solving problem with approximate hession calculation now"	
problem = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, 0, eval_f, eval_grad_f, eval_g, eval_jac_g)
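# with no eval_h callback (and 0 passed for nnzh), the Hessian is approximated
# by Ipopt (limited-memory quasi-Newton) rather than supplied by the user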

problem.solve(x0)
problem.close()

print "Solving problem with exact hession calculation now"	
mynlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g, eval_h, applynew)
mynlp.solve(x0)
mynlp.close()
Example n. 48
def Ipopt_Solve(problem):
    """Solves a Nexus optimization problem using ipopt

        Assumptions:
        You can actually install ipopt on your machine

        Source:
        N/A

        Inputs:
        problem    [nexus()]

        Outputs:
        result     [array]

        Properties Used:
        None
    """      
    
    # Pull out the basic problem
    inp = problem.optimization_problem.inputs
    obj = problem.optimization_problem.objective
    con = problem.optimization_problem.constraints
    
    # Number of input variables and constrains
    nvar = len(inp)
    ncon = len(con)
    
    # Set inputs
    ini = inp[:,1] # Initials
    bnd = inp[:,2] # Bounds
    scl = inp[:,3] # Scale
    
    # Scaled initials
    x0 = ini/scl
    x0 = x0.astype(float)
    
    # Nonzero jacobians and hessians, fix this
    nnzj = ncon*nvar
    nnzh = nvar*nvar
     
    # Bounds for inputs and constraints
    flbd = np.zeros_like(ini)
    fubd = np.zeros_like(ini)
    for ii in xrange(0,nvar):
        flbd[ii] = (bnd[ii][0]/scl[ii])
        fubd[ii] = (bnd[ii][1]/scl[ii])

    g_L = np.zeros_like(con)
    g_U = np.zeros_like(con)
    
    # Setup constraints
    for ii in xrange(0,len(con)):
        name = con[ii][0]
        edge = con[ii][2]
        if con[ii][1]=='<':
            g_L[ii] = -np.inf
            g_U[ii] = edge
        elif con[ii][1]=='>':
            g_L[ii] = edge
            g_U[ii] = np.inf
        elif con[ii][1]=='=':
            g_L[ii] = edge
            g_U[ii] = edge

    # Instantiate the problem and set objective
    import pyipopt   #import down here to allow SUAVE to run without the user having Ipopt
    
    flbd = flbd.astype(float)
    fubd = fubd.astype(float)
    g_L  = g_L.astype(float)
    g_U  = g_U.astype(float)
    
    # Create the problem
    nlp = pyipopt.create(nvar, flbd, fubd, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g)

    nlp.str_option('derivative_test_print_all','yes')    
    nlp.str_option('derivative_test','first-order')
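    # Ipopt's derivative checker compares the user-supplied first derivatives
    # against finite differences before the optimization starts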


    # Solve the problem
    result = nlp.solve(x0,problem)
    nlp.close()
    
    return result
Example n. 49
## IPOPT SOLUTION

start = time.time()
nvar = data.numX
xl = np.zeros(data.numX)
xu = 2e19 * np.ones(data.numX)
m = 0
g_L = np.array([], dtype=float)
g_U = np.array([], dtype=float)
nnzj = 0
nnzh = int(data.numX * (data.numX + 1) / 2)

nlp = pyipopt.create(nvar, xl, xu, m, g_L, g_U, nnzj, nnzh, evaluateFunction,
                     evaluateGradient, eval_g, eval_jac_g, evaluateHessian)
nlp.num_option('tol', 1e-5)
nlp.int_option("print_level", 5)
nlp.str_option('hessian_approximation', 'limited-memory')
nlp.str_option('mu_strategy', 'adaptive')
nlp.str_option('mu_oracle', 'probing')
nlp.str_option('linear_solver', 'ma97')
nlp.num_option('acceptable_tol', 1e-2)
nlp.int_option("acceptable_iter", 5)
nlp.num_option('acceptable_obj_change_tol', 5e-1)
mylines = [
    float(myline) for myline in [
        line.rstrip('\n') for line in open(
            '/home/wilmer/Dropbox/IpOptSolver/currentIntensities.txt')
    ]
]
Example n. 50
# sparse jacobian of g
assert_array_equal(eval_jac_g_adolc(x0, True)[0], eval_jac_g(x0, True)[0])
assert_array_equal(eval_jac_g_adolc(x0, True)[1], eval_jac_g(x0, True)[1])
assert_array_equal(eval_jac_g_adolc(x0, False), eval_jac_g(x0, False))

# test optimization with PYIPOPT
nvar = 4
x_L = numpy.ones((nvar), dtype=numpy.float_) * 1.0
x_U = numpy.ones((nvar), dtype=numpy.float_) * 5.0

ncon = 2
g_L = numpy.array([25.0, 40.0])
g_U = numpy.array([2.0 * pow(10.0, 19), 40.0])

nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f,
                     eval_grad_f, eval_g, eval_jac_g, eval_h)
start_time = time.time()
result = nlp.solve(x0)
end_time = time.time()
nlp.close()
pure_python_optimization_time = end_time - start_time

nlp_adolc = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh,
                           eval_f_adolc, eval_grad_f_adolc, eval_g_adolc,
                           eval_jac_g_adolc, eval_h_adolc)

start_time = time.time()
result_adolc = nlp_adolc.solve(x0)
end_time = time.time()
nlp_adolc.close()
Example n. 51
def Ipopt_Solve(problem):
    """Solves a Nexus optimization problem using ipopt

        Assumptions:
        You can actually install ipopt on your machine

        Source:
        N/A

        Inputs:
        problem    [nexus()]

        Outputs:
        result     [array]

        Properties Used:
        None
    """

    # Pull out the basic problem
    inp = problem.optimization_problem.inputs
    obj = problem.optimization_problem.objective
    con = problem.optimization_problem.constraints

    # Number of input variables and constrains
    nvar = len(inp)
    ncon = len(con)

    # Set inputs
    ini = inp[:, 1]  # Initials
    bndl = inp[:, 2]  # Bounds
    bndu = inp[:, 3]  # Bounds
    scl = inp[:, 4]  # Scale

    # Scaled initials
    x0 = ini / scl
    x0 = x0.astype(float)

    # Nonzero jacobians and hessians, fix this
    nnzj = ncon * nvar
    nnzh = nvar * nvar

    # Bounds for inputs and constraints
    flbd = np.zeros_like(ini)
    fubd = np.zeros_like(ini)
    for ii in range(0, nvar):
        flbd[ii] = (bndl[ii] / scl[ii])
        fubd[ii] = (bndu[ii] / scl[ii])

    g_L = np.zeros_like(con)
    g_U = np.zeros_like(con)

    # Setup constraints
    for ii in range(0, len(con)):
        name = con[ii][0]
        edge = con[ii][2]
        if con[ii][1] == '<':
            g_L[ii] = -np.inf
            g_U[ii] = edge
        elif con[ii][1] == '>':
            g_L[ii] = edge
            g_U[ii] = np.inf
        elif con[ii][1] == '=':
            g_L[ii] = edge
            g_U[ii] = edge

    # Instantiate the problem and set objective
    import pyipopt  #import down here to allow SUAVE to run without the user having Ipopt

    flbd = flbd.astype(float)
    fubd = fubd.astype(float)
    g_L = g_L.astype(float)
    g_U = g_U.astype(float)

    # Create the problem
    nlp = pyipopt.create(nvar, flbd, fubd, ncon, g_L, g_U, nnzj, nnzh, eval_f,
                         eval_grad_f, eval_g, eval_jac_g)

    nlp.str_option('derivative_test_print_all', 'yes')
    nlp.str_option('derivative_test', 'first-order')

    # Solve the problem
    result = nlp.solve(x0, problem)
    nlp.close()

    return result
Example n. 52
    def __solve__(self,
                  opt_problem={},
                  sens_type='FD',
                  store_sol=True,
                  disp_opts=False,
                  store_hst=False,
                  hot_start=False,
                  sens_mode='',
                  sens_step={},
                  *args,
                  **kwargs):
        """
        Run Optimizer (Optimize Routine)

        **Keyword arguments:**

        - opt_problem -> INST: Optimization instance
        - sens_type -> STR/FUNC: Gradient type, *Default* = 'FD'
        - store_sol -> BOOL: Store solution in Optimization class flag,
          *Default* = True
        - disp_opts -> BOOL: Flag to display options in solution text, *Default*
          = False
        - store_hst -> BOOL/STR: Flag/filename to store optimization history,
          *Default* = False
        - hot_start -> BOOL/STR: Flag/filename to read optimization history,
          *Default* = False
        - sens_mode -> STR: Flag for parallel gradient calculation, *Default* =
          ''
        - sens_step -> FLOAT: Sensitivity step size, *Default* = {} [corresponds
          to 1e-6 (FD), 1e-20 (CS)]

        Documentation last updated:  Feb. 2, 2011 - Peter W. Jansen
        """

        self.pll = False
        self.myrank = 0

        myrank = self.myrank

        tmp_file = False
        def_fname = self.options['output_file'][1].split('.')[0]
        if isinstance(store_hst, str):
            if isinstance(hot_start, str):
                if (myrank == 0):
                    if (store_hst == hot_start):
                        hos_file = History(hot_start, 'r', self)
                        log_file = History(store_hst + '_tmp', 'w', self,
                                           opt_problem.name)
                        tmp_file = True
                    else:
                        hos_file = History(hot_start, 'r', self)
                        log_file = History(store_hst, 'w', self,
                                           opt_problem.name)

                self.sto_hst = True
                self.hot_start = True
            elif hot_start:
                if (myrank == 0):
                    hos_file = History(store_hst, 'r', self)
                    log_file = History(store_hst + '_tmp', 'w', self,
                                       opt_problem.name)
                    tmp_file = True

                self.sto_hst = True
                self.hot_start = True
            else:
                if (myrank == 0):
                    log_file = History(store_hst, 'w', self, opt_problem.name)

                self.sto_hst = True
                self.hot_start = False

        elif store_hst:
            if isinstance(hot_start, str):
                if (hot_start == def_fname):
                    if (myrank == 0):
                        hos_file = History(hot_start, 'r', self)
                        log_file = History(def_fname + '_tmp', 'w', self,
                                           opt_problem.name)
                        tmp_file = True

                else:
                    if (myrank == 0):
                        hos_file = History(hot_start, 'r', self)
                        log_file = History(def_fname, 'w', self,
                                           opt_problem.name)

                self.sto_hst = True
                self.hot_start = True
            elif hot_start:
                if (myrank == 0):
                    hos_file = History(def_fname, 'r', self)
                    log_file = History(def_fname + '_tmp', 'w', self,
                                       opt_problem.name)
                    tmp_file = True

                self.sto_hst = True
                self.hot_start = True
            else:
                if (myrank == 0):
                    log_file = History(def_fname, 'w', self, opt_problem.name)

                self.sto_hst = True
                self.hot_start = False

        else:
            self.sto_hst = False
            self.hot_start = False

        gradient = Gradient(opt_problem, sens_type, sens_mode, sens_step,
                            *args, **kwargs)

        def eval_f(x, user_data=None):
            """IPOPT - Objective Value Function."""
            # Variables Groups Handling
            if opt_problem.use_groups:
                xg = {}
                for group in group_ids.keys():
                    if (group_ids[group][1] - group_ids[group][0] == 1):
                        xg[group] = x[group_ids[group][0]]
                    else:
                        xg[group] = x[group_ids[group][0]:group_ids[group][1]]

                xn = xg
            else:
                xn = x

            # Flush Output Files
            self.flushFiles()

            # Evaluate User Function
            fail = 0
            # if (myrank == 0):
            #    if self.hot_start:
            #        [vals,hist_end] = hos_file.read(ident=['obj', 'con', 'fail'])
            #        if hist_end:
            #            self.hot_start = False
            #            hos_file.close()
            #        else:
            #            [ff,gg,fail] = [vals['obj'][0][0],vals['con'][0],int(vals['fail'][0][0])]
            #
            #

            # if self.pll:
            #    self.hot_start = Bcast(self.hot_start,root=0)

            # if self.hot_start and self.pll:
            #    [ff,gg,fail] = Bcast([ff,gg,fail],root=0)
            # else:
            [ff, gg, fail] = opt_problem.obj_fun(xn, *args, **kwargs)

            # Store History
            if (myrank == 0):
                if self.sto_hst:
                    log_file.write(x, 'x')
                    log_file.write(ff, 'obj')
                    log_file.write(gg, 'con')
                    log_file.write(fail, 'fail')

            # Objective Assignment
            if isinstance(ff, complex):
                f = ff.astype(float)
            else:
                f = ff

            # Constraints Assignment
            g = numpy.zeros(len(opt_problem._constraints.keys()))
            for i in range(len(opt_problem._constraints.keys())):
                if isinstance(gg[i], complex):
                    g[i] = gg[i].astype(float)
                else:
                    g[i] = gg[i]

            return f

        def eval_g(x, user_data=None):

            # Variables Groups Handling
            if opt_problem.use_groups:
                xg = {}
                for group in group_ids.keys():
                    if (group_ids[group][1] - group_ids[group][0] == 1):
                        xg[group] = x[group_ids[group][0]]
                    else:
                        xg[group] = x[group_ids[group][0]:group_ids[group][1]]

                xn = xg
            else:
                xn = x

            # Flush Output Files
            self.flushFiles()

            # Evaluate User Function
            fail = 0
            #            if (myrank == 0):
            #                if self.hot_start:
            #                    [vals,hist_end] = hos_file.read(ident=['obj', 'con', 'fail'])
            #                    if hist_end:
            #                        self.hot_start = False
            #                        hos_file.close()
            #                    else:
            #                        [ff,gg,fail] = [vals['obj'][0][0],vals['con'][0],int(vals['fail'][0][0])]

            # if self.pll:
            #   self.hot_start = Bcast(self.hot_start,root=0)

            # if self.hot_start and self.pll:
            #    [ff,gg,fail] = Bcast([ff,gg,fail],root=0)
            # else:
            [ff, gg, fail] = opt_problem.obj_fun(xn, *args, **kwargs)

            # Store History
            if (myrank == 0):
                if self.sto_hst:
                    log_file.write(x, 'x')
                    log_file.write(ff, 'obj')
                    log_file.write(gg, 'con')
                    log_file.write(fail, 'fail')

            # Objective Assignment
            if isinstance(ff, complex):
                f = ff.astype(float)
            else:
                f = ff

            # Constraints Assignment
            g = numpy.zeros(len(opt_problem._constraints.keys()))
            for i in range(len(opt_problem._constraints.keys())):
                if isinstance(gg[i], complex):
                    g[i] = gg[i].astype(float)
                else:
                    g[i] = gg[i]

            return g

        def eval_grad_f(x, user_data=None):
            """IPOPT - Objective/Constraint Gradients Function."""
            # if self.hot_start:
            #    if (myrank == 0):
            #        [vals,hist_end] = hos_file.read(ident=['grad_obj','grad_con'])
            #        if hist_end:
            #            self.hot_start = False
            #            hos_file.close()
            #        else:
            #            dff = vals['grad_obj'][0].reshape((len(opt_problem._objectives.keys()),len(opt_problem._variables.keys())))
            #            dgg = vals['grad_con'][0].reshape((len(opt_problem._constraints.keys()),len(opt_problem._variables.keys())))
            #
            #
            #    if self.pll:
            #        self.hot_start = Bcast(self.hot_start,root=0)
            #
            #    if self.hot_start and self.pll:
            #        [dff,dgg] = Bcast([dff,dgg],root=0)
            #

            # if not self.hot_start:

            [f, g, fail] = opt_problem.obj_fun(x, *args, **kwargs)
            dff, dgg = gradient.getGrad(x, group_ids, [f], g, *args, **kwargs)

            # Store History
            if self.sto_hst and (myrank == 0):
                log_file.write(dff, 'grad_obj')
                log_file.write(dgg, 'grad_con')

            # Gradient Assignment
            df = numpy.zeros(len(opt_problem._variables.keys()))

            for i in range(len(opt_problem._variables.keys())):
                df[i] = dff[0, i]

            return df

        def eval_grad_g(x, flag, user_data=None):

            # if self.hot_start:
            #    if (myrank == 0):
            #        [vals,hist_end] = hos_file.read(ident=['grad_obj','grad_con'])
            #        if hist_end:
            #            self.hot_start = False
            #            hos_file.close()
            #        else:
            #            dff = vals['grad_obj'][0].reshape((len(opt_problem._objectives.keys()),len(opt_problem._variables.keys())))
            #            dgg = vals['grad_con'][0].reshape((len(opt_problem._constraints.keys()),len(opt_problem._variables.keys())))
            #
            #
            #    if self.pll:
            #        self.hot_start = Bcast(self.hot_start,root=0)
            #
            #    if self.hot_start and self.pll:
            #        [dff,dgg] = Bcast([dff,dgg],root=0)
            #

            # if not self.hot_start:

            if flag:
                a = numpy.zeros(
                    len(opt_problem._variables.keys()) *
                    len(opt_problem._constraints.keys()), int)
                b = numpy.zeros(
                    len(opt_problem._variables.keys()) *
                    len(opt_problem._constraints.keys()), int)

                for i in range(len(opt_problem._constraints.keys())):
                    for j in range(len(opt_problem._variables.keys())):
                        a[i * len(opt_problem._variables.keys()) + j] = i
                        b[i * len(opt_problem._variables.keys()) + j] = j
                return (a, b)

            else:
                [f, g, fail] = opt_problem.obj_fun(x, *args, **kwargs)
                dff, dgg = gradient.getGrad(x, group_ids, [f], g, *args,
                                            **kwargs)

                # Store History
                if self.sto_hst and (myrank == 0):
                    log_file.write(dff, 'grad_obj')
                    log_file.write(dgg, 'grad_con')

                # Gradient Assignment
                a = numpy.zeros([
                    len(opt_problem._variables.keys()) *
                    len(opt_problem._constraints.keys())
                ])
                for i in range(len(opt_problem._constraints.keys())):
                    for j in range(len(opt_problem._variables.keys())):
                        a[i * len(opt_problem._variables.keys()) +
                          j] = dgg[i, j]

                return a

        # Variables Handling
        nvar = len(opt_problem._variables.keys())
        xl = []
        xu = []
        xx = []
        for key in opt_problem._variables.keys():
            if opt_problem._variables[key].type == 'c':
                xl.append(opt_problem._variables[key].lower)
                xu.append(opt_problem._variables[key].upper)
                xx.append(opt_problem._variables[key].value)
            elif opt_problem._variables[key].type == 'i':
                raise IOError('IPOPT cannot handle integer design variables')
            elif opt_problem._variables[key].type == 'd':
                raise IOError('IPOPT cannot handle discrete design variables')

        xl = numpy.array(xl)
        xu = numpy.array(xu)
        xx = numpy.array(xx)

        # Variables Groups Handling
        group_ids = {}
        if opt_problem.use_groups:
            k = 0
            for key in opt_problem._vargroups.keys():
                group_len = len(opt_problem._vargroups[key]['ids'])
                group_ids[opt_problem._vargroups[key][
                    'name']] = [k, k + group_len]
                k += group_len

            # Constraints Handling
        ncon = len(opt_problem._constraints.keys())
        blc = []
        buc = []
        if ncon > 0:
            for key in opt_problem._constraints.keys():
                if opt_problem._constraints[key].type == 'e':
                    blc.append(opt_problem._constraints[key].equal)
                    buc.append(opt_problem._constraints[key].equal)
                elif opt_problem._constraints[key].type == 'i':
                    blc.append(opt_problem._constraints[key].lower)
                    buc.append(opt_problem._constraints[key].upper)

        else:
            if ((store_sol) and (myrank == 0)):
                print("Optimization Problem Does Not Have Constraints\n")
                print("Unconstrained Optimization Initiated\n")

            ncon = 1
            blc.append(-inf)
            buc.append(inf)

        blc = numpy.array(blc)
        buc = numpy.array(buc)

        # Objective Handling
        objfunc = opt_problem.obj_fun
        nobj = len(opt_problem._objectives.keys())
        ff = []
        for key in opt_problem._objectives.keys():
            ff.append(opt_problem._objectives[key].value)

        ff = numpy.array(ff)

        # Create an IPOPT instance problem
        nnzj = nvar * ncon
        nnzh = nvar * nvar
        ipopt = pyipopt.create(nvar, xl, xu, ncon, blc, buc, nnzj, nnzh,
                               eval_f, eval_grad_f, eval_g, eval_grad_g)
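        # nnzj/nnzh above assume dense derivative matrices; since no Hessian
        # callback is passed to create(), nnzh is not actually used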

        # Setup Options
        opts = self.options.copy()
        del opts['defaults']

        for i in opts:
            if self.options['defaults'][i][1] != opts[i][1]:
                if self.options[i][0].__name__ == 'int':
                    ipopt.int_option(i, self.options[i][1])

                if self.options[i][0].__name__ == 'float':
                    ipopt.num_option(i, self.options[i][1])

                if self.options[i][0].__name__ == 'str':
                    ipopt.str_option(i, self.options[i][1])

        # Run IPOPT

        t0 = time.time()
        r = ipopt.solve(xx)
        sol_time = time.time() - t0

        if (myrank == 0):
            if self.sto_hst:
                log_file.close()
                if tmp_file:
                    hos_file.close()
                    name = hos_file.filename
                    os.remove(name + '.cue')
                    os.remove(name + '.bin')
                    os.rename(name + '_tmp.cue', name + '.cue')
                    os.rename(name + '_tmp.bin', name + '.bin')

        ipopt.close()

        # Store Results
        sol_inform = {}
        print(r)
        sol_inform['value'] = r[-1]  # ifail[0]
        sol_inform['text'] = self.getInform(r[-1])  # self.getInform(ifail[0])

        if store_sol:
            sol_name = 'IPOPT Solution to ' + opt_problem.name

            sol_options = copy.copy(self.options)
            if 'defaults' in sol_options:
                del sol_options['defaults']

            sol_evals = 0

            sol_vars = copy.deepcopy(opt_problem._variables)
            i = 0
            x = r[0]
            for key in sol_vars.keys():
                sol_vars[key].value = x[i]
                i += 1

            sol_objs = copy.deepcopy(opt_problem._objectives)
            sol_objs[0].value = r[4]

            sol_cons = {}

            if ncon > 0:
                sol_lambda = r[3]
            else:
                sol_lambda = {}

            opt_problem.addSol(
                self.__class__.__name__,
                sol_name,
                objfunc,
                sol_time,
                sol_evals,
                sol_inform,
                sol_vars,
                sol_objs,
                sol_cons,
                sol_options,
                display_opts=disp_opts,
                Lambda=sol_lambda,
                Sensitivities=sens_type,
                myrank=myrank,
                arguments=args,
                **kwargs)

        return ff, xx, sol_inform  # ifail[0]
Example n. 53
0
    def solve(self, x0=None, options={}):
        """Solve the nonlinear problem from a specified starting point.

        Arguments:
        x0 -- array of length self.nvars with initial guesses for values of
        self.variables (in that order), or a scalar to be used as the initial
        guess for all variables; if not supplied, all variables will be 
        set initially to 1.0.
        options -- dictionary {'option': value} of options to pass to 
        IPOPT. Options will be taken from (in increasing order of priority)
        IPOPT's default values, the module-wide default_ipopt_options, those
        specified in self.fixed_variable_treatment or self.ipopt_options, 
        this argument, or the ipopt.opt file (if any) in the working directory.
        Errors will result if invalid names or values for options are given 
        here.
        
        Returns: 
        x -- array of variable values returned by IPOPT (ordered as in 
        self.variables)

        Also sets self.x, self.zl, self.zu, self.constraint_multipliers, 
        self.obj_value, and self.status to x, the lower and upper
        bound multipliers, the Lagrange multipliers associated with
        the constraint functions, the final objective function value,
        and the optimization status, respectively, and sets self.soln
        to a dictionary mapping each variable key to its value in
        self.x.
   
        This method does not recompile the objective, constraint and
        derivative functions if self.compile() has already been
        called. If anything except bounds on variables, bounds on
        constraints, and/or parameter values has been changed since
        the last time self.compile() has been called, self.compile
        must be called again before solving, or this method will
        attempt to solve an out-of-date version of the problem.

        Each call to solve creates a new pyipopt problem instance
        as self.nlp, closing the old one beforehand if self.active_nlp
        is true, and resetting self.active_nlp to true afterwards.

        """

        if not self._compiled:
            self.compile()

        if x0 is None:
            x0 = np.ones(self.nvar)
        elif np.isscalar(x0):
            x0 = x0 * np.ones(self.nvar)
        else:
            # Assume this is a vector. Check its length, as trying to
            # proceed with an inappropriately sized starting point may
            # lead to crashes.
            if len(x0) != self.nvar:
                message = (
                    'Starting point has wrong dimension (needed %d, given %d)'
                    % (self.nvar, len(x0)))
                raise ValueError(message)

        self.close_nlp()

        x_L, x_U = self.make_variable_bound_vectors()
        g_L, g_U = self.make_constraint_bound_vectors()

        # The following avoids a segmentation fault that results
        # when bound methods are supplied as evaluation functions
        # to pyipopt, which I don't really understand:
        eval_f = lambda x, user_data=None: self.eval_f(x)
        eval_grad_f = lambda x, user_data=None: self.eval_grad_f(x)
        eval_g = lambda x, user_data=None: self.eval_g(x)
        eval_jac_g = lambda x, flag, user_data=None: self.eval_jac_g(x, flag)
        eval_h = lambda x, lagrange, obj_factor, flag, user_data=None: \
            self.eval_h(x, lagrange, obj_factor, flag)

        self.nlp = pyipopt.create(self.nvar, x_L, x_U, self.ncon, g_L, g_U,
                                  self._nnzj, self._nnzh, eval_f, eval_grad_f,
                                  eval_g, eval_jac_g, eval_h)

        self.active_nlp = True
        # Handle generic IPOPT options
        all_options = {}
        all_options.update(default_ipopt_options)
        all_options['fixed_variable_treatment'] = self.fixed_variable_treatment
        all_options.update(self.ipopt_options)
        all_options.update(options)
        for option, value in all_options.iteritems():
            if isinstance(value, str):
                self.nlp.str_option(option, value)
            elif isinstance(value, int):
                self.nlp.int_option(option, value)
            else:
                self.nlp.num_option(option, value)

        (self.x, self.zl, self.zu, self.constraint_multipliers, self.obj_value,
         self.status) = self.nlp.solve(x0)

        self.soln = dict(zip(self.variables, self.x))

        if self.status not in (0, 1):
            raise OptimizationFailure('IPOPT exited with status %d' %
                                      self.status)

        return self.x.copy()
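
For orientation, here is a minimal usage sketch of the method above; the problem instance and its construction are hypothetical placeholders, while 'max_iter' and 'tol' are standard IPOPT options:

    # Hedged sketch: assume `problem` is an already-compiled instance of
    # the class above, with variables, constraints and bounds defined.
    x = problem.solve(x0=1.0, options={'max_iter': 200, 'tol': 1e-8})
    print problem.status  # IPOPT return status (0 or 1 on success)
    print problem.soln    # maps each variable key to its optimal value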
# sparse jacobian of g
assert_array_equal(eval_jac_g_adolc(x0, True)[0], eval_jac_g(x0, True)[0])
assert_array_equal(eval_jac_g_adolc(x0, True)[1], eval_jac_g(x0, True)[1])
assert_array_equal(eval_jac_g_adolc(x0, False), eval_jac_g(x0, False))
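
# A minimal, self-contained illustration of the structure/values ("flag")
# convention exercised by the assertions above; demo_jac is hypothetical
# and not part of the original test:
def demo_jac(x, flag, user_data=None):
    if flag:
        # Structure phase: (rows, cols) of the nonzero Jacobian entries.
        return (numpy.array([0, 0, 1]), numpy.array([0, 1, 1]))
    else:
        # Values phase: the nonzeros in the same order as the structure.
        return numpy.array([2.0 * x[0], 1.0, 3.0 * x[1] ** 2])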


# test optimization with PYIPOPT
nvar = 4
x_L = numpy.ones((nvar), dtype=numpy.float_) * 1.0
x_U = numpy.ones((nvar), dtype=numpy.float_) * 5.0

ncon = 2
g_L = numpy.array([25.0, 40.0])
g_U = numpy.array([2.0 * pow(10.0, 19), 40.0])

nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g, eval_h)
start_time = time.time()
result = nlp.solve(x0)
end_time = time.time()
nlp.close()
pure_python_optimization_time = end_time - start_time


nlp_adolc = pyipopt.create(
    nvar,
    x_L,
    x_U,
    ncon,
    g_L,
    g_U,
    nnzj,
    nnzh,
    eval_f_adolc,
    eval_grad_f_adolc,
    eval_g_adolc,
    eval_jac_g_adolc,
    eval_h_adolc)
# (The arguments from nnzh onward are assumed; they mirror the plain
# create() call above, with the adolc-traced callbacks substituted.)

# PRELIMS: define the number of nonzeros in the Jacobian
val = Jac_c_adolc(x0, False)
nnzj = len(val)

# create the nonlinear programming model
nlp2 = pyipopt.create(
    nvar,
    x_L,
    x_U,
    ncon,
    g_L,
    g_U,
    nnzj,
    nnzh,
    adFun,
    grFun,
    const_adolc,
    Jac_c_adolc,
    hessLag_adolc)

nlp2.num_option('expect_infeasible_problem_ctol', 1e-15)
nlp2.int_option('max_iter', 5000)

nlp2.num_option('dual_inf_tol', 1e-5)
nlp2.num_option('constr_viol_tol', 1e-5)
nlp2.num_option('tol', 1e-6)
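
# The fragment stops after setting options; a minimal, assumed continuation
# (not part of the source) would solve the problem and free its resources:
result2 = nlp2.solve(x0)
nlp2.close()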
Example n. 56
0
    def solve(self):
        """ Solves AC optimal power flow.
        """
        case = self.om.case
        base_mva = case.base_mva
        # TODO: Explain this value.
        self.opt["cost_mult"] = 1e-4

        # Unpack the OPF model.
        bs, ln, gn, _ = self._unpack_model(self.om)
        # Compute problem dimensions.
        ipol, _, nb, nl, _, ny, nxyz = self._dimension_data(bs, ln, gn)

        # Compute problem dimensions.
        ng = len(gn)
#        gpol = [g for g in gn if g.pcost_model == POLYNOMIAL]
        # Indexes of constrained lines.
        il = array([i for i,l in enumerate(ln) if 0.0 < l.rate_a < 1e10])
        nl2 = len(il)

        # Linear constraints (l <= A*x <= u).
        A, l, u = self.om.linear_constraints()
#        AA, bb = self._linear_constraints(self.om)

        _, xmin, xmax = self._var_bounds()

        # Select an interior initial point for interior point solver.
        x0 = self._initial_interior_point(bs, gn, xmin, xmax, ny)

        # Build admittance matrices.
        Ybus, Yf, Yt = case.Y

        # Optimisation variables.
        Va = self.om.get_var("Va")
        Vm = self.om.get_var("Vm")
        Pg = self.om.get_var("Pg")
        Qg = self.om.get_var("Qg")

        # Adds a constraint on the reference bus angles.
#        xmin, xmax = self._ref_bus_angle_constraint(bs, Va, xmin, xmax)

        def f_fcn(x, user_data=None):
            """ Evaluates the objective function.
            """
            p_gen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            q_gen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            # Polynomial cost of P and Q.
            xx = r_[p_gen, q_gen] * base_mva
            if len(ipol) > 0:
                f = sum([g.total_cost(xx[i]) for i,g in enumerate(gn)])
            else:
                f = 0

            # Piecewise linear cost of P and Q.
            if ny:
                y = self.om.get_var("y")
                ccost = csr_matrix((ones(ny),
                    (range(y.i1, y.iN + 1), zeros(ny))), shape=(nxyz, 1)).T
                f = f + ccost * x
            else:
                ccost = zeros((1, nxyz))
                # TODO: Generalised cost term.

            return f


        def df_fcn(x, usr_data=None):
            """ Calculates gradient of the objective function.
            """
            p_gen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            q_gen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            xx = r_[p_gen, q_gen] * base_mva

            if ny > 0:
                y = self.om.get_var("y")
                iy = range(y.i1, y.iN + 1)
                ccost = \
                    csr_matrix((ones(ny), (iy, zeros(ny))), shape=(nxyz, 1)).T
            else:
                ccost = zeros((1, nxyz))
                # TODO: Generalised cost term.

            iPg = range(Pg.i1, Pg.iN + 1)
            iQg = range(Qg.i1, Qg.iN + 1)

            # Polynomial cost of P and Q.
            df_dPgQg = zeros((2 * ng, 1))        # w.r.t p.u. Pg and Qg
#            df_dPgQg[ipol] = matrix([g.poly_cost(xx[i], 1) for g in gpol])
#            for i, g in enumerate(gn):
#                der = polyder(list(g.p_cost))
#                df_dPgQg[i] = polyval(der, xx[i]) * base_mva
            for i in ipol:
                df_dPgQg[i] = \
                    base_mva * polyval(polyder(list(gn[i].p_cost)), xx[i])

            df = zeros((nxyz, 1))
            df[iPg] = df_dPgQg[:ng]
            df[iQg] = df_dPgQg[ng:ng + ng]

            # Piecewise linear cost of P and Q.
            df = df + ccost.T
            # TODO: Generalised cost term.

            return asarray(df).flatten()


        def g_fcn(x, usr_data=None):
            """ Evaluates the non-linear constraint values.
            """
            Pgen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            Qgen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            for i, g in enumerate(gn):
                g.p = Pgen[i] * base_mva # active generation in MW
                g.q = Qgen[i] * base_mva # reactive generation in MVAr

            # Rebuild the net complex bus power injection vector in p.u.
            Sbus = case.getSbus(bs)

            Vang = x[Va.i1:Va.iN + 1]
            Vmag = x[Vm.i1:Vm.iN + 1]
            V = Vmag * exp(1j * Vang)

            # Evaluate the power flow equations.
            mis = V * conj(Ybus * V) - Sbus

            # Equality constraints (power flow).
            g = r_[mis.real,  # active power mismatch for all buses
                   mis.imag]  # reactive power mismatch for all buses

            # Inequality constraints (branch flow limits).
            # (line constraint is actually on square of limit)
            flow_max = array([(l.rate_a / base_mva)**2 for l in ln])
            # FIXME: There must be a more elegant method for this.
            for i, v in enumerate(flow_max):
                if v == 0.0:
                    flow_max[i] = Inf

            if self.flow_lim == IFLOW:
                If = Yf * V
                It = Yt * V
                # Branch current limits.
                h = r_[(If * conj(If)) - flow_max,
                       (It * conj(It)) - flow_max]
            else:
                i_fbus = [e.from_bus._i for e in ln]
                i_tbus = [e.to_bus._i for e in ln]
                # Complex power injected at "from" bus (p.u.).
                Sf = V[i_fbus] * conj(Yf * V)
                # Complex power injected at "to" bus (p.u.).
                St = V[i_tbus] * conj(Yt * V)
                if self.flow_lim == PFLOW: # active power limit, P (Pan Wei)
                    # Branch real power limits.
                    h = r_[Sf.real**2 - flow_max,
                           St.real**2 - flow_max]
                elif self.flow_lim == SFLOW: # apparent power limit, |S|
                    # Branch apparent power limits.
                    h = r_[(Sf * conj(Sf)) - flow_max,
                           (St * conj(St)) - flow_max].real
                else:
                    raise ValueError

            return r_[g, h]


        def dg_fcn(x, flag, usr_data=None):
            """ Calculates the Jacobian matrix. It takes two arguments, the
                first is the variable x and the second is a Boolean flag. If
                the flag is true, the function returns a tuple of arrays
                (row, col) to indicate the sparse structure of the Jacobian
                matrix. If the flag is false the function returns the values
                of the Jacobian matrix with length nnzj.
            """
            iVa = range(Va.i1, Va.iN + 1)
            iVm = range(Vm.i1, Vm.iN + 1)
            iPg = range(Pg.i1, Pg.iN + 1)
            iQg = range(Qg.i1, Qg.iN + 1)
            iVaVmPgQg = r_[iVa, iVm, iPg, iQg].T

            Vang = x[Va.i1:Va.iN + 1]
            Vmag = x[Vm.i1:Vm.iN + 1]
            V = Vmag * exp(1j * Vang)

            # Compute partials of injected bus powers.
            dSbus_dVm, dSbus_dVa = case.dSbus_dV(Ybus, V)

            i_gbus = [gen.bus._i for gen in gn]
            neg_Cg = csr_matrix((-ones(ng), (i_gbus, range(ng))), (nb, ng))

            # Transposed Jacobian of the power balance equality constraints.
            dg = lil_matrix((nxyz, 2 * nb))

            blank = csr_matrix((nb, ng))
            dg[iVaVmPgQg, :] = vstack([
                hstack([dSbus_dVa.real, dSbus_dVm.real, neg_Cg, blank]),
                hstack([dSbus_dVa.imag, dSbus_dVm.imag, blank, neg_Cg])
            ], "csr").T

            # Compute partials of flows w.r.t V.
            if self.flow_lim == IFLOW:
                dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \
                    case.dIbr_dV(Yf, Yt, V)
            else:
                dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \
                    case.dSbr_dV(Yf, Yt, V, bs, ln)
            if self.flow_lim == PFLOW:
                dFf_dVa = dFf_dVa.real
                dFf_dVm = dFf_dVm.real
                dFt_dVa = dFt_dVa.real
                dFt_dVm = dFt_dVm.real
                Ff = Ff.real
                Ft = Ft.real

            # Squared magnitude of flow (complex power, current or real power).
            df_dVa, df_dVm, dt_dVa, dt_dVm = \
                case.dAbr_dV(dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft)

            # Construct Jacobian of inequality constraints (branch limits) and
            # transpose it.
            dh = lil_matrix((nxyz, 2 * nl))
            dh[r_[iVa, iVm].T, :] = vstack([hstack([df_dVa, df_dVm]),
                                            hstack([dt_dVa, dt_dVm])], "csr").T

            J = vstack([dg, dh, A]).tocoo()

            if flag:
                return (J.row, J.col)
            else:
                return J.data


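        # h_fcn follows the same two-phase protocol as dg_fcn above: it is
        # called once with flag=True for the (row, col) Hessian structure,
        # then with flag=False for the matching array of nonzero values.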
        def h_fcn(x, lagrange, obj_factor, flag, usr_data=None):
            """ Evaluates the Hessian of the Lagrangian.
            """
            neqnln = 2 * nb
            niqnln = 2 * len(il) # no. of lines with constraints

            Pgen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            Qgen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            for i, g in enumerate(gn):
                g.p = Pgen[i] * base_mva # active generation in MW
                g.q = Qgen[i] * base_mva # reactive generation in MVAr

            Vang = x[Va.i1:Va.iN + 1]
            Vmag = x[Vm.i1:Vm.iN + 1]
            V = Vmag * exp(1j * Vang)
            nxtra = nxyz - 2 * nb

            #------------------------------------------------------------------
            #  Evaluate d2f.
            #------------------------------------------------------------------

            d2f_dPg2 = lil_matrix((ng, 1)) # w.r.t p.u. Pg
            d2f_dQg2 = lil_matrix((ng, 1)) # w.r.t p.u. Qg

            for i in ipol:
                d2f_dPg2[i, 0] = polyval(polyder(list(gn[i].p_cost), 2),
                                         Pg.v0[i] * base_mva) * base_mva**2
#            for i in ipol:
#                d2f_dQg2[i] = polyval(polyder(list(gn[i].p_cost), 2),
#                                      Qg.v0[i] * base_mva) * base_mva**2

            i = r_[range(Pg.i1, Pg.iN + 1), range(Qg.i1, Qg.iN + 1)]

            d2f = csr_matrix((vstack([d2f_dPg2, d2f_dQg2]).toarray().flatten(),
                              (i, i)), shape=(nxyz, nxyz))
            # TODO: Generalised cost model.
            d2f = d2f * self.opt["cost_mult"]

            #------------------------------------------------------------------
            #  Evaluate Hessian of power balance constraints.
            #------------------------------------------------------------------

            eqnonlin = lagrange[:neqnln]
#            nlam = len(lagrange["eqnonlin"]) / 2
            nlam = len(eqnonlin) / 2
            lamP = eqnonlin[:nlam]
            lamQ = eqnonlin[nlam:nlam + nlam]
            Gpaa, Gpav, Gpva, Gpvv = case.d2Sbus_dV2(Ybus, V, lamP)
            Gqaa, Gqav, Gqva, Gqvv = case.d2Sbus_dV2(Ybus, V, lamQ)

            d2G = vstack([
                hstack([
                    vstack([hstack([Gpaa, Gpav]),
                            hstack([Gpva, Gpvv])]).real +
                    vstack([hstack([Gqaa, Gqav]),
                            hstack([Gqva, Gqvv])]).imag,
                    csr_matrix((2 * nb, nxtra))]),
                hstack([
                    csr_matrix((nxtra, 2 * nb)),
                    csr_matrix((nxtra, nxtra))
                ])
            ], "csr")

            #------------------------------------------------------------------
            #  Evaluate Hessian of flow constraints.
            #------------------------------------------------------------------

            ineqnonlin = lagrange[neqnln:neqnln + niqnln]
            nmu = len(ineqnonlin) / 2
            muF = ineqnonlin[:nmu]
            muT = ineqnonlin[nmu:nmu + nmu]
            if self.flow_lim == "I":
                dIf_dVa, dIf_dVm, dIt_dVa, dIt_dVm, If, It = \
                    case.dIbr_dV(Yf, Yt, V)
                Hfaa, Hfav, Hfva, Hfvv = \
                    case.d2AIbr_dV2(dIf_dVa, dIf_dVm, If, Yf, V, muF)
                Htaa, Htav, Htva, Htvv = \
                    case.d2AIbr_dV2(dIt_dVa, dIt_dVm, It, Yt, V, muT)
            else:
                f = [e.from_bus._i for e in ln]
                t = [e.to_bus._i for e in ln]
                # Line-bus connection matrices.
                Cf = csr_matrix((ones(nl), (range(nl), f)), (nl, nb))
                Ct = csr_matrix((ones(nl), (range(nl), t)), (nl, nb))
                dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St = \
                    case.dSbr_dV(Yf, Yt, V)
                if self.flow_lim == PFLOW:
                    Hfaa, Hfav, Hfva, Hfvv = \
                        case.d2ASbr_dV2(dSf_dVa.real, dSf_dVm.real,
                                        Sf.real, Cf, Yf, V, muF)
                    Htaa, Htav, Htva, Htvv = \
                        case.d2ASbr_dV2(dSt_dVa.real, dSt_dVm.real,
                                        St.real, Ct, Yt, V, muT)
                elif self.flow_lim == SFLOW:
                    Hfaa, Hfav, Hfva, Hfvv = \
                        case.d2ASbr_dV2(dSf_dVa, dSf_dVm, Sf, Cf, Yf, V, muF)
                    Htaa, Htav, Htva, Htvv = \
                        case.d2ASbr_dV2(dSt_dVa, dSt_dVm, St, Ct, Yt, V, muT)
                else:
                    raise ValueError

            d2H = vstack([
                hstack([
                    vstack([hstack([Hfaa, Hfav]),
                            hstack([Hfva, Hfvv])]) +
                    vstack([hstack([Htaa, Htav]),
                            hstack([Htva, Htvv])]),
                    csr_matrix((2 * nb, nxtra))
                ]),
                hstack([
                    csr_matrix((nxtra, 2 * nb)),
                    csr_matrix((nxtra, nxtra))
                ])
            ], "csr")

            # Convert to COO format so the (row, col) structure and the
            # matching data array are available, as in dg_fcn above.
            H = (d2f + d2G + d2H).tocoo()

            if flag:
                return (H.row, H.col)
            else:
                return H.data

        n = len(x0) # the number of variables
        gl = r_[zeros(2 * nb), -Inf * ones(2 * nl2), l]
        gu = r_[zeros(2 * nb),       zeros(2 * nl2), u]
        m = len(gl) # the number of constraints
        # Sizes of the sparsity structures, obtained by evaluating the
        # derivative callbacks in structure mode (flag=True); dummy
        # multipliers are passed to h_fcn since only the pattern is needed.
        nnzj = len(dg_fcn(x0, True)[0])  # number of nonzeros in the Jacobian
        nnzh = len(h_fcn(x0, ones(m), 1.0, True)[0])  # nonzeros in the Hessian

        nlp = pyipopt.create(n, xmin, xmax, m, gl, gu, nnzj, nnzh,
                             f_fcn, df_fcn, g_fcn, dg_fcn, h_fcn)

        x, zl, zu, constraint_multipliers, obj, status = nlp.solve(x0)
        nlp.close()

        print "Status:", status
        print "Solution of the primal variables, x"
        print x
        print "Solution of the bound multipliers, z_L and z_U"
        print zl, zu
        print "Objective value"