def runOptimization(self):
    pyipopt.set_loglevel(0)
    self.nlp = pyipopt.create(
        self.optDict['nPrimal'],
        self.constraintDict['lowerBound'],
        self.constraintDict['upperBound'],
        self.optDict['nConstraint'],
        self.constraintDict['lowerConstraint'],
        self.constraintDict['upperConstraint'],
        self.optDict['nJacobianNonZero'],
        self.optDict['nHessianNonZero'],
        self.evalObjectiveFunction,
        self.evalObjectiveFunctionGradient,
        self.evalConstraints,
        self.evalConstraintsJacbobian)
    self.nlp.int_option("max_iter", 150)
    # self.nlp.str_option('derivative_test', 'first-order')
    # self.nlp.num_option('derivative_test_tol', 1e-2)
    # self.nlp.num_option('derivative_test_perturbation', 1e-3)
    self.nlp.num_option('point_perturbation_radius', 1e-3)
    # self.nlp.int_option('max_iter', 100)
    # self.nlp.str_option('derivative_test_print_all', 'yes')
    # self.nlp.int_option('derivative_test_first_index', 330)
    # self.nlp.num_option('acceptable_constr_viol_tol', 1)
    x, zl, zu, constraint_multipliers, obj, status = self.nlp.solve(self.initialGuess)
    return x[self.sliceDict['varMu']], x[self.sliceDict['varTheta']], self.optX, self.optY

def optimizePC(channel, noiseIfPower, rate, linkBandwidth, pMax, p0, m, verbosity=0):
    '''
    Uses channel values, PHY parameters and power consumption characteristics
    to find the minimal resource allocation. Returns resource allocation,
    objective value and IPOPT status.

    Input:
        channel      - 3d array. 0d users, 1d n_tx, 2d n_rx
        noiseIfPower - total noise power over the link bandwidth
        rate         - target rate in bps
        pMax         - maximum allowed transmission power
        p0           - power consumption at zero transmission (not sleep)
        m            - power consumption load factor
        verbosity    - IPOPT verbosity level

    Output:
        obj      - solution objective value
        solution - resource share per user
        status   - IPOPT status
    '''
    # the channel dimensions tell some more parameters
    users = channel.shape[0]
    n_tx = channel.shape[1]
    n_rx = channel.shape[2]

    # preparing IPOPT parameters
    nvar = users  # for readability
    x_L = zeros((nvar), dtype=float_) * 0.0
    x_U = ones((nvar), dtype=float_) * 1.0
    ncon = nvar + 1  # transmit power constraints and the unit sum
    g_L = zeros(1 + nvar)  # unit sum and all power constraints
    g_L[0] = 1.
    g_U = pMax * ones(1 + nvar)  # unit sum and all power constraints
    g_U[0] = 1.
    nnzj = nvar * (1 + nvar)
    nnzh = 0  # used?
    x0 = repeat([1. / (nvar + 1)], nvar)  # Starting point

    # IPOPT requires single parameter functions
    if n_tx == 2 and n_rx == 2:  # '==' not 'is': identity checks on ints are unreliable
        eval_f = lambda mus: optimMinPow2x2.eval_f(mus, noiseIfPower, channel, rate, linkBandwidth, p0, m)
        eval_grad_f = lambda mus: optimMinPow2x2.eval_grad_f(mus, noiseIfPower, channel, rate, linkBandwidth, p0, m)
        eval_g = lambda mus: optimMinPow2x2.eval_g(mus, noiseIfPower, channel, rate, linkBandwidth)
        eval_jac_g = lambda mus, flag: optimMinPow2x2.eval_jac_g(mus, noiseIfPower, channel, rate, linkBandwidth, flag)
    else:
        raise NotImplementedError  # other combinations may be needed later

    # Call solve()
    pyipopt.set_loglevel(min([2, verbosity]))  # verbose
    nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh,
                         eval_f, eval_grad_f, eval_g, eval_jac_g)
    #nlp.int_option("max_iter", 3000)
    #nlp.num_option("tol", 1e-8)
    #nlp.num_option("acceptable_tol", 1e-2)
    #nlp.int_option("acceptable_iter", 0)
    nlp.str_option("derivative_test", "first-order")
    nlp.str_option("derivative_test_print_all", "no")
    #nlp.str_option("print_options_documentation", "yes")
    nlp.int_option("print_level", min([verbosity, 12]))  # maximum is 12
    nlp.str_option("print_user_options", "yes")
    solution, zl, zu, obj, status = nlp.solve(x0)
    nlp.close()

    return obj, solution, status

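# A minimal, hypothetical call of optimizePC -- every number below is
# invented for illustration, and the optimMinPow2x2 module must be
# importable for the 2x2 branch to work:
if __name__ == '__main__':
    from numpy import random
    demo_channel = abs(random.randn(4, 2, 2))  # 4 users, 2x2 channel gains
    demo_obj, demo_solution, demo_status = optimizePC(
        demo_channel, noiseIfPower=1e-10, rate=1e6, linkBandwidth=1e7,
        pMax=40.0, p0=100.0, m=2.4, verbosity=0)
    print('IPOPT status:', demo_status)
    print('Resource shares per user:', demo_solution)
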
def main():
    pyipopt.set_loglevel(2)
    x0 = numpy.array([-3, -1, -3, -1], dtype=float)
    results = pyipopt.fmin_unconstrained(
        wood,
        x0,
        fprime=functools.partial(eval_grad, wood),
        fhess=functools.partial(eval_hess, wood),
    )
    print(results)

def main():
    pyipopt.set_loglevel(2)
    x0 = numpy.array([-1.2, 1], dtype=float)
    results = pyipopt.fmin_unconstrained(
        scipy.optimize.rosen,
        x0,
        fprime=scipy.optimize.rosen_der,
        fhess=scipy.optimize.rosen_hess,
    )
    print(results)

def solve_unconstr(theta0):
    pyipopt.set_loglevel(1)
    thetahat, _, _, _, _, fval = pyipopt.fmin_unconstrained(
        eval_f,
        theta0,
        fprime=eval_grad,
        fhess=eval_hess,
    )
    return thetahat

def main():
    # verbose
    pyipopt.set_loglevel(2)

    # define the parameters and their box constraints
    nvar = 2
    x_L = numpy.array([-3, -3], dtype=float)
    x_U = numpy.array([3, 3], dtype=float)

    # define the inequality constraints
    ncon = 0
    g_L = numpy.array([], dtype=float)
    g_U = numpy.array([], dtype=float)

    # define the number of nonzeros in the jacobian and in the hessian
    # there are no nonzeros in the constraint jacobian
    nnzj = 0
    # there are at most (nvar*(nvar+1))/2 nonzeros in the lagrangian hessian
    nnzh = 3

    # create the nonlinear programming model
    nlp = pyipopt.create(
        nvar,
        x_L,
        x_U,
        ncon,
        g_L,
        g_U,
        nnzj,
        nnzh,
        eval_f,
        eval_grad_f,
        eval_g,
        eval_jac_g,
        eval_h,
        apply_new,
    )

    # define the initial guess
    x0 = numpy.array([-1.2, 1], dtype=float)

    # compute the results using ipopt
    results = nlp.solve(x0)

    # free the model
    nlp.close()

    # report the results
    print(results)

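# Note on the callback protocol used throughout these examples: pyipopt
# invokes eval_jac_g and eval_h twice -- once with flag=True to obtain the
# sparsity structure (row/column index arrays) and once with flag=False to
# obtain the values at x. The sketch below is illustrative only and not part
# of the examples above; it assumes a dense 2-variable problem with
# objective f(x) = x0**2 + x1**2 and one constraint g(x) = x0**2 + x1:
import numpy

def eval_jac_g_example(x, flag):
    if flag:
        # structure: constraint 0 depends on both variables
        return (numpy.array([0, 0]), numpy.array([0, 1]))
    else:
        # values at x, in the same order as the structure above
        return numpy.array([2.0 * x[0], 1.0])

def eval_h_example(x, lagrange, obj_factor, flag):
    # lower triangle of obj_factor * H_f + lagrange[0] * H_g
    if flag:
        return (numpy.array([0, 1, 1]), numpy.array([0, 0, 1]))
    else:
        return numpy.array([obj_factor * 2.0 + lagrange[0] * 2.0,  # (0,0)
                            0.0,                                   # (1,0)
                            obj_factor * 2.0])                     # (1,1)
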
def __build_pyipopt_problem(self):
    """Build the pyipopt problem from the OptimizationProblem instance."""
    import pyipopt
    from functools import partial

    self.rfn = ReducedFunctionalNumPy(self.problem.reduced_functional)
    ncontrols = len(self.rfn.get_controls())

    (lb, ub) = self.__get_bounds()
    (nconstraints, fun_g, jac_g, clb, cub) = self.__get_constraints()
    constraints_nnz = nconstraints * ncontrols

    # A callback that evaluates the functional and derivative.
    J = self.rfn.__call__
    dJ = partial(self.rfn.derivative, forget=False)

    nlp = pyipopt.create(
        len(ub),          # length of control vector
        lb,               # lower bounds on control vector
        ub,               # upper bounds on control vector
        nconstraints,     # number of constraints
        clb,              # lower bounds on constraints
        cub,              # upper bounds on constraints
        constraints_nnz,  # number of nonzeros in the constraint Jacobian
        0,                # number of nonzeros in the Hessian
        J,                # to evaluate the functional
        dJ,               # to evaluate the gradient
        fun_g,            # to evaluate the constraints
        jac_g,            # to evaluate the constraint Jacobian
    )
    pyipopt.set_loglevel(1)  # turn off annoying pyipopt logging

    if rank(self.problem.reduced_functional.mpi_comm()) > 0:
        nlp.int_option("print_level", 0)  # disable redundant IPOPT output in parallel
    else:
        nlp.int_option("print_level", 6)  # very useful IPOPT output

    if isinstance(self.problem, MaximizationProblem):
        # multiply objective function by -1 internally in
        # ipopt to maximise instead of minimise
        nlp.num_option("obj_scaling_factor", -1.0)

    self.pyipopt_problem = nlp

def solve_constr(theta0, use_hess=False):
    pyipopt.set_loglevel(1)
    n = theta0.size
    x_L = np.array([pyipopt.NLP_LOWER_BOUND_INF] * n, dtype=float)
    x_U = np.array([pyipopt.NLP_UPPER_BOUND_INF] * n, dtype=float)
    ncon = pstationtrue.size
    g_L = g_U = pstationtrue
    nnzj = maskj.sum()
    nnzh = maskh.sum()
    idxrj, idxcj = np.mgrid[:ncon, :n]
    idxrh, idxch = np.mgrid[:n, :n]
    eval_c = lambda t: constr(t)
    eval_j = lambda t, f: (idxrj[maskj], idxcj[maskj]) if f else np.squeeze(jab(t))[maskj]
    if use_hess:
        eval_h = lambda t, l, o, f: (idxrh[maskh], idxch[maskh]) if f else np.squeeze(hess_constr(t, l, o))[maskh]
        nlp = pyipopt.create(theta0.size, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh,
                             eval_f, eval_grad, eval_c, eval_j, eval_h)
    else:
        nlp = pyipopt.create(theta0.size, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh,
                             eval_f, eval_grad, eval_c, eval_j)
    results = nlp.solve(theta0)
    nlp.close()
    return results

def nlinprog(obj, cons, Vars, x0=None):
    """nlinprog

    Parameters
    ----------
    obj : objective to minimize
    cons : constraints (any iterable; converted to a list)
    Vars : decision variables
    x0 : initial guess; defaults to a zero vector of len(Vars)

    Returns
    -------
    The result of cons_opt on the given problem.
    """
    pyipopt.set_loglevel(0)  # increasing verbosity -> 0, 1, 2
    cons = list(cons)
    if x0 is None:
        x0 = np.zeros(len(Vars))
    return cons_opt(obj, cons, Vars, x0)

def OptSolver(bounds, OptObj, max_iter=10):
    """
    Function that returns a pyipopt object with OptObj attributes
    """
    # Number of Design Variables
    nvar = OptObj.nvars

    # Upper and lower bounds
    x_L = numpy.ones(nvar) * bounds[0]
    x_U = numpy.ones(nvar) * bounds[1]

    # Number of non-zeros gradients
    constraints_nnz = nvar * OptObj.cst_num
    acst_L = numpy.array(OptObj.cst_L)
    acst_U = numpy.array(OptObj.cst_U)

    PyIpOptObj = pyipopt.create(
        nvar,             # number of the design variables
        x_L,              # lower bounds of the design variables
        x_U,              # upper bounds of the design variables
        OptObj.cst_num,   # number of constraints
        acst_L,           # lower bounds on constraints
        acst_U,           # upper bounds on constraints
        constraints_nnz,  # number of nonzeros in the constraint Jacobian
        0,                # number of nonzeros in the Hessian
        OptObj.obj_fun,   # objective function
        OptObj.obj_dfun,  # gradient of the objective function
        OptObj.cst_fval,  # constraint function
        OptObj.jacobian,  # gradient of the constraint function
    )

    # Parameters
    PyIpOptObj.num_option('acceptable_tol', 1.0e-10)
    PyIpOptObj.num_option('eta_phi', 1e-12)  # eta_phi: Relaxation factor in the Armijo condition.
    PyIpOptObj.num_option('theta_max_fact', 30000)  # Determines upper bound for constraint violation in the filter.
    PyIpOptObj.int_option('max_soc', 20)
    PyIpOptObj.int_option('max_iter', max_iter)
    PyIpOptObj.int_option('watchdog_shortened_iter_trigger', 20)
    PyIpOptObj.int_option('accept_after_max_steps', 5)
    pyipopt.set_loglevel(1)  # turn off annoying pyipopt logging
    PyIpOptObj.int_option('print_level', 6)  # very useful IPOPT output

    return PyIpOptObj

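# Hypothetical use of the OptSolver factory above (OptObj stands for
# whatever object supplies nvars, cst_num, cst_L/cst_U and the callbacks;
# the bounds and starting point are invented for illustration):
#
#     nlp = OptSolver([0.0, 1.0], OptObj, max_iter=50)
#     x0 = numpy.ones(OptObj.nvars) * 0.5
#     x, zl, zu, lam, obj, status = nlp.solve(x0)
#     nlp.close()
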
def optimizePCDTX(channel, noiseIfPower, rate, linkBandwidth, pMax, p0, m, pS, verbosity=0):
    '''
    Uses channel values, PHY parameters and power consumption characteristics
    to find the minimal resource allocation under power control with DTX.
    Returns resource allocation, objective value and IPOPT status.

    Input:
        channel      - 3d array. 0d users, 1d n_tx, 2d n_rx
        noiseIfPower - total noise power over the link bandwidth
        rate         - target rate in bps
        pMax         - maximum allowed transmission power
        p0           - power consumption at zero transmission (not sleep)
        pS           - power consumption during sleep mode
        m            - power consumption load factor
        verbosity    - IPOPT verbosity level

    Output:
        obj      - solution objective value
        solution - resource share per user
        status   - IPOPT status
    '''
    # the channel dimensions tell some more parameters
    users = channel.shape[0]
    n_tx = channel.shape[1]
    n_rx = channel.shape[2]

    # preparing IPOPT parameters
    nvar = users + 1  # sleep mode is integrated as the last parameter
    x_L = zeros((nvar), dtype=float_) * 0.0
    x_U = ones((nvar), dtype=float_) * 1.0
    ncon = users + 1  # transmit power constraints and the unit sum
    g_L = zeros(ncon)  # unit sum and all power constraints
    g_L[0] = 1.
    g_U = pMax * ones(ncon)  # unit sum and all power constraints
    g_U[0] = 1.
    nnzj = ncon * ncon
    nnzh = 0  # tell that there is no hessian (Hessian approximation)
    x0 = repeat([1. / (nvar + 1)], nvar)  # Starting point

    # IPOPT requires single parameter functions
    if n_tx == 2 and n_rx == 2:  # '==' not 'is': identity checks on ints are unreliable
        eval_f = lambda mus: optimMinPow2x2DTX.eval_f(
            mus, noiseIfPower, channel, rate, linkBandwidth, p0, m, pS)
        eval_grad_f = lambda mus: optimMinPow2x2DTX.eval_grad_f(
            mus, noiseIfPower, channel, rate, linkBandwidth, p0, m, pS)
        eval_g = lambda mus: optimMinPow2x2DTX.eval_g(
            mus, noiseIfPower, channel, rate, linkBandwidth)
        eval_jac_g = lambda mus, flag: optimMinPow2x2DTX.eval_jac_g(
            mus, noiseIfPower, channel, rate, linkBandwidth, flag)
    else:
        raise NotImplementedError  # other combinations may be needed later

    pyipopt.set_loglevel(min([verbosity, 2]))  # verbose
    nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh,
                         eval_f, eval_grad_f, eval_g, eval_jac_g)
    #nlp.int_option("max_iter", 3000)
    #nlp.num_option("tol", 1e-8)
    #nlp.num_option("acceptable_tol", 1e-2)
    #nlp.int_option("acceptable_iter", 0)
    nlp.str_option("derivative_test", "first-order")
    nlp.str_option("derivative_test_print_all", "yes")
    #nlp.str_option("print_options_documentation", "yes")
    nlp.int_option("print_level", min([verbosity, 12]))  # maximum is 12
    nlp.str_option("print_user_options", "yes")
    solution, zl, zu, obj, status = nlp.solve(x0)
    nlp.close()

    if sum(solution) > 1.0001 or status != 0:  # '!=' not 'is not' for int comparison
        print('Sum of solution:', sum(solution))
        print('Status:', status)
        raise ValueError('Invalid solution')

    return obj, solution, status

def optimizeManeuver(self):
    '''
    Sets up the optimization problem then calculates the optimal maneuver
    poses. Publishes the resulting path if the optimization is successful.

    Args:
    -----
    msg: ROS Bool message

    Returns:
    --------
    path: the optimal path as a ROS nav_msgs/Path message
    '''
    # Make sure a target pose exists
    if (self.target_x is not None):
        # Grab the current pose from the recent transform if there is no
        # 'odom' topic being published to
        if (self.current_pose is None):
            # DEBUG:
            print("No 'odom' message received. Waiting for transform from 'odom' to 'base_link'...")
            listener = tf.TransformListener()
            try:
                listener.waitForTransform('/odom', '/base_link', rospy.Time(0), rospy.Duration(10))
                (trans, rot) = listener.lookupTransform('/odom', '/base_link', rospy.Time(0))
                self.current_pose = Pose()
                self.current_pose.position.x = trans[0]
                self.current_pose.position.y = trans[1]
                self.current_pose.position.z = trans[2]
                self.current_pose.orientation.x = rot[0]
                self.current_pose.orientation.y = rot[1]
                self.current_pose.orientation.z = rot[2]
                self.current_pose.orientation.w = rot[3]
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                return False, "Error looking up transform from 'odom' to 'base_link'"

        # DEBUG:
        print("Running maneuver optimization...")

        # Initial value for optimization
        x0 = [self.start_x_s, self.start_y_s]
        lower_bounds = [-2, -16]
        upper_bounds = [20, 5]

        # Set params
        # TODO: add the forklifts current pose from "/odom"
        current_pose2D = Pose2D()
        current_pose2D.x = self.current_pose.position.x
        current_pose2D.y = self.current_pose.position.y
        euler_angles = euler_from_quaternion([
            self.current_pose.orientation.x, self.current_pose.orientation.y,
            self.current_pose.orientation.z, self.current_pose.orientation.w
        ])
        current_pose2D.theta = euler_angles[2]
        params = {
            "current_pose": [current_pose2D.x, current_pose2D.y, current_pose2D.theta],
            "forklift_length": (self.base_to_back + self.base_to_clamp),
            "weights": [10, 1, 0.1, 1],
            "obstacles": self.obstacles,
            "min_radius": self.min_radius
        }

        print("Using optimization method: %d" % self.optimization_method)

        #==================================================================#
        # vvv Add Autograd gradient functions here if you get to it vvv
        #==================================================================#
        # Generate Gradient Functions
        self.grad_maneuverObjective = grad(lambda x: self.maneuverObjective(x, params))
        self.hessian_maneuverObjective = hessian(lambda x: self.maneuverObjective(x, params))
        self.jac_maneuverIneqConstraints = jacobian(lambda x: self.maneuverIneqConstraints(x, params))
        self.hessian_maneuverIneqConstraints = hessian(lambda x: self.maneuverIneqConstraints(x, params))

        # # Test Gradients against finite difference method
        # delta = 0.0000001
        # x = np.array([self.start_x_s, self.start_y_s], dtype=np.float)
        # dx = deepcopy(x)
        # dx[0] = x[0] + delta
        # print("Objective: ")
        # print(self.maneuverObjective(x, params))
        # print(self.maneuverObjective(dx, params))
        # print("Autograd:")
        # print(self.grad_maneuverObjective(x))
        # print("Finite Difference:")
        # print((self.maneuverObjective(dx, params) - self.maneuverObjective(x, params))/delta)
        # print("Hessian:")
        # print(self.hessian_maneuverObjective(x))
        # print("Autograd con:")
        # print(self.jac_maneuverIneqConstraints(x))
        # print("Constraint Jacobian:")
        # print(self.gradManeuverIneqConstraints(x, params))
        # print("Hessian con:")
        # print(self.hessian_maneuverIneqConstraints(x))
        #==================================================================#
        # ^^^ Add Autograd gradient functions here if you get to it ^^^
        #==================================================================#

        #==================================================================#
        # scipy.optimize.minimize optimizer
        #==================================================================#
        if (self.optimization_method == 1):
            # Set up optimization problem
            obj = lambda x: self.maneuverObjective(x, params)
            obj_bfgs = lambda x: self.maneuverObjective(x, params)
            ineq_con = {
                'type': 'ineq',
                'fun': lambda x: self.maneuverIneqConstraints(x, params),
                'jac': None
            }
            bounds = [(lower_bounds[0], upper_bounds[0]),
                      (lower_bounds[1], upper_bounds[1])]

            # Optimize
            tic = time.time()
            #res = minimize(obj, x0, jac=self.grad_maneuverObjective, method='SLSQP', bounds=bounds, constraints=ineq_con)
            #res = minimize(obj, x0, method='SLSQP', bounds=bounds, constraints=ineq_con)
            res = minimize(obj_bfgs, x0, method='COBYLA', bounds=bounds, constraints=ineq_con)
            toc = time.time()

            # DEBUG:
            print("===== Optimization Results =====")
            print("time: %f(sec)" % (toc - tic))
            print("Success: %s" % res.success)
            print("Message: %s" % res.message)
            print("Results:\n x: %f, y: %f" % (res.x[0], res.x[1]))

            # Store result
            x_s = res.x[0]
            y_s = res.x[1]

            # Update starting point to be the current result
            self.start_x_s = x_s
            self.start_y_s = y_s

            self.optimization_success = res.success
            message = res.message
        #==================================================================#
        # scipy.optimize.minimize optimizer
        #==================================================================#

        #==================================================================#
        # IPOPT Optimizer
        #==================================================================#
        if (self.optimization_method == 2):
            # Initial value for optimization
            x0_ip = np.array([x0[0], x0[1]])
            nvar = 2
            x_L = np.array(lower_bounds, dtype=np.float_)
            x_U = np.array(upper_bounds, dtype=np.float_)
            ncon = 1
            g_L = np.array([0], dtype=np.float_)
            g_U = np.array([0], dtype=np.float_)
            nnzj = nvar * ncon
            nnzh = nvar**2

            def eval_f(x):
                return self.maneuverObjective(x, params)

            def eval_grad_f(x):
                return self.grad_maneuverObjective(x)

            def eval_g(x):
                return self.maneuverIneqConstraints(x, params)

            def eval_jac_g(x, flag):
                if flag:
                    rows = np.concatenate((np.ones(nvar) * 0, np.ones(nvar) * 1))
                    cols = np.concatenate((np.linspace(0, nvar - 1, nvar),
                                           np.linspace(nvar, 2 * nvar - 1, nvar)))
                    return (rows, cols)
                else:
                    return self.jac_maneuverIneqConstraints(x)

            def eval_h(x, lagrange, obj_factor, flag):
                if flag:
                    rows = np.array([])
                    for i in range(nvar * ncon):
                        rows = np.concatenate((rows, np.ones(nvar) * i))
                    cols = np.array([])
                    for i in range(nvar * ncon):
                        cols = np.concatenate((cols, np.linspace(0, nvar - 1, nvar)))
                    return (rows, cols)
                else:
                    constraint_hessian = self.hessian_maneuverIneqConstraints(x)
                    constraint_sum = lagrange[0] * constraint_hessian[0, :, :]
                    constraint_sum = constraint_sum + lagrange[1] * constraint_hessian[1, :, :]
                    return obj_factor * self.hessian_maneuverObjective(x) + constraint_sum

            # Not using hessian, remove this line when using it
            nnzh = 0
            nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh,
                                 eval_f, eval_grad_f, eval_g, eval_jac_g)
            pyipopt.set_loglevel(0)

            tic = time.time()
            x, zl, zu, constraint_multipliers, obj, status = nlp.solve(x0_ip)
            nlp.close()
            toc = time.time()

            def print_variable(variable_name, value):
                for i in range(len(value)):
                    print("{} {}".format(variable_name + "[" + str(i) + "] =", value[i]))

            print("Solution of the primal variables, x")
            print_variable("x", x)

            # # print("Solution of the bound multipliers, z_L and z_U")
            # print_variable("z_L", zl)
            # print_variable("z_U", zu)
            #
            # print("Solution of the constraint multipliers, lambda")
            # print_variable("lambda", constraint_multipliers)
            #
            # print("Objective value")
            # print("f(x*) = {}".format(obj))

            # DEBUG:
            print("===== Optimization Results (IPOPT) =====")
            print("time: %f" % (toc - tic))
            print("Success: %s" % status)
            print("Message: %s" % "")
            print("Results:\n x: %f, y: %f" % (x[0], x[1]))

            # Store result
            x_s = x[0]
            y_s = x[1]

            self.optimization_success = (status > 0)
            message = "ipopt optimization finished with status: {0:d}".format(status)
        #==================================================================#
        # IPOPT Optimizer
        #==================================================================#

        #=================================================================#
        # Use hardcoded value
        #=================================================================#
        if (self.optimization_method == 0):
            x_s = x0[0]
            y_s = x0[1]
            self.optimization_success = 1
            message = "used hardcoded starting value"
        #=================================================================#
        # Use hardcoded value
        #=================================================================#

        # Print optimized point
        print("Approach starting point: (%0.4f, %0.4f)" % (x_s, y_s))

        # Set initial pose angle for the forklift to be the direction facing the roll
        theta_s = np.arctan2(self.target_y - y_s, self.target_x - x_s)

        # Initialize path messages
        current_time = rospy.Time.now()
        path1_msg = Path()
        path2_msg = Path()
        path1_gear_msg = PathWithGear()
        path2_gear_msg = PathWithGear()
        path1_msg.header.stamp = current_time
        path1_msg.header.frame_id = "odom"
        path2_msg.header.stamp = current_time
        path2_msg.header.frame_id = "odom"
        path1_gear_msg.path.header.stamp = current_time
        path1_gear_msg.path.header.frame_id = "odom"
        path2_gear_msg.path.header.stamp = current_time
        path2_gear_msg.path.header.frame_id = "odom"

        # Publish first segment of maneuver
        # NOTE: Just set the path to be a single point at the current
        # position. This will make the master controller work the same and
        # quickly move through the two maneuver paths
        point = PoseStamped()
        point.header.frame_id = "odom"
        point.pose.position.x = self.current_pose.position.x
        point.pose.position.y = self.current_pose.position.y
        path1_msg.poses.append(point)
        path1_gear_msg.path.poses.append(point)
        # Set gear, positive alpha = forward gear
        self.path1_pub.publish(path1_msg)
        path1_gear_msg.gear = 1
        self.path1_gear_pub.publish(path1_gear_msg)

        # Publish second segment of maneuver
        point = PoseStamped()
        point.header.frame_id = "odom"
        point.pose.position.x = self.current_pose.position.x
        point.pose.position.y = self.current_pose.position.y
        path2_msg.poses.append(point)
        path2_gear_msg.path.poses.append(point)
        # Set gear, positive alpha = forward gear
        self.path2_pub.publish(path2_msg)
        path2_gear_msg.gear = 1
        self.path2_gear_pub.publish(path2_gear_msg)

        if (self.optimization_success):
            # If optimization was successful, publish the new target
            # position for the A* algorithm (you will want to make this a
            # separate "goal" value distinct from the roll target position)
            rospy.set_param("/control_panel_node/goal_x", float(x_s))
            rospy.set_param("/control_panel_node/goal_y", float(y_s))
            self.update_obstacle_end_pose = False

            # Publish the starting pose for the approach b-spline path
            approach_start_pose = PoseStamped()
            approach_start_pose.header.frame_id = "/odom"
            approach_start_pose.pose.position.x = x_s
            approach_start_pose.pose.position.y = y_s
            quat_forklift = quaternion_from_euler(0, 0, wrapToPi(theta_s))
            approach_start_pose.pose.orientation.x = quat_forklift[0]
            approach_start_pose.pose.orientation.y = quat_forklift[1]
            approach_start_pose.pose.orientation.z = quat_forklift[2]
            approach_start_pose.pose.orientation.w = quat_forklift[3]
            self.approach_pose_pub.publish(approach_start_pose)

        return self.optimization_success, message
    else:
        return False, "No target pose exists"

def _minimize(self, initial_val, loss_grad_func, equality_funcs,
              equality_grad_funcs, inequality_funcs, inequality_grad_funcs,
              packed_bounds, step_callback, optimizer_kwargs):
    # initial value should be float64
    initial_val = np.array(initial_val, dtype=np.float_)

    # objective function
    def eval_f(x, user_data=None):
        loss, _ = loss_grad_func(x)
        return np.array(loss, dtype=np.float_)

    # gradient of objective function
    def eval_grad_f(x, user_data=None):
        _, grad_f = loss_grad_func(x)
        return np.array(grad_f, dtype=np.float_)

    # constraint function (first inequalities, then equalities)
    def eval_g(x, user_data=None):
        inequalities = [inequality_funcs[i](x) for i in range(nineqcon)]
        equalities = [equality_funcs[i](x) for i in range(neqcon)]
        return np.array(inequalities + equalities, dtype=np.float_).reshape(ncon, )

    # hessian of the lagrangian (first inequalities, then equalities)
    def eval_h(x, lagrange, obj_factor, flag, user_data=None):
        rows, cols = np.tril_indices(nvar)
        if flag:
            return (np.array(rows, dtype=np.int_), np.array(cols, dtype=np.int_))
        else:
            loss = [loss_hessian_func(x)]
            inequalities = [inequality_hessian_funcs[i](x) for i in range(nineqcon)]
            equalities = [equality_hessian_funcs[i](x) for i in range(neqcon)]
            values = np.zeros([nvar, nvar])
            values += obj_factor * loss[0][0]
            for idc in range(nineqcon):
                values += lagrange[idc] * inequalities[idc][0]
            for idc in range(neqcon):
                values += lagrange[idc + nineqcon] * equalities[idc][0]
            return np.array(values.reshape(nvar, nvar)[rows, cols], dtype=np.float_)

    # jacobian of the constraints (first inequalities, then equalities)
    def eval_jac_g(x, flag, user_data=None):
        rows, cols = np.indices((ncon, nvar))
        if flag:
            return (np.array(rows.reshape(-1, 1), dtype=np.int_),
                    np.array(cols.reshape(-1, 1), dtype=np.int_))
        else:
            inequalities = [inequality_grad_funcs[i](x) for i in range(nineqcon)]
            equalities = [equality_grad_funcs[i](x) for i in range(neqcon)]
            values = np.empty([ncon, nvar])
            for idc in range(nineqcon):
                values[idc, :] = inequalities[idc][0]
            for idc in range(neqcon):
                values[idc + nineqcon, :] = equalities[idc][0]
            return np.array(values.reshape(ncon * nvar, ), dtype=np.float_)

    # box constraints on the variables
    nvar = int(np.sum([np.prod(self._vars[i].get_shape().as_list())
                       for i in range(len(self._vars))]))
    if self._packed_bounds is None:
        x_L = -np.ones((nvar), dtype=np.float_) * np.inf
        x_U = np.ones((nvar), dtype=np.float_) * np.inf
    else:
        x_L, x_U = zip(*self._packed_bounds)
        x_L = np.array(x_L, dtype=np.float_)
        x_U = np.array(x_U, dtype=np.float_)

    # inequality constraints as g(x)>=0 and equality constraints as h(x)=0
    nineqcon = len(self._inequalities)
    neqcon = len(self._equalities)
    ncon = nineqcon + neqcon
    g_L_ineq = np.zeros((nineqcon), dtype=np.float_)
    g_U_ineq = np.ones((nineqcon), dtype=np.float_) * 2.0 * pow(10.0, 19)
    g_L_eq = np.zeros((neqcon), dtype=np.float_)
    g_U_eq = np.zeros((neqcon), dtype=np.float_)
    g_L = np.concatenate((g_L_ineq, g_L_eq), axis=0)
    g_U = np.concatenate((g_U_ineq, g_U_eq), axis=0)
    nnzj = nvar * ncon
    nnzh = int(nvar * (nvar + 1) / 2)

    minimize_args = [nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh,
                     eval_f, eval_grad_f, eval_g, eval_jac_g]

    # create nlp in ipopt
    import pyipopt

    # log_level decides if logs from pyipopt are desired -- these are logs on
    # top of what is returned from ipopt set by "print_level"; see below
    if "log_level" in optimizer_kwargs["options"]:
        pyipopt.set_loglevel(optimizer_kwargs["options"]["log_level"])
    nlp = pyipopt.create(*minimize_args)

    # check https://www.coin-or.org/Ipopt/documentation/node40.html
    # for more options and default settings
    # default print_level=5
    # default max_iter=3000
    # default tol=1e-8
    for optvar in optimizer_kwargs["options"]:
        if optvar == "log_level":  # already handled above
            continue
        elif isinstance(optimizer_kwargs["options"][optvar], str):
            nlp.str_option(optvar, optimizer_kwargs["options"][optvar])
        elif isinstance(optimizer_kwargs["options"][optvar], int):
            nlp.int_option(optvar, optimizer_kwargs["options"][optvar])
        else:
            nlp.num_option(optvar, optimizer_kwargs["options"][optvar])

    result_x, zl, zu, constraint_multipliers, result_f, status = nlp.solve(initial_val)
    nlp.close()

    message_lines = [
        'Optimization terminated with:',
        '  Message: %s',
        '  Objective function value: %f',
    ]
    message_args = [status, result_f]
    logging.info('\n'.join(message_lines), *message_args)
    print("Optimization terminated with message: {}".format(status))

    return result_x

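# A hypothetical options dict for the _minimize wrapper above: "log_level"
# is consumed by pyipopt.set_loglevel, and every other key is forwarded to
# IPOPT through str_option/int_option/num_option according to its type.
# The option names are standard IPOPT options; the values are illustrative.
#
#     optimizer_kwargs = {"options": {
#         "log_level": 1,            # pyipopt wrapper logging
#         "print_level": 5,          # int   -> nlp.int_option
#         "tol": 1e-8,               # float -> nlp.num_option
#         "linear_solver": "mumps",  # str   -> nlp.str_option
#     }}
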
modelname = 'colpitts'  # Name of differential Model function to use
mapname = 'rk2'  # Name of Discretization function to use

# File of initial paths. If initfile == 'random', will generate random
# NxD path and save to initpaths.txt
initfile = 'random'
savefile = 'test_anneal.txt'  # Filename to save output

# Optimization Options
epsf = 1e-6  # Function Tolerance
maxits = 100000  # Max Iterations
epsg = 1e-6  # Constraint Tolerance
linear_solver = 'ma97'
#epsx = 1e-8  # Step Size Tolerance

pyipopt.set_loglevel(1)


# Returns the differential model xdot = f(x, t). Should return a numpy
# array.
def lorenz96(x, t):
    D = len(x)
    dxdt = []
    for i in range(D):
        dxdt.append(x[np.mod(i - 1, D)] * (x[np.mod(i + 1, D)] - x[np.mod(i - 2, D)]) - x[i] + 8.17)
    return np.array(dxdt)


def colpitts(x, t):
    dx = x[1]
    dy = -5.0 * (x[0] + x[2]) - 2.0 * x[1]
    dz = 2.8 * (x[1] + 1 - np.exp(-x[0]))
    return np.array([dx, dy, dz])

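# The 'rk2' discretization selected via mapname above is not shown in this
# snippet. A standard explicit midpoint (RK2) step for xdot = f(x, t) would
# look like the sketch below -- an assumption for illustration, not
# necessarily the project's exact implementation:
def rk2_step(f, x, t, dt):
    k1 = f(x, t)  # slope at the start of the step
    return x + dt * f(x + 0.5 * dt * k1, t + 0.5 * dt)  # midpoint slope

# e.g. one step of the Colpitts model from x = [0.1, 0.2, 0.3]:
#     x_next = rk2_step(colpitts, np.array([0.1, 0.2, 0.3]), 0.0, 0.01)
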
# E1002 - disable complaints about .__init__: Use super on an old style class
# R0903 - Disable complaints about Too few public methods
# C0103 - Disable complaints Invalid name "setUp"
#         (should match [a-z_][a-z0-9_]{2,30}$)

import functools
import sys

# public symbols
__all__ = ['IPOPTdriver']

from numpy import zeros, array, append

import pyipopt
pyipopt.set_loglevel(0)  # Avoid wrapper entry/return trace.

from openmdao.main.api import Driver
from openmdao.main.datatypes.api import Enum, Float, Int, Dict
from openmdao.main.exceptions import RunStopped
from openmdao.main.interfaces import IHasParameters, IHasConstraints, \
    IHasObjective, implements, IOptimizer
from openmdao.main.hasconstraints import HasConstraints
from openmdao.main.hasobjective import HasObjective
from openmdao.main.hasparameters import HasParameters
from openmdao.util.decorators import add_delegate


class IpoptReturnStatus(object):
    '''A fake enum for the possible values of

def get_ipopt_miu(self):
    """Optimized miu

    Calculate optimal miu. Called when opt=True is passed to loop().

    Args:
        None

    Returns:
        nd.array: Array of optimal miu, n = params.tmax
    """
    try:
        import pyipopt
    except ImportError:
        pyipopt = None
        print('OPTIMIZATION ERROR: It appears that you do not have '
              'pyipopt installed. Please install it before running '
              'optimization.')
    x0 = np.concatenate(
        (np.linspace(0, 1, 40) ** (1 - np.linspace(0, 1, 40)), np.ones(20))
    )
    M = 0
    nnzj = 0
    nnzh = 0
    xl = np.zeros(self.params.tmax)
    xu = np.ones(self.params.tmax)
    xl[0] = .005
    xu[0] = .005
    xl[-20:] = 1
    gl = np.zeros(M)
    gu = np.ones(M) * 4.0

    def eval_f(_x0):
        if (_x0 == self.opt_x).all() and self.opt_obj is not None:
            return self.opt_obj
        else:
            self.opt_x = _x0.copy()
            return self.obj_loop(_x0)

    def eval_grad_f(_x0):
        if (_x0 == self.opt_x).all() and self.opt_grad_f is not None:
            return self.opt_grad_f
        else:
            self.opt_x = _x0.copy()
            return self.grad_loop(_x0)

    def eval_g(x):
        return np.zeros(M)

    def eval_jac_g(x, flag):
        if flag:
            return [], []
        else:
            return np.empty(M)

    pyipopt.set_loglevel(1)
    nlp = pyipopt.create(
        self.opt_vars, xl, xu, M, gl, gu, nnzj, nnzh,
        eval_f, eval_grad_f, eval_g, eval_jac_g,
    )
    nlp.num_option('constr_viol_tol', 8e-7)
    nlp.int_option('max_iter', 30)
    nlp.num_option('max_cpu_time', 60)
    nlp.num_option('tol', self.opt_tol)
    # nlp.num_option('acceptable_tol', 1e-4)
    # nlp.int_option('acceptable_iter', 4)
    nlp.num_option('obj_scaling_factor', -1e+0)
    nlp.int_option('print_level', 0)
    nlp.str_option('linear_solver', 'ma57')
    # nlp.str_option('derivative_test', 'first-order')
    x = nlp.solve(x0)[0]
    nlp.close()
    return x

def get_ipopt_options(rd, lb, ub, tol, max_iter, **kwargs):
    """Get options for IPOPT module (interior point algorithm).
    See `<https://projects.coin-or.org/Ipopt>`

    rd : :py:class:`dolfin_adjoint.ReducedFunctional`
        The reduced functional
    lb : list
        Lower bound on the control
    ub : list
        Upper bound on the control
    tol : float
        Tolerance
    max_iter : int
        Maximum number of iterations

    *Returns*
        nlp : ipopt instance
            A nonlinear ipopt problem
    """
    ncontrols = len(ub)
    nconstraints = 0
    empty = np.array([], dtype=float)
    clb = empty
    cub = empty
    constraints_nnz = nconstraints * ncontrols

    # The constraint function, should do nothing
    def fun_g(x, user_data=None):
        return empty

    # The constraint Jacobian
    def jac_g(x, flag, user_data=None):
        if flag:
            rows = np.array([], dtype=int)
            cols = np.array([], dtype=int)
            return (rows, cols)
        else:
            return empty

    J = rd.__call__
    dJ = rd.derivative

    nlp = pyipopt.create(
        ncontrols,        # length of control vector
        lb,               # lower bounds on control vector
        ub,               # upper bounds on control vector
        0,                # number of constraints
        clb,              # lower bounds on constraints
        cub,              # upper bounds on constraints
        0,                # number of nonzeros in the constraint Jacobian
        0,                # number of nonzeros in the Hessian
        J,                # to evaluate the functional
        dJ,               # to evaluate the gradient
        fun_g,            # to evaluate the constraints
        jac_g,            # to evaluate the constraint Jacobian
    )
    pyipopt.set_loglevel(1)

    return nlp

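# Sketch of driving the problem returned by get_ipopt_options (assumes rd,
# lb and ub are defined by the caller). Note that the tol and max_iter
# arguments are accepted but not applied inside the function, so they still
# need to be set on the returned object:
#
#     nlp = get_ipopt_options(rd, lb, ub, tol=1e-6, max_iter=100)
#     nlp.num_option('tol', 1e-6)
#     nlp.int_option('max_iter', 100)
#     x_opt = nlp.solve(np.array(lb, dtype=float))[0]
#     nlp.close()
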
def sim(self, max_iter=1000):
    """
    Function that returns a pyipopt object with OptObj attributes
    """
    super().sim()
    if self.rho_initial is not None:
        self.rho = Function(self.A, self.rho_initial.vector(), name="Control")
    print("Optimization Beginning")
    self.state = Function(self.W)
    self.state2 = Function(self.W2)
    if self.cst_num >= 1:
        self.__vf_fun_var_assem__()
    if self.cst_num >= 2:
        self.__vf_fun_var_assem2__()
    if self.cst_num >= 3:
        self.__vf_fun_var_assem3__()
    self.nvars = len(self.rho.vector())

    # Number of Design Variables
    nvar = self.nvars

    # Upper and lower bounds
    x_L = numpy.ones(nvar) * 0.  # bounds[0]
    x_U = numpy.ones(nvar) * 1.  # bounds[1]

    # Number of non-zeros gradients
    constraints_nnz = nvar * self.cst_num
    acst_L = numpy.array(self.cst_L)
    acst_U = numpy.array(self.cst_U)

    PyIpOptObj = pyipopt.create(
        nvar,             # number of the design variables
        x_L,              # lower bounds of the design variables
        x_U,              # upper bounds of the design variables
        self.cst_num,     # number of constraints
        acst_L,           # lower bounds on constraints
        acst_U,           # upper bounds on constraints
        constraints_nnz,  # number of nonzeros in the constraint Jacobian
        0,                # number of nonzeros in the Hessian
        self.obj_fun,     # objective function
        self.obj_dfun,    # gradient of the objective function
        self.cst_fval,    # constraint function
        self.jacobian,    # gradient of the constraint function
    )

    # Parameters
    PyIpOptObj.num_option('obj_scaling_factor', 1.0)  # MAXIMIZE OR MINIMIZE
    PyIpOptObj.num_option('acceptable_tol', 1.0e-10)
    PyIpOptObj.num_option('eta_phi', 1e-12)  # eta_phi: Relaxation factor in the Armijo condition.
    PyIpOptObj.num_option('theta_max_fact', 30000)  # Determines upper bound for constraint violation in the filter.
    PyIpOptObj.int_option('max_soc', 20)
    PyIpOptObj.int_option('max_iter', max_iter)
    PyIpOptObj.int_option('watchdog_shortened_iter_trigger', 20)
    PyIpOptObj.int_option('accept_after_max_steps', 5)
    pyipopt.set_loglevel(1)  # turn off annoying pyipopt logging
    PyIpOptObj.int_option('print_level', 6)  # very useful IPOPT output

    x0 = numpy.copy(self.rho.vector())
    self.rho.vector()[:], zl, zu, constraint_multipliers, obj, status = PyIpOptObj.solve(x0)
    return self.rho

def main():
    pyipopt.set_loglevel(2)
    x0 = numpy.array([-0.27, -0.9], dtype=float)
    results = pyipopt.fmin_unconstrained(
        himmelblau,
        x0,
        fprime=functools.partial(eval_grad, himmelblau),
        fhess=functools.partial(eval_hess, himmelblau),
    )
    print(results)