import numpy as np
import gurobipy as grb

# Project-local names (OptProb, TestParam, Variable, AffExpr, LinLEFluent,
# LinEqFluent, Constraints, Constant, Solver, to_gurobi_expr, settings) are
# assumed importable from the codebase under test; their imports are not shown.


def test_le_cnt_with_opt():
    prob = OptProb()
    model = prob.get_model()
    param = TestParam("test", 3, 1)
    param.resample()
    var = Variable(model, param)
    prob.add_var(var)
    twoe1 = AffExpr(constant=np.array([[2], [0], [-1]], dtype=np.float64))
    lhs = AffExpr({param: 1})
    fluent = LinLEFluent('test_eq', 0, lhs, twoe1)
    variables = [var]
    param_to_var = {param: var}

    constraints = Constraints(model)
    if isinstance(fluent, LinLEFluent):
        lhs = to_gurobi_expr(fluent.lhs, param_to_var)
        rhs = to_gurobi_expr(fluent.rhs, param_to_var)
        model.update()
        constraints.add_leq_cntr(lhs, rhs)
    assert not fluent.satisfied()
    prob.add_constraints(constraints)

    # objective: sum over entries of (v - 1)^2, written expanded
    obj = grb.QuadExpr()
    for var in variables:
        for i in range(var.rows):
            for j in range(var.cols):
                v = var.grb_vars[i, j]
                obj += v * v - 2 * v + 1
    prob.obj_sqp = obj
    prob.optimize()
    var.update_hl_param()
    assert fluent.satisfied()
    assert np.all(variables[0].get_val() == np.array([[1], [0], [-1]]))
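
# Illustrative sketch (not part of the original tests): the objective
# sum_i (v_i - 1)^2 is separable, so with elementwise upper bounds b the
# minimizer is min(1, b_i) per entry, which is why [[1], [0], [-1]] is the
# expected optimum above. The helper name below is hypothetical.
def _demo_expected_le_optimum():
    unconstrained_opt = np.ones((3, 1))   # per-entry argmin of (v - 1)^2
    bound = np.array([[2], [0], [-1]])    # rhs of the <= constraint
    expected = np.minimum(unconstrained_opt, bound)
    assert np.all(expected == np.array([[1], [0], [-1]]))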
def test_eq_cnt_with_gurobi():
    model = grb.Model()
    obj = grb.QuadExpr()
    param = TestParam("test", 3, 1)
    param.resample()
    var = Variable(model, param)
    zero = AffExpr(constant=np.array([[0], [0], [0]], dtype=np.float64))
    lhs = AffExpr({param: 1})
    fluent = LinEqFluent('test_eq', 0, lhs, zero)
    variables = [Variable(model, param)]
    param_to_var = {param: variables[0]}

    constraints = Constraints(model)
    if isinstance(fluent, LinEqFluent):
        lhs = to_gurobi_expr(fluent.lhs, param_to_var)
        rhs = to_gurobi_expr(fluent.rhs, param_to_var)
        model.update()
        constraints.add_eq_cntr(lhs, rhs)
    assert not fluent.satisfied()

    # objective: sum over entries of (v - 1)^2; the equality constraint
    # forces the optimum to zero regardless
    for var in variables:
        for i in range(var.rows):
            for j in range(var.cols):
                v = var.grb_vars[i, j]
                obj += v * v - 2 * v + 1
    model.setObjective(obj)
    model.update()
    model.optimize()
    # after the loop above, `var` is bound to variables[0]
    var.update()
    var.update_hl_param()
    assert fluent.satisfied()
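
# Minimal gurobipy-only sketch of the same pattern, without the project's
# wrapper classes (illustration only, not the original test): minimize
# (v - 1)^2 subject to v == 0, which pins the optimum at 0. The helper name
# is hypothetical.
def _demo_eq_cnt_plain_gurobi():
    m = grb.Model()
    v = m.addVar(lb=-grb.GRB.INFINITY, name="v")
    m.update()
    m.addConstr(v == 0.0)
    m.setObjective(v * v - 2 * v + 1)
    m.optimize()
    assert abs(v.X) < 1e-6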
def solve_at_priority_regular(self, priority, fix_sampled_params=False, recently_sampled=None):
    # initialize gurobi Model object
    prob = OptProb()
    self.recently_converged_vio_fluent = None
    if recently_sampled is None:
        recently_sampled = []

    params = set()
    for hla in self.hlas:
        params.update(hla.get_params())
        prob.add_hla(hla)

    # check for duplicate parameters (prob shouldn't add variables into the
    # problem multiple times)
    param_to_var = {}
    for param in params:
        if param in param_to_var:
            continue
        if param.is_resampled and fix_sampled_params:
            print(param.name, "is fixed.")
            const = Constant(param)
            param_to_var[param] = const
        elif not param.is_var:
            print(param.name, "is not a var.")
            const = Constant(param)
            param_to_var[param] = const
        else:  # param.is_var
            var = Variable(param, recently_sampled=(param in recently_sampled))
            param_to_var[param] = var
            prob.add_var(var)
    prob.update()

    # max(priority, 0) because priority = -1 is used for straight-line init
    self.add_cnts_to_opt_prob(prob, param_to_var=param_to_var, priority=max(priority, 0))

    # re-sampled params need to update their corresponding variables
    for param, var in param_to_var.items():
        var.set_val(param.get_value())

    for hla in self.hlas:
        if hla.cost != 0.0:
            hla.cost.to_gurobi_fn(prob, param_to_var)
            prob.inc_obj(hla.cost)

    solver = Solver()
    if priority == -1 or priority == 0:
        # for initialization only, because the problem should be a QP
        solver.initial_trust_box_size = 1e5
        solver.max_merit_coeff_increases = 1

    if priority == -1:
        # initialize straight-line trajectories
        success = prob.initialize_traj(mode="straight")
    elif priority == 0:
        # initialize from adapted previous trajectories
        success = prob.initialize_traj(mode=settings.INIT_MODE)
    else:
        if settings.DO_SQP or settings.BACKTRACKING_REFINEMENT or settings.BTSMOOTH_REFINEMENT:
            do_early_converge = False
        elif settings.DO_EARLY_CONVERGE:
            do_early_converge = True
        else:
            raise NotImplementedError
        success, converged_vio_fluent = solver.penalty_sqp(prob, do_early_converge=do_early_converge)
        if not do_early_converge:
            assert converged_vio_fluent is None
        self.recently_converged_vio_fluent = converged_vio_fluent

    for param, var in param_to_var.items():
        var.update_hl_param()
    self.traj_cost = sum(prob.val(0)[0])
    return success
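
# Conceptual sketch of the penalty idea behind solver.penalty_sqp (not the
# project's implementation): minimize f(x) subject to g(x) <= 0 by minimizing
# f(x) + mu * max(0, g(x))^2 for an increasing penalty coefficient mu,
# warm-starting each subproblem from the previous solution. The helper name
# is hypothetical.
def _demo_penalty_method():
    from scipy.optimize import minimize
    f = lambda x: (x[0] - 2.0) ** 2    # objective pulls x toward 2
    g = lambda x: x[0] - 1.0           # constraint x <= 1
    x0 = np.array([0.0])
    for mu in [1.0, 10.0, 100.0, 1000.0]:
        res = minimize(lambda x: f(x) + mu * max(0.0, g(x)) ** 2, x0)
        x0 = res.x                     # warm start the next, stiffer subproblem
    assert abs(x0[0] - 1.0) < 1e-2     # approaches the constrained optimum x = 1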