def test_solver_options(self):
        prog = mp.MathematicalProgram()

        prog.SetSolverOption(SolverType.kGurobi, "double_key", 1.0)
        prog.SetSolverOption(GurobiSolver().solver_id(), "int_key", 2)
        prog.SetSolverOption(SolverType.kGurobi, "string_key", "3")

        options = prog.GetSolverOptions(SolverType.kGurobi)
        self.assertDictEqual(options, {
            "double_key": 1.0,
            "int_key": 2,
            "string_key": "3"
        })
        options = prog.GetSolverOptions(GurobiSolver().solver_id())
        self.assertDictEqual(options, {
            "double_key": 1.0,
            "int_key": 2,
            "string_key": "3"
        })

        # For now, just make sure the constructor exists.  Once we bind more
        # accessors, we can test them here.
        options_object = SolverOptions()
        solver_id = SolverId("dummy")
        self.assertEqual(solver_id.name(), "dummy")
        options_object.SetOption(solver_id, "double_key", 1.0)
        options_object.SetOption(solver_id, "int_key", 2)
        options_object.SetOption(solver_id, "string_key", "3")
        options = options_object.GetOptions(solver_id)
        self.assertDictEqual(options, {
            "double_key": 1.0,
            "int_key": 2,
            "string_key": "3"
        })
Example #2
 def test_gurobi_license(self):
     # Nominal use case.
     with GurobiSolver.AcquireLicense():
         pass
     # Inspect.
     with GurobiSolver.AcquireLicense() as license:
         self.assertTrue(license.is_valid())
     self.assertFalse(license.is_valid())
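
A minimal sketch (not part of the test suite) of how the shared license shown above might be held across a batch of solves; it assumes GurobiSolver and the programs are already constructed and reuses only calls exercised in these tests:

def solve_batch(programs):
    # Hold the shared Gurobi license for the whole batch instead of
    # re-acquiring it inside every Solve() call.
    solver = GurobiSolver()
    with GurobiSolver.AcquireLicense():
        return [solver.Solve(prog, None, None) for prog in programs]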
Example #3
 def test_write_to_file(self):
     prog = mp.MathematicalProgram()
     x = prog.NewContinuousVariables(2)
     prog.AddLinearConstraint(x[0] + x[1] == 1)
     prog.AddQuadraticCost(x[0] * x[0] + x[1] * x[1])
     solver = GurobiSolver()
     file_name = temp_directory() + "/gurobi.mps"
     options = mp.SolverOptions()
     options.SetOption(solver.id(), "GRBwrite", file_name)
     result = solver.Solve(prog, None, options)
     self.assertTrue(os.path.exists(file_name))
Example #4
def linear_program_drake(f, A, b, C=None, d=None, x_bound=None, solver='gurobi', **kwargs):
    """
    Solves the linear program
    minimize f^T * x
    s. t.    A * x <= b
             C * x  = d

    INPUTS:
        f: gradient of the cost function (2D numpy array)
        A: left hand side of the constraints (2D numpy array)
        b: right hand side of the constraints (2D numpy array)
        C: left hand side of the equalities (2D numpy array)
        d: right hand side of the equalities (2D numpy array)

    OUTPUTS:
        x_min: argument which minimizes the cost (its elements are nan if unfeasible or unbounded)
        cost_min: minimum of the cost function (nan if unfeasible or unbounded)
    """

    # program dimensions
    n_variables = f.shape[0]
    n_constraints = A.shape[0]

    # build program
    prog = mp.MathematicalProgram()
    x = prog.NewContinuousVariables(n_variables, "x")
    for i in range(0, n_constraints):
        prog.AddLinearConstraint((A[i,:] + 1e-15).dot(x) <= b[i])
    if C is not None and d is not None:
        for i in range(C.shape[0]):
            prog.AddLinearConstraint(C[i, :].dot(x) == d[i])
    prog.AddLinearCost((f.flatten() + 1e-15).dot(x))

    # options
    if solver == 'gurobi':
        for (key, value) in kwargs.items():
            prog.SetSolverOption(mp.SolverType.kGurobi, key, value)

    # set bounds to the solution
    if x_bound is not None:
        for i in range(0, n_variables):
            prog.AddLinearConstraint(x[i] <= x_bound)
            prog.AddLinearConstraint(x[i] >= -x_bound)

    # solve
    if solver == 'gurobi':
        solver = GurobiSolver()
    elif solver == 'mosek':
        solver = MosekSolver()
    result = solver.Solve(prog)
    x_min = np.reshape(prog.GetSolution(x), (n_variables,1))
    cost_min = f.T.dot(x_min)

    return [x_min, cost_min]
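
A small, untested usage sketch for the helper above (assumes numpy as np, a pydrake build with a licensed Gurobi, and the same older pydrake API used in the function):

import numpy as np

# minimize x0 + x1 subject to -x <= -1 (i.e. x >= 1); the optimum is x = [1, 1].
f = np.array([[1.], [1.]])
A = -np.eye(2)
b = -np.ones((2, 1))
x_min, cost_min = linear_program_drake(f, A, b, x_bound=10.)
print(x_min.flatten(), cost_min)  # expect roughly [1. 1.] and a cost of 2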
Example #5
 def test_compute_iis(self):
     prog = mp.MathematicalProgram()
     x = prog.NewContinuousVariables(2)
     prog.AddBoundingBoxConstraint(1, np.inf, x)
     prog.AddLinearConstraint(x[0] + x[1] == 1)
     solver = GurobiSolver()
     ilp_file_name = temp_directory() + "/gurobi.ilp"
     options = mp.SolverOptions()
     options.SetOption(solver.id(), "GRBwrite", ilp_file_name)
     options.SetOption(solver.id(), "GRBcomputeIIS", 1)
     result = solver.Solve(prog, None, options)
     self.assertTrue(os.path.exists(ilp_file_name))
Example #6
    def build_mpmiqp(self):

        # express the constrained dynamics as a list of polytopes in the (x,u,x+)-space
        P = graph_representation(self.S)
        m = big_m(P)

        # initialize program
        self.prog = MathematicalProgram()
        self.x = []
        self.u = []
        self.d = []
        obj = 0.
        self.binaries_lower_bound = []

        # initial conditions (set arbitrarily to zero in the building phase)
        self.x.append(self.prog.NewContinuousVariables(self.S.nx))
        self.initial_condition = []
        for k in range(self.S.nx):
            self.initial_condition.append(self.prog.AddLinearConstraint(self.x[0][k] == 0.).evaluator())

        # loop over time
        for t in range(self.N):

            # create input, mode and next state variables
            self.u.append(self.prog.NewContinuousVariables(self.S.nu))
            self.d.append(self.prog.NewBinaryVariables(self.S.nm))
            self.x.append(self.prog.NewContinuousVariables(self.S.nx))
            
            # enforce constrained dynamics (big-m methods)
            xux = np.concatenate((self.x[t], self.u[t], self.x[t+1]))
            for i in range(self.S.nm):
                mi_sum = np.sum([m[i][j] * self.d[t][j] for j in range(self.S.nm) if j != i], axis=0)
                for k in range(P[i].A.shape[0]):
                    self.prog.AddLinearConstraint(P[i].A[k].dot(xux) <= P[i].b[k] + mi_sum[k])

            # SOS1 on the binaries
            self.prog.AddLinearConstraint(sum(self.d[t]) == 1.)

            # stage cost to the objective
            obj += .5 * self.u[t].dot(self.R).dot(self.u[t])
            obj += .5 * self.x[t].dot(self.Q).dot(self.x[t])

        # terminal constraint
        for k in range(self.X_N.A.shape[0]):
            self.prog.AddLinearConstraint(self.X_N.A[k].dot(self.x[self.N]) <= self.X_N.b[k])

        # terminal cost
        obj += .5 * self.x[self.N].dot(self.P).dot(self.x[self.N])
        self.objective = self.prog.AddQuadraticCost(obj)

        # set solver
        self.solver = GurobiSolver()
        self.prog.SetSolverOption(self.solver.solver_type(), 'OutputFlag', 1)
Example #7
 def test_gurobi_solver(self):
     prog = mp.MathematicalProgram()
     x = prog.NewContinuousVariables(2, "x")
     prog.AddLinearConstraint(x[0] >= 1)
     prog.AddLinearConstraint(x[1] >= 1)
     prog.AddQuadraticCost(np.eye(2), np.zeros(2), x)
     solver = GurobiSolver()
     self.assertTrue(solver.available())
     self.assertEqual(solver.solver_type(), mp.SolverType.kGurobi)
     result = solver.Solve(prog, None, None)
     self.assertTrue(result.is_success())
     x_expected = np.array([1, 1])
     self.assertTrue(np.allclose(result.GetSolution(x), x_expected))
Example #8
 def test_gurobi_socp_dual(self):
     prog = mp.MathematicalProgram()
     x = prog.NewContinuousVariables(2, "x")
     constraint = prog.AddLorentzConeConstraint(
         [2., 2 * x[0], 3 * x[1] + 1])
     prog.AddLinearCost(x[1])
     solver = GurobiSolver()
     options = mp.SolverOptions()
     options.SetOption(solver.solver_id(), "QCPDual", 1)
     result = solver.Solve(prog, None, options)
     np.testing.assert_allclose(result.GetDualSolution(constraint),
                                np.array([-1. / 12]),
                                atol=1e-7)
Example #9
 def test_api(self):
     plant = MultibodyPlant(time_step=0.01)
     model_instance = Parser(plant).AddModelFromFile(FindResourceOrThrow(
             "drake/bindings/pydrake/multibody/test/two_bodies.sdf"))
     plant.Finalize()
     context = plant.CreateDefaultContext()
     options = ik.GlobalInverseKinematics.Options()
     global_ik = ik.GlobalInverseKinematics(plant=plant, options=options)
     self.assertIsInstance(global_ik.prog(), mp.MathematicalProgram)
     self.assertIsInstance(global_ik.get_mutable_prog(),
                           mp.MathematicalProgram)
     body_index_A = plant.GetBodyIndices(model_instance)[0]
     body_index_B = plant.GetBodyIndices(model_instance)[1]
     self.assertEqual(
         global_ik.body_rotation_matrix(body_index=body_index_A).shape,
         (3, 3))
     self.assertEqual(
         global_ik.body_position(body_index=body_index_A).shape, (3, ))
     global_ik.AddWorldPositionConstraint(
         body_index=body_index_A,
         p_BQ=[0, 0, 0],
         box_lb_F=[-np.inf, -np.inf, -np.inf],
         box_ub_F=[np.inf, np.inf, np.inf],
         X_WF=RigidTransform())
     global_ik.AddWorldRelativePositionConstraint(
         body_index_B=body_index_B,
         p_BQ=[0, 0, 0],
         body_index_A=body_index_A,
         p_AP=[0, 0, 0],
         box_lb_F=[-np.inf, -np.inf, -np.inf],
         box_ub_F=[np.inf, np.inf, np.inf],
         X_WF=RigidTransform())
     global_ik.AddWorldOrientationConstraint(
         body_index=body_index_A,
         desired_orientation=Quaternion(),
         angle_tol=np.inf)
     global_ik.AddPostureCost(
         q_desired=plant.GetPositions(context),
         body_position_cost=[1] * plant.num_bodies(),
         body_orientation_cost=[1] * plant.num_bodies())
     gurobi_solver = GurobiSolver()
     if gurobi_solver.available():
         global_ik.SetInitialGuess(q=plant.GetPositions(context))
         result = gurobi_solver.Solve(global_ik.prog())
         self.assertTrue(result.is_success())
         global_ik.ReconstructGeneralizedPositionSolution(result=result)
Example #10
 def test_gurobi_solver(self):
     prog = mp.MathematicalProgram()
     x = prog.NewContinuousVariables(2, "x")
     prog.AddLinearConstraint(x[0] >= 1)
     prog.AddLinearConstraint(x[1] >= 1)
     prog.AddQuadraticCost(np.eye(2), np.zeros(2), x)
     solver = GurobiSolver()
     self.assertTrue(solver.available())
     self.assertEqual(solver.solver_type(), mp.SolverType.kGurobi)
     result = solver.Solve(prog, None, None)
     self.assertTrue(result.is_success())
     x_expected = np.array([1, 1])
     self.assertTrue(np.allclose(result.GetSolution(x), x_expected))
     self.assertGreater(result.get_solver_details().optimizer_time, 0.)
     self.assertEqual(result.get_solver_details().error_code, 0)
     self.assertEqual(result.get_solver_details().optimization_status, 2)
     self.assertTrue(np.isnan(result.get_solver_details().objective_bound))
Example #11
    def test_mixed_integer_optimization(self):
        prog = mp.MathematicalProgram()
        x = prog.NewBinaryVariables(3, "x")
        c = np.array([-1.0, -1.0, -2.0])
        prog.AddLinearCost(c.dot(x))
        a = np.array([1.0, 2.0, 3.0])
        prog.AddLinearConstraint(a.dot(x) <= 4)
        prog.AddLinearConstraint(x[0] + x[1], 1, np.inf)
        solver = GurobiSolver()
        result = solver.Solve(prog, None, None)
        self.assertTrue(result.is_success())

        # Test that we got the right solution for all x
        x_expected = np.array([1.0, 0.0, 1.0])
        self.assertTrue(np.all(np.isclose(result.GetSolution(x), x_expected)))

        # Also test by asking for the value of each element of x
        for i in range(3):
            self.assertAlmostEqual(result.GetSolution(x[i]), x_expected[i])
Example #12
 def solve(self):
     solver = GurobiSolver()
     self.prog.SetSolverOption(mp.SolverType.kGurobi, "LogToConsole", 1)
     self.prog.SetSolverOption(mp.SolverType.kGurobi, "OutputFlag", 1)
     start_time = time.time()
     result = solver.Solve(self.prog)
     solve_time = time.time() - start_time
     assert result == mp.SolutionResult.kSolutionFound
     states, inputs, contact = self.prog.extract_solution(
         self.robot, self.vars.qcom, self.vars.qlimb, self.vars.contact,
         self.vars.contact_force, self.vars.contact_lambda,
         self.vars.contact_sequence_array)
     ts = states.components[0].breaks
     return SolutionData(opt=self,
                         states=states,
                         inputs=inputs,
                         contact_indicator=contact,
                         ts=ts,
                         solve_time=solve_time)
Example #13
def quadratic_program(H, f, A, b, C=None, d=None):
    """
    Solves the convex (i.e., H > 0) quadratic program
    minimize x^T * H * x + f^T * x
    s. t.    A * x <= b
             C * x  = d

    INPUTS:
        H: Hessian of the cost function (2D numpy array)
        f: linear term of the cost function (2D numpy array)
        A: left hand side of the inequalities (2D numpy array)
        b: right hand side of the inequalities (2D numpy array)
        C: left hand side of the equalities (2D numpy array)
        d: right hand side of the equalities (2D numpy array)

    OUTPUTS:
        x_min: argument which minimizes the cost (its elements are nan if unfeasible or unbounded)
        cost_min: minimum of the cost function (nan if unfeasible or unbounded)
    """

    # program dimensions
    n_variables = f.shape[0]
    n_constraints = A.shape[0]

    # build program
    prog = mp.MathematicalProgram()
    x = prog.NewContinuousVariables(n_variables, "x")
    for i in range(0, n_constraints):
        prog.AddLinearConstraint((A[i,:] + 1e-15).dot(x) <= b[i])
    if C is not None and d is not None:
        for i in range(C.shape[0]):
            prog.AddLinearConstraint(C[i, :].dot(x) == d[i])
    prog.AddQuadraticCost(H, f, x)

    # solve
    solver = GurobiSolver()
    result = solver.Solve(prog)
    x_min = np.reshape(prog.GetSolution(x), (n_variables,1))
    cost_min = .5*x_min.T.dot(H.dot(x_min)) + f.T.dot(x_min)

    return [x_min, cost_min]
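
A matching untested sketch for quadratic_program (same assumptions as for the LP helper above):

import numpy as np

# minimize .5*x'Hx + f'x with H = I and f = [-1, -1]', subject to x <= 2 elementwise;
# the unconstrained optimum x = [1, 1] already satisfies the inequalities.
H = np.eye(2)
f = np.array([[-1.], [-1.]])
A = np.eye(2)
b = 2. * np.ones((2, 1))
x_min, cost_min = quadratic_program(H, f, A, b)
print(x_min.flatten(), cost_min)  # expect roughly [1. 1.] and a cost of -1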
Example #14
    def test_callback(self):
        prog = mp.MathematicalProgram()
        b = prog.NewBinaryVariables(4)
        prog.AddLinearConstraint(b[0] <= 1 - 0.5 * b[1])
        prog.AddLinearConstraint(b[1] <= 1 - 0.5 * b[0])
        prog.AddLinearCost(-b[0] - b[1])

        prog.SetSolverOption(GurobiSolver.id(), "Presolve", 0)
        prog.SetSolverOption(GurobiSolver.id(), "Heuristics", 0.)
        prog.SetSolverOption(GurobiSolver.id(), "Cuts", 0)
        prog.SetSolverOption(GurobiSolver.id(), "NodeMethod", 2)

        b_init = np.array([0, 0., 0., 0.])

        prog.SetInitialGuess(b, b_init)
        solver = GurobiSolver()

        explored_node_count = 0

        def node_callback(prog, solver_status_info, x, x_vals):
            nonlocal explored_node_count
            explored_node_count = solver_status_info.explored_node_count

        solver.AddMipNodeCallback(
            callback=lambda prog, solver_status_info, x, x_vals: node_callback(
                prog, solver_status_info, x, x_vals))

        best_objectives = []

        def sol_callback(prog, callback_info, objectives):
            print(f"explored nodes {callback_info.explored_node_count}")
            objectives.append(callback_info.best_objective)

        solver.AddMipSolCallback(
            callback=lambda prog, callback_info: sol_callback(
                prog, callback_info, best_objectives))

        result = solver.Solve(prog)
        self.assertTrue(result.is_success())
        self.assertGreater(explored_node_count, 0)
        self.assertGreater(len(best_objectives), 0)
Example #15
class TestMathematicalProgram(unittest.TestCase):
    def test_program_construction(self):
        prog = mp.MathematicalProgram()
        vars = prog.NewContinuousVariables(5, "x")
        self.assertEqual(vars.dtype, sym.Variable)
        vars_all = prog.decision_variables()
        self.assertEqual(vars_all.shape, (5, ))

    def test_program_attributes_and_solver_selection(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")

        # Add linear equality constraints; make sure the solver works.
        prog.AddLinearConstraint(x[0] + x[1] == 0)
        prog.AddLinearConstraint(2 * x[0] - x[1] == 1)
        solver_id = mp.ChooseBestSolver(prog)
        self.assertEqual(solver_id.name(), "Linear system")
        solver = mp.MakeSolver(solver_id)
        self.assertEqual(solver.solver_id().name(), "Linear system")
        self.assertTrue(solver.AreProgramAttributesSatisfied(prog))
        result = solver.Solve(prog, None, None)
        self.assertTrue(result.is_success())

        # With an inequality constraint added, the "Linear system" solver
        # doesn't work anymore.
        prog.AddLinearConstraint(x[0] >= 0)
        self.assertFalse(solver.AreProgramAttributesSatisfied(prog))
        with self.assertRaises(ValueError):
            solver.Solve(prog, None, None)

        # A different solver will work, though.  We re-use the result object
        # (as a mutable output argument), and make sure that it changes.
        solver_id = mp.ChooseBestSolver(prog)
        self.assertNotEqual(solver_id.name(), "Linear system")
        solver = mp.MakeSolver(solver_id)
        solver.Solve(prog, None, None, result)
        self.assertTrue(result.is_success())
        self.assertEqual(result.get_solver_id().name(), solver_id.name())

    def test_module_level_solve_function_and_result_accessors(self):
        qp = TestQP()
        x_expected = np.array([1, 1])
        result = mp.Solve(qp.prog)
        self.assertTrue(result.is_success())
        self.assertTrue(np.allclose(result.get_x_val(), x_expected))
        self.assertEqual(result.get_solution_result(),
                         mp.SolutionResult.kSolutionFound)
        self.assertEqual(result.get_optimal_cost(), 3.0)
        self.assertTrue(result.get_solver_id().name())
        self.assertTrue(np.allclose(result.GetSolution(), x_expected))
        self.assertAlmostEqual(result.GetSolution(qp.x[0]), 1.0)
        self.assertTrue(np.allclose(result.GetSolution(qp.x), x_expected))
        self.assertTrue(
            result.GetSolution(sym.Expression(qp.x[0])).EqualTo(
                result.GetSolution(qp.x[0])))
        m = np.array([sym.Expression(qp.x[0]), sym.Expression(qp.x[1])])
        self.assertTrue(
            result.GetSolution(m)[1, 0].EqualTo(result.GetSolution(qp.x[1])))

        x_val_new = np.array([1, 2])
        result.set_x_val(x_val_new)
        np.testing.assert_array_equal(x_val_new, result.get_x_val())


    # TODO(jwnimmer-tri) MOSEK is also able to solve mixed integer programs;
    # perhaps we should test both of them?
    @unittest.skipUnless(GurobiSolver().available(), "Requires Gurobi")
    def test_mixed_integer_optimization(self):
        prog = mp.MathematicalProgram()
        x = prog.NewBinaryVariables(3, "x")
        c = np.array([-1.0, -1.0, -2.0])
        prog.AddLinearCost(c.dot(x))
        a = np.array([1.0, 2.0, 3.0])
        prog.AddLinearConstraint(a.dot(x) <= 4)
        prog.AddLinearConstraint(x[0] + x[1], 1, np.inf)
        prog.AddConstraint(
            LinearConstraint(np.array([[1., 1.]]), np.array([1]),
                             np.array([np.inf])), [x[0], x[1]])
        solver = GurobiSolver()
        result = solver.Solve(prog, None, None)
        self.assertTrue(result.is_success())

        # Test that we got the right solution for all x
        x_expected = np.array([1.0, 0.0, 1.0])
        self.assertTrue(np.all(np.isclose(result.GetSolution(x), x_expected)))

        # Also test by asking for the value of each element of x
        for i in range(3):
            self.assertAlmostEqual(result.GetSolution(x[i]), x_expected[i])

    def test_qp(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")
        # N.B. Scalar-wise logical ops work for Expression, but array ops need
        # the workaround overloads from `pydrake.math`.
        prog.AddLinearConstraint(ge(x, 1))
        prog.AddQuadraticCost(np.eye(2), np.zeros(2), x)
        prog.AddQuadraticCost(np.eye(2), np.zeros(2), 1, x)
        # Redundant cost just to check the spelling.
        prog.AddQuadraticErrorCost(vars=x, Q=np.eye(2), x_desired=np.zeros(2))
        prog.AddL2NormCost(A=np.eye(2), b=np.zeros(2), vars=x)

        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

        x_expected = np.array([1, 1])
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))

    def test_symbolic_qp(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")
        prog.AddConstraint(x[0], 1., 100.)
        prog.AddConstraint(x[1] >= 1)
        prog.AddQuadraticCost(x[0]**2 + x[1]**2)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

        x_expected = np.array([1, 1])
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))

    def test_bindings(self):
        qp = TestQP()
        self.assertEqual(str(qp.constraints[0]),
                         "BoundingBoxConstraint\n1 <= x(0) <= inf\n")
        prog = qp.prog
        x = qp.x

        self.assertEqual(prog.FindDecisionVariableIndices(vars=[x[0], x[1]]),
                         [0, 1])
        self.assertEqual(prog.decision_variable_index()[x[0].get_id()], 0)
        self.assertEqual(prog.decision_variable_index()[x[1].get_id()], 1)

        for binding in prog.GetAllCosts():
            self.assertIsInstance(binding.evaluator(), mp.Cost)
        for binding in prog.GetLinearConstraints():
            self.assertIsInstance(binding.evaluator(), mp.Constraint)
        for binding in prog.GetAllConstraints():
            self.assertIsInstance(binding.evaluator(), mp.Constraint)

        self.assertTrue(prog.linear_costs())
        for (i, binding) in enumerate(prog.linear_costs()):
            cost = binding.evaluator()
            self.assertTrue(np.allclose(cost.a(), np.ones((1, 2))))
            self.assertIsNone(cost.gradient_sparsity_pattern())

        self.assertTrue(prog.quadratic_costs())
        for (i, binding) in enumerate(prog.quadratic_costs()):
            cost = binding.evaluator()
            self.assertTrue(np.allclose(cost.Q(), np.eye(2)))
            self.assertTrue(np.allclose(cost.b(), np.zeros(2)))
            self.assertIsNone(cost.gradient_sparsity_pattern())

        self.assertTrue(prog.bounding_box_constraints())
        for (i, binding) in enumerate(prog.bounding_box_constraints()):
            constraint = binding.evaluator()
            self.assertEqual(
                prog.FindDecisionVariableIndex(var=binding.variables()[0]),
                prog.FindDecisionVariableIndex(var=x[i]))
            self.assertIsNone(constraint.gradient_sparsity_pattern())
            num_constraints = constraint.num_constraints()
            if num_constraints == 1:
                self.assertEqual(constraint.A(), 1)
                self.assertEqual(constraint.lower_bound(), 1)
                self.assertEqual(constraint.upper_bound(), np.inf)
            else:
                self.assertTrue(np.allclose(constraint.A(), np.eye(2)))
                self.assertTrue(
                    np.allclose(constraint.lower_bound(), [1, -np.inf]))
                self.assertTrue(
                    np.allclose(constraint.upper_bound(), [np.inf, 2]))

        self.assertTrue(prog.linear_constraints())
        for (i, binding) in enumerate(prog.linear_constraints()):
            constraint = binding.evaluator()
            self.assertIsNone(constraint.gradient_sparsity_pattern())
            self.assertEqual(
                prog.FindDecisionVariableIndex(var=binding.variables()[0]),
                prog.FindDecisionVariableIndex(var=x[0]))
            self.assertEqual(
                prog.FindDecisionVariableIndex(var=binding.variables()[1]),
                prog.FindDecisionVariableIndex(var=x[1]))
            self.assertTrue(np.allclose(constraint.A(), [3, -1]))
            self.assertTrue(np.allclose(constraint.lower_bound(), [-2]))
            self.assertTrue(np.allclose(constraint.upper_bound(), [np.inf]))

        self.assertTrue(prog.linear_equality_constraints())
        for (i, binding) in enumerate(prog.linear_equality_constraints()):
            constraint = binding.evaluator()
            self.assertIsNone(constraint.gradient_sparsity_pattern())
            self.assertEqual(
                prog.FindDecisionVariableIndex(var=binding.variables()[0]),
                prog.FindDecisionVariableIndex(var=x[0]))
            self.assertEqual(
                prog.FindDecisionVariableIndex(var=binding.variables()[1]),
                prog.FindDecisionVariableIndex(var=x[1]))
            self.assertTrue(np.allclose(constraint.A(), [1, 2]))
            self.assertTrue(np.allclose(constraint.lower_bound(), [3]))
            self.assertTrue(np.allclose(constraint.upper_bound(), [3]))

        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

        x_expected = np.array([1, 1])
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))

    def test_constraint_api(self):
        prog = mp.MathematicalProgram()
        x0, = prog.NewContinuousVariables(1, "x")
        c = prog.AddLinearConstraint(x0 >= 2).evaluator()
        ce = prog.AddLinearEqualityConstraint(2 * x0, 1).evaluator()

        self.assertTrue(c.CheckSatisfied([2.], tol=1e-3))
        satisfied = c.CheckSatisfiedVectorized(np.array([1., 2., 3.]).reshape(
            (1, 3)),
                                               tol=1e-3)
        self.assertEqual(satisfied, [False, True, True])
        self.assertFalse(c.CheckSatisfied([AutoDiffXd(1.)]))
        self.assertIsInstance(c.CheckSatisfied([x0]), sym.Formula)

        ce.set_description("my favorite constraint")
        self.assertEqual(ce.get_description(), "my favorite constraint")

        def check_bounds(c, A, lb, ub):
            self.assertTrue(np.allclose(c.A(), A))
            self.assertTrue(np.allclose(c.lower_bound(), lb))
            self.assertTrue(np.allclose(c.upper_bound(), ub))

        check_bounds(c, [1.], [2.], [np.inf])
        c.UpdateLowerBound([3.])
        check_bounds(c, [1.], [3.], [np.inf])
        c.UpdateUpperBound([4.])
        check_bounds(c, [1.], [3.], [4.])
        c.set_bounds([-10.], [10.])
        check_bounds(c, [1.], [-10.], [10.])
        c.UpdateCoefficients([10.], [-20.], [-30.])
        check_bounds(c, [10.], [-20.], [-30.])

        check_bounds(ce, [2.], [1.], [1.])
        ce.UpdateCoefficients([10.], [20.])
        check_bounds(ce, [10.], [20.], [20.])

    def test_cost_api(self):
        prog = mp.MathematicalProgram()
        x0, = prog.NewContinuousVariables(1, "x")
        lc = prog.AddLinearCost([1], 2, [x0]).evaluator()
        qc = prog.AddQuadraticCost(0.5 * x0**2 + 2 * x0 + 3).evaluator()

        def check_linear_cost(cost, a, b):
            self.assertTrue(np.allclose(cost.a(), a))
            self.assertTrue(np.allclose(cost.b(), b))

        check_linear_cost(lc, [1.], 2.)
        lc.UpdateCoefficients([10.])
        check_linear_cost(lc, [10.], 0.)

        def check_quadratic_cost(cost, Q, b, c):
            self.assertTrue(np.allclose(cost.Q(), Q))
            self.assertTrue(np.allclose(cost.b(), b))
            self.assertTrue(np.allclose(cost.c(), c))

        check_quadratic_cost(qc, [1.], [2.], 3.)
        qc.UpdateCoefficients([10.], [20.])
        check_quadratic_cost(qc, [10.], [20.], 0)

    def test_eval_binding(self):
        qp = TestQP()
        prog = qp.prog

        x = qp.x
        x_expected = np.array([1., 1.])

        costs = qp.costs
        cost_values_expected = [2., 1.]
        constraints = qp.constraints
        constraint_values_expected = [1., 1., 2., 3.]

        result = mp.Solve(prog)
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))

        enum = zip(constraints, constraint_values_expected)
        for (constraint, value_expected) in enum:
            value = result.EvalBinding(constraint)
            self.assertTrue(np.allclose(value, value_expected))
            value = prog.EvalBinding(constraint, x_expected)
            self.assertTrue(np.allclose(value, value_expected))
            value = prog.EvalBindingVectorized(
                constraint,
                np.vstack((x_expected, x_expected)).T)
            value_expected_vectorized = np.vstack(
                (value_expected, value_expected)).T
            self.assertTrue(np.allclose(value, value_expected_vectorized))

        enum = zip(costs, cost_values_expected)
        for (cost, value_expected) in enum:
            value = result.EvalBinding(cost)
            self.assertTrue(np.allclose(value, value_expected))
            value = prog.EvalBinding(cost, x_expected)
            self.assertTrue(np.allclose(value, value_expected))
            value = prog.EvalBindingVectorized(
                cost,
                np.vstack((x_expected, x_expected)).T)
            self.assertTrue(
                np.allclose(value,
                            np.vstack((value_expected, value_expected)).T))

        self.assertIsInstance(result.EvalBinding(costs[0]), np.ndarray)

        # Bindings for `Eval`.
        x_list = (float(1.), AutoDiffXd(1.), sym.Variable("x"))
        T_y_list = (float, AutoDiffXd, sym.Expression)
        evaluator = costs[0].evaluator()
        for x_i, T_y_i in zip(x_list, T_y_list):
            y_i = evaluator.Eval(x=[x_i, x_i])
            self.assertIsInstance(y_i[0], T_y_i)

    def test_get_binding_variable_values(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(3)
        binding1 = prog.AddBoundingBoxConstraint(-1, 1, x[0])
        binding2 = prog.AddLinearEqualityConstraint(x[1] + 2 * x[2], 2)
        x_val = np.array([-2., 1., 2.])
        np.testing.assert_allclose(
            prog.GetBindingVariableValues(binding1, x_val), np.array([-2]))
        np.testing.assert_allclose(
            prog.GetBindingVariableValues(binding2, x_val), np.array([1, 2]))

    def test_matrix_variables(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, 2, "x")
        for i in range(2):
            for j in range(2):
                prog.AddLinearConstraint(x[i, j] == 2 * i + j)
        result = mp.Solve(prog)
        xval = result.GetSolution(x)
        for i in range(2):
            for j in range(2):
                self.assertAlmostEqual(xval[i, j], 2 * i + j)
                self.assertEqual(xval[i, j], result.GetSolution(x[i, j]))
        # Just check spelling.
        y = prog.NewIndeterminates(2, 2, "y")

    def test_sdp(self):
        prog = mp.MathematicalProgram()
        S = prog.NewSymmetricContinuousVariables(3, "S")
        prog.AddLinearConstraint(S[0, 1] >= 1)
        prog.AddPositiveSemidefiniteConstraint(S)
        prog.AddPositiveSemidefiniteConstraint(S + S)
        prog.AddLinearCost(np.trace(S))
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())
        S = result.GetSolution(S)
        eigs = np.linalg.eigvals(S)
        tol = 1e-8
        self.assertTrue(np.all(eigs >= -tol))
        self.assertTrue(S[0, 1] >= -tol)

    def test_nonnegative_polynomial(self):
        # Only check if the API works.
        prog = mp.MathematicalProgram()
        x = prog.NewIndeterminates(3, "x")
        (poly1, grammian1) = prog.NewNonnegativePolynomial(
            indeterminates=sym.Variables(x),
            degree=4,
            type=mp.MathematicalProgram.NonnegativePolynomial.kSdsos)
        self.assertIsInstance(poly1, sym.Polynomial)
        self.assertIsInstance(grammian1, np.ndarray)

        grammian2 = prog.NewSymmetricContinuousVariables(2)
        poly2 = prog.NewNonnegativePolynomial(
            grammian=grammian2,
            monomial_basis=(sym.Monomial(x[0]), sym.Monomial(x[1])),
            type=mp.MathematicalProgram.NonnegativePolynomial.kDsos)
        self.assertIsInstance(grammian2, np.ndarray)

        poly3, grammian3 = prog.NewNonnegativePolynomial(
            monomial_basis=(sym.Monomial(x[0]), sym.Monomial(x[1])),
            type=mp.MathematicalProgram.NonnegativePolynomial.kSos)
        self.assertIsInstance(poly3, sym.Polynomial)
        self.assertIsInstance(grammian3, np.ndarray)

    def test_sos(self):
        # Find a,b,c,d subject to
        # a(0) + a(1)*x,
        # b(0) + 2*b(1)*x + b(2)*x^2 is SOS,
        # c(0)*x^2 + 2*c(1)*x*y + c(2)*y^2 is SOS,
        # d(0)*x^2 is SOS.
        # d(1)*x^2 is SOS.
        # d(0) + d(1) = 1
        prog = mp.MathematicalProgram()
        x = prog.NewIndeterminates(1, "x")
        self.assertEqual(prog.indeterminates_index()[x[0].get_id()], 0)
        poly = prog.NewFreePolynomial(sym.Variables(x), 1)
        (poly,
         binding) = prog.NewSosPolynomial(indeterminates=sym.Variables(x),
                                          degree=2)
        even_poly = prog.NewEvenDegreeFreePolynomial(sym.Variables(x), 2)
        odd_poly = prog.NewOddDegreeFreePolynomial(sym.Variables(x), 3)
        y = prog.NewIndeterminates(1, "y")
        self.assertEqual(prog.indeterminates_index()[y[0].get_id()], 1)
        (poly,
         binding) = prog.NewSosPolynomial(monomial_basis=(sym.Monomial(x[0]),
                                                          sym.Monomial(y[0])))
        d = prog.NewContinuousVariables(2, "d")
        prog.AddSosConstraint(d[0] * x.dot(x))
        prog.AddSosConstraint(d[1] * x.dot(x), [sym.Monomial(x[0])])
        prog.AddLinearEqualityConstraint(d[0] + d[1] == 1)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())
        result.GetSolution(poly)

        (poly, Q_oo, Q_ee) = prog.NewEvenDegreeSosPolynomial(
            indeterminates=sym.Variables(x), degree=2)
        (poly, Q_oo, Q_ee) = prog.NewEvenDegreeSdsosPolynomial(
            indeterminates=sym.Variables(x), degree=2)
        (poly, Q_oo, Q_ee) = prog.NewEvenDegreeDsosPolynomial(
            indeterminates=sym.Variables(x), degree=2)

    def test_make_polynomial(self):
        prog = mp.MathematicalProgram()
        x = prog.NewIndeterminates(1, "x")[0]
        a = prog.NewContinuousVariables(1, "a")[0]
        # e = (a + 1)x² + 2ax + 3a.
        e = (a + 1) * (x * x) + (2 * a) * x + 3 * a

        # We create a polynomial of `e` via MakePolynomial.
        p = prog.MakePolynomial(e)
        # Check its indeterminates and decision variables are correctly set,
        self.assertEqual(p.indeterminates().size(), 1)
        self.assertTrue(p.indeterminates().include(x))
        self.assertEqual(p.decision_variables().size(), 1)
        self.assertTrue(p.decision_variables().include(a))
        # Check if it holds the same expression when converted back to
        # symbolic expression.
        self.assertTrue(p.ToExpression().EqualTo(e))

    def test_reparse(self):
        prog = mp.MathematicalProgram()
        x = prog.NewIndeterminates(1, "x")[0]
        a = prog.NewContinuousVariables(1, "a")[0]
        e = (a + 1) * (x * x) + (2 * a) * x + 3 * a

        # p = (x^2 + 2x + 3)a + x^2 with indeterminates {a}.
        p = sym.Polynomial(e, [a])
        self.assertEqual(p.TotalDegree(), 1)

        # p = (a + 1)x² + 2ax + 3a with indeterminates {x}.
        prog.Reparse(p)
        self.assertEqual(p.TotalDegree(), 2)

    def test_equality_between_polynomials(self):
        prog = mp.MathematicalProgram()
        x = prog.NewIndeterminates(1, "x")
        a = prog.NewContinuousVariables(2, "a")
        prog.AddEqualityConstraintBetweenPolynomials(
            sym.Polynomial(2 * a[0] * x[0] + a[1] + 2, x),
            sym.Polynomial(2 * x[0] + 4, x))
        result = mp.Solve(prog)
        a_val = result.GetSolution(a)
        self.assertAlmostEqual(a_val[0], 1)
        self.assertAlmostEqual(a_val[1], 2)

    def test_log_determinant(self):
        # Find the minimal ellipsoid that covers some given points.
        prog = mp.MathematicalProgram()
        X = prog.NewSymmetricContinuousVariables(2)
        pts = np.array([[1, 1], [1, -1], [-1, 1]])
        for i in range(3):
            pt = pts[i, :]
            prog.AddLinearConstraint(pt.dot(X.dot(pt)) <= 1)
        prog.AddMaximizeLogDeterminantSymmetricMatrixCost(X)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

    def test_maximize_geometric_mean(self):
        # Find the smallest axis-aligned ellipsoid that covers some given
        # points.
        prog = mp.MathematicalProgram()
        a = prog.NewContinuousVariables(2)
        pts = np.array([[1, 1], [1, -1], [-1, 1]])
        for i in range(3):
            pt = pts[i, :]
            prog.AddLinearConstraint(pt.dot(a * pt) <= 1)
        prog.AddMaximizeGeometricMeanCost(a, 1)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

    def test_max_geometric_mean_trivial(self):
        # Solve the trivial problem.
        # max (2x+3)*(3x+2)
        # s.t 2x+3 >= 0
        #     3x+2 >= 0
        #     x <= 10
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1)
        prog.AddLinearConstraint(x[0] <= 10)
        A = np.array([2, 3])
        b = np.array([3, 2])
        prog.AddMaximizeGeometricMeanCost(A, b, x)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

    def test_lcp(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, 'x')
        M = np.array([[1, 3], [4, 1]])
        q = np.array([-16, -15])
        binding = prog.AddLinearComplementarityConstraint(M, q, x)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())
        self.assertIsInstance(binding.evaluator(),
                              mp.LinearComplementarityConstraint)

    def test_linear_constraints(self):
        # TODO(eric.cousineau): Add more general tests
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, 'x')
        lb = [0., 0.]
        ub = [1., 1.]
        prog.AddBoundingBoxConstraint(lb, ub, x)
        prog.AddBoundingBoxConstraint(0., 1., x[0])
        prog.AddBoundingBoxConstraint(0., 1., x)
        prog.AddLinearConstraint(A=np.eye(2),
                                 lb=np.zeros(2),
                                 ub=np.ones(2),
                                 vars=x)
        prog.AddLinearConstraint(e=x[0], lb=0, ub=1)
        prog.AddLinearConstraint(v=x, lb=[0, 0], ub=[1, 1])
        prog.AddLinearConstraint(f=(x[0] == 0))

        prog.AddLinearEqualityConstraint(np.eye(2), np.zeros(2), x)
        prog.AddLinearEqualityConstraint(x[0] == 1)
        prog.AddLinearEqualityConstraint(x[0] + x[1], 1)
        prog.AddLinearEqualityConstraint(2 * x[:2] + np.array([0, 1]),
                                         np.array([3, 2]))

    def test_constraint_set_bounds(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")

        def constraint(x):
            return x[1]**2

        binding = prog.AddConstraint(constraint, [0], [1], vars=x)
        self.assertIsInstance(binding.evaluator(), PyFunctionConstraint)
        np.testing.assert_array_equal(binding.evaluator().lower_bound(),
                                      np.array([0.]))
        np.testing.assert_array_equal(binding.evaluator().upper_bound(),
                                      np.array([1.]))
        # Test UpdateLowerBound()
        binding.evaluator().UpdateLowerBound(new_lb=[-1.])
        np.testing.assert_array_equal(binding.evaluator().lower_bound(),
                                      np.array([-1.]))
        np.testing.assert_array_equal(binding.evaluator().upper_bound(),
                                      np.array([1.]))
        # Test UpdateUpperBound()
        binding.evaluator().UpdateUpperBound(new_ub=[2.])
        np.testing.assert_array_equal(binding.evaluator().lower_bound(),
                                      np.array([-1.]))
        np.testing.assert_array_equal(binding.evaluator().upper_bound(),
                                      np.array([2.]))
        # Test set_bounds()
        binding.evaluator().set_bounds(lower_bound=[-3.], upper_bound=[4.])
        np.testing.assert_array_equal(binding.evaluator().lower_bound(),
                                      np.array([-3.]))
        np.testing.assert_array_equal(binding.evaluator().upper_bound(),
                                      np.array([4.]))

    def test_constraint_gradient_sparsity(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")

        def cost(x):
            return x[0]**2

        def constraint(x):
            return x[1]**2

        cost_binding = prog.AddCost(cost, vars=x)
        constraint_binding = prog.AddConstraint(constraint, [0], [1], vars=x)
        cost_evaluator = cost_binding.evaluator()
        constraint_evaluator = constraint_binding.evaluator()
        self.assertIsNone(cost_evaluator.gradient_sparsity_pattern())
        self.assertIsNone(constraint_evaluator.gradient_sparsity_pattern())
        # Now set the sparsity
        cost_evaluator.SetGradientSparsityPattern([(0, 0)])
        self.assertEqual(cost_evaluator.gradient_sparsity_pattern(), [(0, 0)])
        constraint_binding.evaluator().SetGradientSparsityPattern([(0, 1)])
        self.assertEqual(constraint_evaluator.gradient_sparsity_pattern(),
                         [(0, 1)])

    def test_pycost_and_pyconstraint(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1, 'x')

        def cost(x):
            return (x[0] - 1.) * (x[0] - 1.)

        def constraint(x):
            return x

        cost_binding = prog.AddCost(cost, vars=x)
        constraint_binding = prog.AddConstraint(constraint,
                                                lb=[0.],
                                                ub=[2.],
                                                vars=x)
        result = mp.Solve(prog)
        xstar = result.GetSolution(x)
        self.assertAlmostEqual(xstar[0], 1.)

        # Verify that they can be evaluated.
        self.assertAlmostEqual(cost_binding.evaluator().Eval(xstar), 0.)
        self.assertAlmostEqual(constraint_binding.evaluator().Eval(xstar), 1.)
        self.assertEqual(len(prog.generic_constraints()), 1)
        self.assertEqual(prog.generic_constraints()[0].evaluator(),
                         constraint_binding.evaluator())
        self.assertEqual(len(prog.generic_costs()), 1)
        self.assertEqual(prog.generic_costs()[0].evaluator(),
                         cost_binding.evaluator())

    def get_different_scalar_type(self, T):
        # Gets U such that U != T.
        next_index = SCALAR_TYPES.index(T) + 1
        U = SCALAR_TYPES[next_index % len(SCALAR_TYPES)]
        self.assertNotEqual(U, T)
        return U

    def test_pycost_wrap_error(self):
        """Tests for checks using PyFunctionCost::Wrap."""

        # TODO(eric.cousineau): It would be nice to not need a
        # MathematicalProgram to test these.

        def user_cost_bad_shape(x):
            # WARNING: This should return a scalar, not a vector!
            return x

        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1, 'x')
        binding_bad_shape = prog.AddCost(user_cost_bad_shape, vars=x)

        for T in SCALAR_TYPES:
            array_T = np.vectorize(T)
            x0 = array_T([0.])
            x0_bad = array_T([0., 1.])
            # Bad input (before function is called).
            if kDrakeAssertIsArmed:
                # See note in `WrapUserFunc`.
                input_error_cls = SystemExit
                input_error_expected = (
                    "x.rows() == num_vars_ || num_vars_ == Eigen::Dynamic")
            else:
                input_error_cls = RuntimeError
                input_error_expected = (
                    "PyFunctionCost: Input must be of .ndim = 1 or 2 (vector) "
                    "and .size = 1. Got .ndim = 1 and .size = 2 instead.")
            with self.assertRaises(input_error_cls) as cm:
                binding_bad_shape.evaluator().Eval(x0_bad)
            self.assertIn(input_error_expected, str(cm.exception))
            # Bad output shape.
            with self.assertRaises(RuntimeError) as cm:
                binding_bad_shape.evaluator().Eval(x0)
            self.assertEqual(
                str(cm.exception),
                "PyFunctionCost: Output must be of .ndim = 0 (scalar) and "
                ".size = 1. Got .ndim = 1 and .size = 1 instead.")

            # Bad output dtype.
            U = self.get_different_scalar_type(T)

            def user_cost_bad_dtype(x):
                # WARNING: This should return the same dtype as x!
                return U(0.)

            binding_bad_dtype = prog.AddCost(user_cost_bad_dtype, vars=x)
            with self.assertRaises(RuntimeError) as cm:
                binding_bad_dtype.evaluator().Eval(x0)
            self.assertEqual(
                str(cm.exception),
                f"PyFunctionCost: Output must be of scalar type {T.__name__}. "
                f"Got {U.__name__} instead.")

    def test_pyconstraint_wrap_error(self):
        """Tests for checks using PyFunctionConstraint::Wrap."""

        # TODO(eric.cousineau): It would be nice to not need a
        # MathematicalProgram to test these.

        def user_constraint_bad_shape(x):
            # WARNING: This should return a vector, not a scalar!
            return x[0]

        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1, 'x')
        binding_bad_shape = prog.AddConstraint(user_constraint_bad_shape,
                                               lb=[0.],
                                               ub=[2.],
                                               vars=x)

        for T in SCALAR_TYPES:
            array_T = np.vectorize(T)
            x0 = array_T([0.])
            x0_bad = array_T([0., 1.])
            # Bad input (before function is called).
            if kDrakeAssertIsArmed:
                # See note in `WrapUserFunc`.
                input_error_cls = SystemExit
                input_error_expected = (
                    "x.rows() == num_vars_ || num_vars_ == Eigen::Dynamic")
            else:
                input_error_cls = RuntimeError
                input_error_expected = (
                    "PyFunctionConstraint: Input must be of .ndim = 1 or 2 "
                    "(vector) and .size = 1. Got .ndim = 1 and .size = 2 "
                    "instead.")
            with self.assertRaises(input_error_cls) as cm:
                binding_bad_shape.evaluator().Eval(x0_bad)
            self.assertIn(input_error_expected, str(cm.exception))
            # Bad output.
            with self.assertRaises(RuntimeError) as cm:
                binding_bad_shape.evaluator().Eval(x0)
            self.assertEqual(
                str(cm.exception),
                "PyFunctionConstraint: Output must be of .ndim = 1 or 2 "
                "(vector) and .size = 1. Got .ndim = 0 and .size = 1 instead.")

            # Bad output dtype.
            U = self.get_different_scalar_type(T)

            def user_constraint_bad_dtype(x):
                # WARNING: This should return the same dtype as x!
                return [U(0.)]

            binding_bad_dtype = prog.AddConstraint(user_constraint_bad_dtype,
                                                   lb=[0.],
                                                   ub=[2.],
                                                   vars=x)
            with self.assertRaises(RuntimeError) as cm:
                binding_bad_dtype.evaluator().Eval(x0)
            self.assertEqual(
                str(cm.exception),
                f"PyFunctionConstraint: Output must be of scalar type "
                f"{T.__name__}. Got {U.__name__} instead.")

    def test_addcost_symbolic(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1, 'x')
        prog.AddCost((x[0] - 1.)**2)
        prog.AddConstraint(0 <= x[0])
        prog.AddConstraint(x[0] <= 2)
        result = mp.Solve(prog)
        self.assertAlmostEqual(result.GetSolution(x)[0], 1.)

    def test_addconstraint_matrix(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1, 'x')
        prog.AddConstraint(np.array([[x[0] <= 2], [x[0] >= -2]]))
        result = mp.Solve(prog)
        self.assertTrue(result.GetSolution(x)[0] <= 2)
        self.assertTrue(result.GetSolution(x)[0] >= -2)

    def test_initial_guess(self):
        prog = mp.MathematicalProgram()
        count = 6
        shape = (2, 3)
        x = prog.NewContinuousVariables(count, 'x')
        x_matrix = x.reshape(shape)
        x0 = np.arange(count)
        x0_matrix = x0.reshape(shape)
        all_nan = np.full(x.shape, np.nan)
        self.assertTrue(np.isnan(prog.GetInitialGuess(x)).all())

        def check_and_reset():
            self.assertTrue((prog.GetInitialGuess(x) == x0).all())
            self.assertTrue(
                (prog.GetInitialGuess(x_matrix) == x0_matrix).all())
            prog.SetInitialGuess(x, all_nan)
            self.assertTrue(np.isnan(prog.GetInitialGuess(x)).all())

        # Test setting individual variables
        for i in range(count):
            prog.SetInitialGuess(x[i], x0[i])
            self.assertEqual(prog.GetInitialGuess(x[i]), x0[i])
        check_and_reset()

        # Test setting matrix values using both
        # 1d and 2d np arrays.
        prog.SetInitialGuess(x, x0)
        check_and_reset()
        prog.SetInitialGuess(x_matrix, x0_matrix)
        check_and_reset()

        # Test setting all values at once.
        prog.SetInitialGuessForAllVariables(x0)
        check_and_reset()

        # Check an extrinsic guess.  We sanity check changes to the guess using
        # loose "any" and "all" predicates rather than specific indices because
        # we should not presume how variables map into indices.
        guess = np.ndarray(count)
        guess.fill(np.nan)
        self.assertTrue(all([np.isnan(i) for i in guess]))
        prog.SetDecisionVariableValueInVector(x[0], x0[0], guess)
        self.assertFalse(all([np.isnan(i) for i in guess]))
        self.assertTrue(any([np.isnan(i) for i in guess]))
        prog.SetDecisionVariableValueInVector(x_matrix, x0_matrix, guess)
        self.assertFalse(any([np.isnan(i) for i in guess]))

    @unittest.skipIf(SNOPT_NO_GUROBI,
                     "SNOPT is unable to solve this problem (#10653).")
    def test_lorentz_cone_constraint(self):
        # Set Up Mathematical Program
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")
        z = prog.NewContinuousVariables(1, "z")
        prog.AddCost(z[0])

        # Add LorentzConeConstraints
        prog.AddLorentzConeConstraint(
            np.array([0 * x[0] + 1, x[0] - 1, x[1] - 1]))
        prog.AddLorentzConeConstraint(np.array([z[0], x[0], x[1]]))

        # Test result
        # The default initial guess is [0, 0, 0]. This initial guess is bad
        # because LorentzConeConstraint with eval_type=kConvex is not
        # differentiable at [0, 0, 0]. Use initial guess [0.5, 0.5, 0.5]
        # instead.
        result = mp.Solve(prog, [0.5, 0.5, 0.5])
        self.assertTrue(result.is_success())

        # Check answer
        x_expected = np.array([1 - 2**(-0.5), 1 - 2**(-0.5)])
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))

    def test_add_lorentz_cone_constraint(self):
        # Call AddLorentzConeConstraint, make sure no error is thrown.
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(3)

        prog.AddLorentzConeConstraint(
            v=np.array([x[0] + 1, x[1] + x[2], 2 * x[1]]))
        prog.AddLorentzConeConstraint(linear_expression=x[0] + x[1] + 1,
                                      quadratic_expression=x[0] * x[0] +
                                      x[1] * x[1] + 2 * x[0] * x[1] + 1,
                                      tol=0.)
        A = np.array([[1, 0], [0, 1], [1, 0], [0, 0]])
        b = np.array([1, 1, 0, 2])
        constraint = prog.AddLorentzConeConstraint(A=A, b=b, vars=x[:2])
        np.testing.assert_allclose(constraint.evaluator().A().todense(), A)
        np.testing.assert_allclose(constraint.evaluator().b(), b)

    def test_add_rotated_lorentz_cone_constraint(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(3)

        A = np.array([[1, 0], [1, 1], [1, 0], [0, 1], [0, 0]])
        b = np.array([1, 0, 1, 0, 2])
        constraint = prog.AddRotatedLorentzConeConstraint(A=A, b=b, vars=x[:2])
        np.testing.assert_allclose(constraint.evaluator().A().todense(), A)
        np.testing.assert_allclose(constraint.evaluator().b(), b)

        prog.AddRotatedLorentzConeConstraint(
            v=[x[0] + 1, x[0] + x[1], x[0], x[2] + 1, 2])
        constraint = prog.AddRotatedLorentzConeConstraint(
            linear_expression1=x[0] + 1,
            linear_expression2=x[0] + x[1],
            quadratic_expression=x[0] * x[0] + 2 * x[0] + x[1] * x[1] + 5)

    def test_solver_options(self):
        prog = mp.MathematicalProgram()

        prog.SetSolverOption(SolverType.kGurobi, "double_key", 1.0)
        prog.SetSolverOption(GurobiSolver().solver_id(), "int_key", 2)
        prog.SetSolverOption(SolverType.kGurobi, "string_key", "3")

        options = prog.GetSolverOptions(SolverType.kGurobi)
        self.assertDictEqual(options, {
            "double_key": 1.0,
            "int_key": 2,
            "string_key": "3"
        })
        options = prog.GetSolverOptions(GurobiSolver().solver_id())
        self.assertDictEqual(options, {
            "double_key": 1.0,
            "int_key": 2,
            "string_key": "3"
        })

        # For now, just make sure the constructor exists.  Once we bind more
        # accessors, we can test them here.
        options_object = SolverOptions()
        solver_id = SolverId("dummy")
        self.assertEqual(solver_id.name(), "dummy")
        options_object.SetOption(solver_id, "double_key", 1.0)
        options_object.SetOption(solver_id, "int_key", 2)
        options_object.SetOption(solver_id, "string_key", "3")
        options = options_object.GetOptions(solver_id)
        self.assertDictEqual(options, {
            "double_key": 1.0,
            "int_key": 2,
            "string_key": "3"
        })

        prog.SetSolverOptions(options_object)
        prog_options = prog.GetSolverOptions(solver_id)
        self.assertDictEqual(prog_options, {
            "double_key": 1.0,
            "int_key": 2,
            "string_key": "3"
        })

    def test_infeasible_constraints(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1)
        result = mp.Solve(prog)

        infeasible = result.GetInfeasibleConstraints(prog)
        self.assertEqual(len(infeasible), 0)

        infeasible = result.GetInfeasibleConstraints(prog, tol=1e-4)
        self.assertEqual(len(infeasible), 0)

        infeasible_names = result.GetInfeasibleConstraintNames(prog=prog,
                                                               tol=1e-4)
        self.assertEqual(len(infeasible_names), 0)

    def test_add_indeterminates_and_decision_variables(self):
        prog = mp.MathematicalProgram()
        x0 = sym.Variable("x0")
        x1 = sym.Variable("x1")
        a0 = sym.Variable("a0")
        a1 = sym.Variable("a1")
        prog.AddIndeterminates(np.array([x0, x1]))
        prog.AddDecisionVariables(np.array([a0, a1]))
        numpy_compare.assert_equal(prog.decision_variables()[0], a0)
        numpy_compare.assert_equal(prog.decision_variables()[1], a1)
        numpy_compare.assert_equal(prog.indeterminates()[0], x0)
        numpy_compare.assert_equal(prog.indeterminate(1), x1)
Example #16
class HybridModelPredictiveController(object):

    def __init__(self, S, N, Q, R, P, X_N):

        # store inputs
        self.S = S
        self.N = N
        self.Q = Q
        self.R = R
        self.P = P
        self.X_N = X_N

        # mpMIQP
        self.build_mpmiqp()

    def build_mpmiqp(self):

        # express the constrained dynamics as a list of polytopes in the (x,u,x+)-space
        P = graph_representation(self.S)
        m = big_m(P)

        # initialize program
        self.prog = MathematicalProgram()
        self.x = []
        self.u = []
        self.d = []
        obj = 0.
        self.binaries_lower_bound = []

        # initial conditions (set arbitrarily to zero in the building phase)
        self.x.append(self.prog.NewContinuousVariables(self.S.nx))
        self.initial_condition = []
        for k in range(self.S.nx):
            self.initial_condition.append(self.prog.AddLinearConstraint(self.x[0][k] == 0.).evaluator())

        # loop over time
        for t in range(self.N):

            # create input, mode and next state variables
            self.u.append(self.prog.NewContinuousVariables(self.S.nu))
            self.d.append(self.prog.NewBinaryVariables(self.S.nm))
            self.x.append(self.prog.NewContinuousVariables(self.S.nx))
            
            # enforce constrained dynamics (big-m methods)
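            # When mode i is active (d[t][i] == 1) the big-m terms vanish and the
            # polytope P[i] is enforced exactly; otherwise its constraints are
            # relaxed by the precomputed big-m bounds.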
            xux = np.concatenate((self.x[t], self.u[t], self.x[t+1]))
            for i in range(self.S.nm):
                mi_sum = np.sum([m[i][j] * self.d[t][j] for j in range(self.S.nm) if j != i], axis=0)
                for k in range(P[i].A.shape[0]):
                    self.prog.AddLinearConstraint(P[i].A[k].dot(xux) <= P[i].b[k] + mi_sum[k])

            # SOS1 on the binaries
            self.prog.AddLinearConstraint(sum(self.d[t]) == 1.)

            # stage cost to the objective
            obj += .5 * self.u[t].dot(self.R).dot(self.u[t])
            obj += .5 * self.x[t].dot(self.Q).dot(self.x[t])

        # terminal constraint
        for k in range(self.X_N.A.shape[0]):
            self.prog.AddLinearConstraint(self.X_N.A[k].dot(self.x[self.N]) <= self.X_N.b[k])

        # terminal cost
        obj += .5 * self.x[self.N].dot(self.P).dot(self.x[self.N])
        self.objective = self.prog.AddQuadraticCost(obj)

        # set solver
        self.solver = GurobiSolver()
        self.prog.SetSolverOption(self.solver.solver_type(), 'OutputFlag', 1)


    def set_initial_condition(self, x0):
        for k, c in enumerate(self.initial_condition):
            c.UpdateLowerBound(x0[k:k+1])
            c.UpdateUpperBound(x0[k:k+1])

    def feedforward(self, x0):

        # overwrite initial condition
        self.set_initial_condition(x0)

        # solve MIQP
        result = self.solver.Solve(self.prog)

        # check feasibility
        if result != SolutionResult.kSolutionFound:
            return None, None, None, None

        # get cost
        obj = self.prog.EvalBindingAtSolution(self.objective)[0]

        # store argmin in list of vectors
        u = [self.prog.GetSolution(ut) for ut in self.u]
        x = [self.prog.GetSolution(xt) for xt in self.x]
        d = [self.prog.GetSolution(dt) for dt in self.d]

        # retrieve the mode sequence from the optimal binaries
        ms = [np.argmax(dt) for dt in d]

        return u, x, ms, obj


    def feedback(self, x0):

        # get feedforward and extract first input
        u_feedforward = self.feedforward(x0)[0]
        if u_feedforward is None:
            return None

        return u_feedforward[0]
Example #17
    def compute_input(self, x, xd, initial_guess=None):
        prog = MathematicalProgram()

        # Joint configuration states & Contact forces
        q = prog.NewContinuousVariables(rows=self.T + 1,
                                        cols=self.nq,
                                        name='q')
        v = prog.NewContinuousVariables(rows=self.T + 1,
                                        cols=self.nq,
                                        name='v')
        u = prog.NewContinuousVariables(rows=self.T, cols=self.nu, name='u')
        contact = prog.NewContinuousVariables(rows=self.T,
                                              cols=self.nf,
                                              name='lambda1')

        z = prog.NewBinaryVariables(rows=self.T, cols=self.nf, name='z')

        # Add Initial Condition Constraint
        prog.AddConstraint(eq(q[0], np.array(x[0:3])))
        prog.AddConstraint(eq(v[0], np.array(x[3:6])))

        # Add Final Condition Constraint
        prog.AddConstraint(eq(q[self.T], np.array(xd[0:3])))
        prog.AddConstraint(eq(v[self.T], np.array(xd[3:6])))

        prog.AddConstraint(z[0, 0] == 0)
        prog.AddConstraint(z[0, 1] == 0)

        # Add Dynamics Constraints
        for t in range(self.T):
            # Add Dynamics Constraints
            prog.AddConstraint(
                eq(q[t + 1], (q[t] + self.sim.params['h'] * v[t + 1])))

            prog.AddConstraint(v[t + 1, 0] == (
                v[t, 0] + self.sim.params['h'] *
                (-self.sim.params['c'] * v[t, 0] - contact[t, 0] + u[t, 0])))
            prog.AddConstraint(v[t + 1,
                                 1] == (v[t, 1] + self.sim.params['h'] *
                                        (-self.sim.params['c'] * v[t, 1] +
                                         contact[t, 0] - contact[t, 1])))
            prog.AddConstraint(v[t + 1, 2] == (
                v[t, 2] + self.sim.params['h'] *
                (-self.sim.params['c'] * v[t, 2] + contact[t, 1] + u[t, 1])))

            # Add Contact Constraints with big M = self.contact
            prog.AddConstraint(ge(contact[t], 0))
            prog.AddConstraint(contact[t, 0] + self.sim.params['k'] *
                               (q[t, 1] - q[t, 0] - self.sim.params['d']) >= 0)
            prog.AddConstraint(contact[t, 1] + self.sim.params['k'] *
                               (q[t, 2] - q[t, 1] - self.sim.params['d']) >= 0)

            # Mixed Integer Constraints
            M = self.contact_max
            prog.AddConstraint(contact[t, 0] <= M)
            prog.AddConstraint(contact[t, 1] <= M)
            prog.AddConstraint(contact[t, 0] <= M * z[t, 0])
            prog.AddConstraint(contact[t, 1] <= M * z[t, 1])
            prog.AddConstraint(
                contact[t, 0] + self.sim.params['k'] *
                (q[t, 1] - q[t, 0] - self.sim.params['d']) <= M *
                (1 - z[t, 0]))
            prog.AddConstraint(
                contact[t, 1] + self.sim.params['k'] *
                (q[t, 2] - q[t, 1] - self.sim.params['d']) <= M *
                (1 - z[t, 1]))
            prog.AddConstraint(z[t, 0] + z[t, 1] == 1)

            # Add Input Constraints. Contact Constraints already enforced in big-M
            # prog.AddConstraint(le(u[t], self.input_max))
            # prog.AddConstraint(ge(u[t], -self.input_max))

            # Add Costs
            prog.AddCost(u[t].dot(u[t]))

        # If no initial guess is provided, construct one here; otherwise
        # warm-start from the guess passed in (e.g. the previous solution).
        if initial_guess is None:
            initial_guess = np.empty(prog.num_vars())

            # Populate initial guess by linearly interpolating between initial
            # and final states
            #qinit = np.linspace(x[0:3], xd[0:3], self.T + 1)
            qinit = np.tile(np.array(x[0:3]), (self.T + 1, 1))
            vinit = np.tile(np.array(x[3:6]), (self.T + 1, 1))
            uinit = np.tile(np.array([0, 0]), (self.T, 1))
            finit = np.tile(np.array([0, 0]), (self.T, 1))

            prog.SetDecisionVariableValueInVector(q, qinit, initial_guess)
            prog.SetDecisionVariableValueInVector(v, vinit, initial_guess)
            prog.SetDecisionVariableValueInVector(u, uinit, initial_guess)
            prog.SetDecisionVariableValueInVector(contact, finit,
                                                  initial_guess)

        # Solve the program
        if (self.solver == "ipopt"):
            solver_id = IpoptSolver().solver_id()
        elif (self.solver == "snopt"):
            solver_id = SnoptSolver().solver_id()
        elif (self.solver == "osqp"):
            solver_id = OsqpSolver().solver_id()
        elif (self.solver == "mosek"):
            solver_id = MosekSolver().solver_id()
        elif (self.solver == "gurobi"):
            solver_id = GurobiSolver().solver_id()

        solver = MixedIntegerBranchAndBound(prog, solver_id)

        #result = solver.Solve(prog, initial_guess)
        result = solver.Solve()

        if result != result.kSolutionFound:
            raise ValueError('Infeasible optimization problem')

        # MixedIntegerBranchAndBound stores the solution on the solver object,
        # not on the returned SolutionResult enum.
        q_opt = solver.GetSolution(q)
        v_opt = solver.GetSolution(v)
        u_opt = solver.GetSolution(u)
        f_opt = solver.GetSolution(contact)
        sol = np.concatenate(
            (q_opt.ravel(), v_opt.ravel(), u_opt.ravel(), f_opt.ravel()))

        return sol, q_opt, v_opt, u_opt, f_opt
Example #18
def linear_program(f, A, b, C=None, d=None, tol=1.e-5):
    """
    Solves the linear program min_x f^T x s.t. A x <= b, C x = d.

    Arguments
    ----------
    f : numpy.ndarray
        Gradient of the cost function.
    A : numpy.ndarray
        Left-hand side of the inequality constraints.
    b : numpy.ndarray
        Right-hand side of the inequality constraints.
    C : numpy.ndarray
        Left-hand side of the equality constraints.
    d : numpy.ndarray
        Right-hand side of the equality constraints.
    tol : float
        Maximum residual of an inequality constraint for it to be considered active.

    Returns
    ----------
    sol : dict
        Dictionary with the solution of the LP.

        Fields
        ----------
        min : float
            Minimum of the LP (None if the problem is infeasible or unbounded).
        argmin : numpy.ndarray
            Argument that minimizes the LP (None if the problem is infeasible or unbounded).
        active_set : list of int
            Indices of the active inequalities {i | A_i argmin = b_i} (None if the problem is infeasible or unbounded).
        multiplier_inequality : numpy.ndarray
            Lagrange multipliers for the inequality constraints (None if the problem is infeasible or unbounded).
        multiplier_equality : numpy.ndarray
            Lagrange multipliers for the equality constraints (None if the problem is infeasible or unbounded or has no equality constraints).
    """

    # check equalities
    if (C is None) != (d is None):
        raise ValueError('missing C or d.')

    # problem size
    n_ineq, n_x = A.shape
    if C is not None:
        n_eq = C.shape[0]
    else:
        n_eq = 0

    # reshape inputs
    if len(f.shape) == 2:
        f = np.reshape(f, f.shape[0])

    # build program
    prog = MathematicalProgram()
    x = prog.NewContinuousVariables(n_x)
    inequalities = []
    for i in range(n_ineq):
        # Drake raises a RuntimeError if x does not appear in the expression
        # (e.g. 0 x <= 1), so perturb the coefficients by a negligible amount.
        lhs = A[i, :] + 1.e-20 * np.random.rand(n_x)
        # If the constraint is 0 x <= 0, the perturbation above would turn it
        # into x <= 0, so also perturb the right-hand side.
        rhs = b[i] + 1.e-15 * np.random.rand(1)
        inequalities.append(prog.AddLinearConstraint(lhs.dot(x) <= rhs))
    for i in range(n_eq):
        prog.AddLinearConstraint(C[i, :].dot(x) == d[i])
    prog.AddLinearCost(f.dot(x))

    # solve
    solver = GurobiSolver()
    prog.SetSolverOption(solver.solver_type(), "OutputFlag", 0)
    result = prog.Solve()

    # initialize output
    sol = {
        'min': None,
        'argmin': None,
        'active_set': None,
        'multiplier_inequality': None,
        'multiplier_equality': None
    }

    if result == SolutionResult.kSolutionFound:
        sol['argmin'] = prog.GetSolution(x).reshape(n_x, 1)
        sol['min'] = f.dot(sol['argmin'])[0]
        sol['active_set'] = np.where(
            A.dot(sol['argmin']) - b > -tol)[0].tolist()

        # retrieve multipliers through KKT conditions
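        # Stationarity gives f + A_active' * lambda_active + C' * mu = 0, so the
        # multipliers are the least-squares solution of [A_active' C'] m = -f.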
        M = A[sol['active_set'], :].T
        if n_eq > 0:
            M = np.hstack((M, C.T))
        m = np.linalg.pinv(M).dot(-f.reshape(n_x, 1))
        sol['multiplier_inequality'] = np.zeros((n_ineq, 1))
        for i, j in enumerate(sol['active_set']):
            sol['multiplier_inequality'][j, 0] = m[i, :]
        if n_eq > 0:
            sol['multiplier_equality'] = m[len(sol['active_set']):, :]

    return sol
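

# A minimal usage sketch (not part of the original module; assumes Gurobi is
# licensed): minimize x1 + 2*x2 subject to x >= 0 and x1 + x2 >= 1.
# The optimum is x = [1, 0] with cost 1; the active inequalities are x2 >= 0
# and x1 + x2 >= 1.
if __name__ == '__main__':
    f = np.array([1., 2.])
    A = np.array([[-1., 0.], [0., -1.], [-1., -1.]])
    b = np.array([0., 0., -1.])
    sol = linear_program(f, A, b)
    print(sol['min'], sol['argmin'].flatten(), sol['active_set'])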
Example #19
class TestMathematicalProgram(unittest.TestCase):
    def test_program_construction(self):
        prog = mp.MathematicalProgram()
        vars = prog.NewContinuousVariables(5, "x")
        self.assertEqual(vars.dtype, sym.Variable)
        vars_all = prog.decision_variables()
        self.assertEqual(vars_all.shape, (5,))

    def test_program_attributes_and_solver_selection(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")

        # Add linear equality constraints; make sure the solver works.
        prog.AddLinearConstraint(x[0] + x[1] == 0)
        prog.AddLinearConstraint(2*x[0] - x[1] == 1)
        solver_id = mp.ChooseBestSolver(prog)
        self.assertEqual(solver_id.name(), "Linear system")
        solver = mp.MakeSolver(solver_id)
        self.assertEqual(solver.solver_id().name(), "Linear system")
        self.assertTrue(solver.AreProgramAttributesSatisfied(prog))
        result = solver.Solve(prog, None, None)
        self.assertTrue(result.is_success())

        # With an inequality constraint added, the "Linear system" solver
        # doesn't work anymore.
        prog.AddLinearConstraint(x[0] >= 0)
        self.assertFalse(solver.AreProgramAttributesSatisfied(prog))
        with self.assertRaises(ValueError):
            solver.Solve(prog, None, None)

        # A different solver will work, though.  We re-use the result object
        # (as a mutable output argument), and make sure that it changes.
        solver_id = mp.ChooseBestSolver(prog)
        self.assertNotEqual(solver_id.name(), "Linear system")
        solver = mp.MakeSolver(solver_id)
        solver.Solve(prog, None, None, result)
        self.assertTrue(result.is_success())
        self.assertEqual(result.get_solver_id().name(), solver_id.name())

    def test_module_level_solve_function_and_result_accessors(self):
        qp = TestQP()
        x_expected = np.array([1, 1])
        result = mp.Solve(qp.prog)
        self.assertTrue(result.is_success())
        self.assertTrue(np.allclose(result.get_x_val(), x_expected))
        self.assertEqual(result.get_solution_result(),
                         mp.SolutionResult.kSolutionFound)
        self.assertEqual(result.get_optimal_cost(), 3.0)
        self.assertTrue(result.get_solver_id().name())
        self.assertEqual(result.GetSolution(qp.x[0]), 1.0)
        self.assertTrue(np.allclose(result.GetSolution(qp.x), x_expected))

    @unittest.skipUnless(GurobiSolver().available(), "Requires Gurobi")
    def test_mixed_integer_optimization(self):
        prog = mp.MathematicalProgram()
        x = prog.NewBinaryVariables(3, "x")
        c = np.array([-1.0, -1.0, -2.0])
        prog.AddLinearCost(c.dot(x))
        a = np.array([1.0, 2.0, 3.0])
        prog.AddLinearConstraint(a.dot(x) <= 4)
        prog.AddLinearConstraint(x[0] + x[1], 1, np.inf)
        self.assertIsNone(prog.GetSolverId())
        result = prog.Solve()
        self.assertEqual(result, mp.SolutionResult.kSolutionFound)
        self.assertIsNotNone(prog.GetSolverId().name())

        # Test that we got the right solution for all x
        x_expected = np.array([1.0, 0.0, 1.0])
        self.assertTrue(np.all(np.isclose(prog.GetSolution(x), x_expected)))

        # Also test by asking for the value of each element of x
        for i in range(3):
            self.assertAlmostEqual(prog.GetSolution(x[i]), x_expected[i])

    def test_qp(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")
        prog.AddLinearConstraint(x[0] >= 1)
        prog.AddLinearConstraint(x[1] >= 1)
        prog.AddQuadraticCost(np.eye(2), np.zeros(2), x)
        # Redundant cost just to check the spelling.
        prog.AddQuadraticErrorCost(vars=x, Q=np.eye(2),
                                   x_desired=np.zeros(2))
        prog.AddL2NormCost(A=np.eye(2), b=np.zeros(2), vars=x)

        result = prog.Solve()
        self.assertEqual(result, mp.SolutionResult.kSolutionFound)

        x_expected = np.array([1, 1])
        self.assertTrue(np.allclose(prog.GetSolution(x), x_expected))

    def test_symbolic_qp(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")
        prog.AddConstraint(x[0], 1., 100.)
        prog.AddConstraint(x[1] >= 1)
        prog.AddQuadraticCost(x[0]**2 + x[1]**2)
        result = prog.Solve()
        self.assertEqual(result, mp.SolutionResult.kSolutionFound)

        x_expected = np.array([1, 1])
        self.assertTrue(np.allclose(prog.GetSolution(x), x_expected))

    def test_bindings(self):
        qp = TestQP()
        prog = qp.prog
        x = qp.x

        for binding in prog.GetAllCosts():
            self.assertIsInstance(binding.evaluator(), mp.Cost)
        for binding in prog.GetLinearConstraints():
            self.assertIsInstance(binding.evaluator(), mp.Constraint)
        for binding in prog.GetAllConstraints():
            self.assertIsInstance(binding.evaluator(), mp.Constraint)

        self.assertTrue(prog.linear_costs())
        for (i, binding) in enumerate(prog.linear_costs()):
            cost = binding.evaluator()
            self.assertTrue(np.allclose(cost.a(), np.ones((1, 2))))

        self.assertTrue(prog.quadratic_costs())
        for (i, binding) in enumerate(prog.quadratic_costs()):
            cost = binding.evaluator()
            self.assertTrue(np.allclose(cost.Q(), np.eye(2)))
            self.assertTrue(np.allclose(cost.b(), np.zeros(2)))

        self.assertTrue(prog.bounding_box_constraints())
        for (i, binding) in enumerate(prog.bounding_box_constraints()):
            constraint = binding.evaluator()
            self.assertEqual(
                prog.FindDecisionVariableIndex(binding.variables()[0]),
                prog.FindDecisionVariableIndex(x[i]))
            num_constraints = constraint.num_constraints()
            if num_constraints == 1:
                self.assertEqual(constraint.A(), 1)
                self.assertEqual(constraint.lower_bound(), 1)
                self.assertEqual(constraint.upper_bound(), np.inf)
            else:
                self.assertTrue(np.allclose(constraint.A(), np.eye(2)))
                self.assertTrue(np.allclose(constraint.lower_bound(),
                                            [1, -np.inf]))
                self.assertTrue(np.allclose(constraint.upper_bound(),
                                            [np.inf, 2]))

        self.assertTrue(prog.linear_constraints())
        for (i, binding) in enumerate(prog.linear_constraints()):
            constraint = binding.evaluator()
            self.assertEqual(
                prog.FindDecisionVariableIndex(binding.variables()[0]),
                prog.FindDecisionVariableIndex(x[0]))
            self.assertEqual(
                prog.FindDecisionVariableIndex(binding.variables()[1]),
                prog.FindDecisionVariableIndex(x[1]))
            self.assertTrue(np.allclose(constraint.A(), [3, -1]))
            self.assertTrue(np.allclose(constraint.lower_bound(), -np.inf))
            self.assertTrue(np.allclose(constraint.upper_bound(), 2))

        self.assertTrue(prog.linear_equality_constraints())
        for (i, binding) in enumerate(prog.linear_equality_constraints()):
            constraint = binding.evaluator()
            self.assertEqual(
                prog.FindDecisionVariableIndex(binding.variables()[0]),
                prog.FindDecisionVariableIndex(x[0]))
            self.assertEqual(
                prog.FindDecisionVariableIndex(binding.variables()[1]),
                prog.FindDecisionVariableIndex(x[1]))
            self.assertTrue(np.allclose(constraint.A(), [1, 2]))
            self.assertTrue(np.allclose(constraint.lower_bound(), 3))
            self.assertTrue(np.allclose(constraint.upper_bound(), 3))

        result = prog.Solve()
        self.assertEqual(result, mp.SolutionResult.kSolutionFound)

        x_expected = np.array([1, 1])
        self.assertTrue(np.allclose(prog.GetSolution(x), x_expected))

        # Test deprecated method.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('once', DrakeDeprecationWarning)
            c = binding.constraint()
            self.assertEqual(len(w), 1)

    def test_constraint_api(self):
        prog = mp.MathematicalProgram()
        x0, = prog.NewContinuousVariables(1, "x")
        c = prog.AddLinearConstraint(x0 >= 2).evaluator()
        ce = prog.AddLinearEqualityConstraint(2*x0, 1).evaluator()

        self.assertTrue(c.CheckSatisfied([2.], tol=1e-3))
        self.assertFalse(c.CheckSatisfied([AutoDiffXd(1.)]))
        self.assertIsInstance(c.CheckSatisfied([x0]), sym.Formula)

        def check_bounds(c, A, lb, ub):
            self.assertTrue(np.allclose(c.A(), A))
            self.assertTrue(np.allclose(c.lower_bound(), lb))
            self.assertTrue(np.allclose(c.upper_bound(), ub))

        check_bounds(c, [1.], [2.], [np.inf])
        c.UpdateLowerBound([3.])
        check_bounds(c, [1.], [3.], [np.inf])
        c.UpdateUpperBound([4.])
        check_bounds(c, [1.], [3.], [4.])
        c.set_bounds([-10.], [10.])
        check_bounds(c, [1.], [-10.], [10.])
        c.UpdateCoefficients([10.], [-20.], [-30.])
        check_bounds(c, [10.], [-20.], [-30.])

        check_bounds(ce, [2.], [1.], [1.])
        ce.UpdateCoefficients([10.], [20.])
        check_bounds(ce, [10.], [20.], [20.])

    def test_cost_api(self):
        prog = mp.MathematicalProgram()
        x0, = prog.NewContinuousVariables(1, "x")
        lc = prog.AddLinearCost(1*x0 + 2).evaluator()
        qc = prog.AddQuadraticCost(0.5*x0**2 + 2*x0 + 3).evaluator()

        def check_linear_cost(cost, a, b):
            self.assertTrue(np.allclose(cost.a(), a))
            self.assertTrue(np.allclose(cost.b(), b))

        check_linear_cost(lc, [1.], 2.)
        lc.UpdateCoefficients([10.])
        check_linear_cost(lc, [10.], 0.)

        def check_quadratic_cost(cost, Q, b, c):
            self.assertTrue(np.allclose(cost.Q(), Q))
            self.assertTrue(np.allclose(cost.b(), b))
            self.assertTrue(np.allclose(cost.c(), c))

        check_quadratic_cost(qc, [1.], [2.], 3.)
        qc.UpdateCoefficients([10.], [20.])
        check_quadratic_cost(qc, [10.], [20.], 0)

    def test_eval_binding(self):
        qp = TestQP()
        prog = qp.prog

        x = qp.x
        x_expected = np.array([1., 1.])

        costs = qp.costs
        cost_values_expected = [2., 1.]
        constraints = qp.constraints
        constraint_values_expected = [1., 1., 2., 3.]

        prog.Solve()
        self.assertTrue(np.allclose(prog.GetSolution(x), x_expected))

        enum = zip(constraints, constraint_values_expected)
        for (constraint, value_expected) in enum:
            value = prog.EvalBindingAtSolution(constraint)
            self.assertTrue(np.allclose(value, value_expected))

        enum = zip(costs, cost_values_expected)
        for (cost, value_expected) in enum:
            value = prog.EvalBindingAtSolution(cost)
            self.assertTrue(np.allclose(value, value_expected))

        # Existence check.
        self.assertIsInstance(
            prog.EvalBinding(costs[0], x_expected), np.ndarray)
        self.assertIsInstance(
            prog.EvalBindings(prog.GetAllConstraints(), x_expected),
            np.ndarray)

        # Bindings for `Eval`.
        x_list = (float(1.), AutoDiffXd(1.), sym.Variable("x"))
        T_y_list = (float, AutoDiffXd, sym.Expression)
        evaluator = costs[0].evaluator()
        for x_i, T_y_i in zip(x_list, T_y_list):
            y_i = evaluator.Eval(x=[x_i, x_i])
            self.assertIsInstance(y_i[0], T_y_i)

    def test_matrix_variables(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, 2, "x")
        for i in range(2):
            for j in range(2):
                prog.AddLinearConstraint(x[i, j] == 2 * i + j)
        prog.Solve()
        xval = prog.GetSolution(x)
        for i in range(2):
            for j in range(2):
                self.assertAlmostEqual(xval[i, j], 2 * i + j)
                self.assertEqual(xval[i, j], prog.GetSolution(x[i, j]))
        # Just check spelling.
        y = prog.NewIndeterminates(2, 2, "y")

    def test_sdp(self):
        prog = mp.MathematicalProgram()
        S = prog.NewSymmetricContinuousVariables(3, "S")
        prog.AddLinearConstraint(S[0, 1] >= 1)
        prog.AddPositiveSemidefiniteConstraint(S)
        prog.AddPositiveSemidefiniteConstraint(S+S)
        prog.AddLinearCost(np.trace(S))
        result = prog.Solve()
        self.assertEqual(result, mp.SolutionResult.kSolutionFound)
        S = prog.GetSolution(S)
        eigs = np.linalg.eigvals(S)
        tol = 1e-8
        self.assertTrue(np.all(eigs >= -tol))
        self.assertTrue(S[0, 1] >= -tol)

    def test_sos(self):
        # Find a,b,c,d subject to
        # a(0) + a(1)*x,
        # b(0) + 2*b(1)*x + b(2)*x^2 is SOS,
        # c(0)*x^2 + 2*c(1)*x*y + c(2)*y^2 is SOS,
        # d(0)*x^2 is SOS.
        # d(1)*x^2 is SOS.
        prog = mp.MathematicalProgram()
        x = prog.NewIndeterminates(1, "x")
        poly = prog.NewFreePolynomial(sym.Variables(x), 1)
        (poly, binding) = prog.NewSosPolynomial(sym.Variables(x), 2)
        y = prog.NewIndeterminates(1, "y")
        (poly, binding) = prog.NewSosPolynomial((sym.Monomial(x[0]),
                                                 sym.Monomial(y[0])))
        d = prog.NewContinuousVariables(2, "d")
        prog.AddSosConstraint(d[0]*x.dot(x))
        prog.AddSosConstraint(d[1]*x.dot(x), [sym.Monomial(x[0])])
        result = prog.Solve()
        self.assertEqual(result, mp.SolutionResult.kSolutionFound)

        # Test SubstituteSolution(sym.Expression)
        # TODO(eric.cousineau): Expose `SymbolicTestCase` so that other tests
        # can use the assertion utilities.
        self.assertEqual(
            prog.SubstituteSolution(d[0] + d[1]).Evaluate(),
            prog.GetSolution(d[0]) + prog.GetSolution(d[1]))
        # Test SubstituteSolution(sym.Polynomial)
        poly = d[0]*x.dot(x)
        poly_sub_actual = prog.SubstituteSolution(
            sym.Polynomial(poly, sym.Variables(x)))
        poly_sub_expected = sym.Polynomial(
            prog.SubstituteSolution(d[0])*x.dot(x), sym.Variables(x))
        # TODO(soonho): At present, these must be converted to `Expression` to
        # compare, because as `Polynomial`s the comparison fails with
        # `0*x(0)^2` != `0`, which indicates that simplification is not
        # happening somewhere.
        self.assertTrue(
            poly_sub_actual.ToExpression().EqualTo(
                poly_sub_expected.ToExpression()),
            "{} != {}".format(poly_sub_actual, poly_sub_expected))

    def test_lcp(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, 'x')
        M = np.array([[1, 3], [4, 1]])
        q = np.array([-16, -15])
        binding = prog.AddLinearComplementarityConstraint(M, q, x)
        result = prog.Solve()
        self.assertEqual(result, mp.SolutionResult.kSolutionFound)
        self.assertIsInstance(binding.evaluator(),
                              mp.LinearComplementarityConstraint)

    def test_linear_constraints(self):
        # TODO(eric.cousineau): Add more general tests
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, 'x')
        lb = [0., 0.]
        ub = [1., 1.]
        prog.AddBoundingBoxConstraint(lb, ub, x)
        prog.AddBoundingBoxConstraint(0., 1., x[0])
        prog.AddBoundingBoxConstraint(0., 1., x)
        prog.AddLinearConstraint(np.eye(2), np.zeros(2), np.ones(2), x)

        prog.AddLinearEqualityConstraint(np.eye(2), np.zeros(2), x)
        prog.AddLinearEqualityConstraint(x[0] == 1)
        prog.AddLinearEqualityConstraint(x[0] + x[1], 1)

    def test_pycost_and_pyconstraint(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1, 'x')

        def cost(x):
            return (x[0]-1.)*(x[0]-1.)

        def constraint(x):
            return x

        prog.AddCost(cost, vars=x)
        prog.AddConstraint(constraint, lb=[0.], ub=[2.], vars=x)
        prog.Solve()
        self.assertAlmostEqual(prog.GetSolution(x)[0], 1.)

    def test_addcost_symbolic(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1, 'x')
        prog.AddCost((x[0]-1.)**2)
        prog.AddConstraint(0 <= x[0])
        prog.AddConstraint(x[0] <= 2)
        prog.Solve()
        self.assertAlmostEqual(prog.GetSolution(x)[0], 1.)

    def test_initial_guess(self):
        prog = mp.MathematicalProgram()
        count = 6
        shape = (2, 3)
        x = prog.NewContinuousVariables(count, 'x')
        x_matrix = x.reshape(shape)
        x0 = np.arange(count)
        x0_matrix = x0.reshape(shape)
        all_nan = np.full(x.shape, np.nan)
        self.assertTrue(np.isnan(prog.GetInitialGuess(x)).all())

        def check_and_reset():
            self.assertTrue((prog.GetInitialGuess(x) == x0).all())
            self.assertTrue(
                (prog.GetInitialGuess(x_matrix) == x0_matrix).all())
            prog.SetInitialGuess(x, all_nan)
            self.assertTrue(np.isnan(prog.GetInitialGuess(x)).all())

        # Test setting individual variables
        for i in range(count):
            prog.SetInitialGuess(x[i], x0[i])
            self.assertEqual(prog.GetInitialGuess(x[i]), x0[i])
        check_and_reset()

        # Test setting matrix values using both
        # 1d and 2d np arrays.
        prog.SetInitialGuess(x, x0)
        check_and_reset()
        prog.SetInitialGuess(x_matrix, x0_matrix)
        check_and_reset()

        # Test setting all values at once.
        prog.SetInitialGuessForAllVariables(x0)
        check_and_reset()

    @unittest.skipIf(
        SNOPT_NO_GUROBI,
        "SNOPT is unable to solve this problem (#10653).")
    def test_lorentz_cone_constraint(self):
        # Set Up Mathematical Program
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")
        z = prog.NewContinuousVariables(1, "z")
        prog.AddCost(z[0])

        # Add LorentzConeConstraints
        prog.AddLorentzConeConstraint(np.array([0*x[0]+1, x[0]-1, x[1]-1]))
        prog.AddLorentzConeConstraint(np.array([z[0], x[0], x[1]]))

        # Test result
        result = prog.Solve()
        self.assertEqual(result, mp.SolutionResult.kSolutionFound)

        # Check answer
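        # The first cone constraint keeps x within unit distance of (1, 1); the
        # second makes z an upper bound on ||x||. Minimizing z therefore selects
        # the point of that disk closest to the origin, x = (1 - 1/sqrt(2))*[1, 1].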
        x_expected = np.array([1-2**(-0.5), 1-2**(-0.5)])
        self.assertTrue(np.allclose(prog.GetSolution(x), x_expected))

    def test_solver_options(self):
        prog = mp.MathematicalProgram()

        prog.SetSolverOption(SolverType.kGurobi, "double_key", 1.0)
        prog.SetSolverOption(SolverType.kGurobi, "int_key", 2)
        prog.SetSolverOption(SolverType.kGurobi, "string_key", "3")

        options = prog.GetSolverOptions(SolverType.kGurobi)
        self.assertDictEqual(
            options, {"double_key": 1.0, "int_key": 2, "string_key": "3"})

        # For now, just make sure the constructor exists.  Once we bind more
        # accessors, we can test them here.
        options_object = SolverOptions()
Example #20
                                                 SolverId, SolverInterface)

from functools import partial
import unittest
import warnings

import numpy as np

import pydrake
from pydrake.autodiffutils import AutoDiffXd
from pydrake.common.test_utilities import numpy_compare
from pydrake.forwarddiff import jacobian
from pydrake.math import ge
import pydrake.symbolic as sym

SNOPT_NO_GUROBI = SnoptSolver().available() and not GurobiSolver().available()


class TestQP:
    def __init__(self):
        # Create a simple QP that uses all deduced linear constraint types,
        # along with a quadratic and linear cost.
        # The solution should be [1, 1].
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")
        self.prog = prog
        self.x = x
        self.constraints = [
            # Bounding box
            prog.AddLinearConstraint(x[0] >= 1),
            # Bounding box
Example #21
def mixed_integer_quadratic_program(nc, H, f, A, b, C=None, d=None, **kwargs):
    """
    Solves the strictly convex (H > 0) mixed-integer quadratic program min .5 x' H x + f' x s.t. A x <= b, C x  = d.
    The first nc variables in x are continuous, the remaining are binaries.

    Arguments
    ----------
    nc : int
        Number of continuous variables in the problem.
    H : numpy.ndarray
        Positive definite Hessian of the cost function.
    f : numpy.ndarray
        Gradient of the cost function.
    A : numpy.ndarray
        Left-hand side of the inequality constraints.
    b : numpy.ndarray
        Right-hand side of the inequality constraints.
    C : numpy.ndarray
        Left-hand side of the equality constraints.
    d : numpy.ndarray
        Right-hand side of the equality constraints.

    Returns
    ----------
    sol : dict
        Dictionary with the solution of the MIQP.

        Fields
        ----------
        min : float
            Minimum of the MIQP (None if the problem is infeasible).
        argmin : numpy.ndarray
            Argument that minimizes the MIQP (None if the problem is infeasible).
    """

    # check equalities
    if (C is None) != (d is None):
        raise ValueError('missing C or d.')

    # problem size
    n_ineq, n_x = A.shape
    if C is not None:
        n_eq = C.shape[0]
    else:
        n_eq = 0

    # build program
    prog = MathematicalProgram()
    x = np.hstack((
        prog.NewContinuousVariables(nc),
        prog.NewBinaryVariables(n_x - nc)
        ))
    for i in range(n_ineq):
        prog.AddLinearConstraint(A[i].dot(x) <= b[i])
    for i in range(n_eq):
        prog.AddLinearConstraint(C[i].dot(x) == d[i])
    prog.AddQuadraticCost(.5*x.dot(H).dot(x) + f.dot(x))

    # solve
    solver = GurobiSolver()
    prog.SetSolverOption(solver.solver_type(), 'OutputFlag', 0)
    for parameter, value in kwargs.items():
        prog.SetSolverOption(solver.solver_type(), parameter, value)
    result = prog.Solve()

    # initialize output
    sol = {
        'min': None,
        'argmin': None
    }

    if result == SolutionResult.kSolutionFound:
        sol['argmin'] = prog.GetSolution(x)
        sol['min'] = .5*sol['argmin'].dot(H).dot(sol['argmin']) + f.dot(sol['argmin'])

    return sol
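

# A minimal usage sketch (not part of the original module; assumes Gurobi is
# licensed): one continuous variable x and one binary z, with cost
# (x - 1)^2 + (z - 1)^2 - 2 and the coupling constraint x + z <= 1.5.
# Choosing z = 1 forces x <= 0.5, so the optimum is x = 0.5, z = 1, cost -1.75.
if __name__ == '__main__':
    H = 2. * np.eye(2)
    f = np.array([-2., -2.])
    A = np.array([[1., 1.]])
    b = np.array([1.5])
    sol = mixed_integer_quadratic_program(1, H, f, A, b)
    print(sol['argmin'], sol['min'])  # approximately [0.5, 1.], -1.75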
Example #22
    def __init__(self, frontend: SlamFrontend):
        """
        Indices (consistent with Indelman2015, aka "the paper".)
            Constants:
            k: current time step.
            L: planning horizon.

            i: time step.
            j: landmark.
            l \in [k, k+L]: one of the future time steps up to L.


        :param X_WB_e_0: np array of shape (self.dim_X). Initial estimated
            robot position, used to anchor the entire trajectory.
        """
        self.solver = GurobiSolver()
        self.X_WB_e_0 = np.zeros(2)

        self.dim_l = 2  # dimension of landmarks.
        self.dim_X = 2  # dimension of poses.
        self.dim_measurements = 2

        # current beliefs
        self.X_WB_e_dict = {0: self.X_WB_e_0}
        self.X_WB_e_var_dict = {}
        self.l_xy_e_dict = dict()

        # Information matrix of current robot poses and landmarks, which are
        # ordered as follows:
        # [X_WB_e0, ... X_WB_e_k, l_xy_e0, ... l_xy_eK]
        # The landmarks are ordered as self.l_xy_e_dict.keys().
        self.I_all = None

        # past measurements
        self.odometry_measurements = dict()

        # key (int): landmark index
        # value: dict of {t: {"distance":d, "bearing": b}}.
        # t: index of robot poses from which the landmark is visible.
        # d: range measurement for the landmark from the pose indexed by t.
        # b: bearing measurement for the landmark from the pose indexed by t.
        self.landmark_measurements = dict()

        # Taking stuff from frontend
        self.frontend = frontend
        self.vis = frontend.vis
        self.l_xy = frontend.l_xy
        # TODO: load from config file?
        self.sigma_odometry = frontend.sigma_odometry
        self.sigma_range = frontend.sigma_range
        self.sigma_bearing = frontend.sigma_bearing

        self.sigma_dynamics = self.sigma_odometry

        self.r_range_max = frontend.r_range_max
        self.r_range_min = frontend.r_range_min

        # weights/selection matrices for the cost function.
        self.M_u = np.ones(self.dim_l) * 0.1

        self.beta = 0.15
        self.alpha_LB = 0.6
        self.is_uncertainty_reduction_only = False
Example #23
class SlamBackend:
    def __init__(self, frontend: SlamFrontend):
        """
        Indices (consistent with Indelman2015, aka "the paper".)
            Constants:
            k: current time step.
            L: planning horizon.

            i: time step.
            j: landmark.
            l \in [k, k+L]: one of the future time steps up to L.


        :param X_WB_e_0: np array of shape (self.dim_X). Initial estimated
            robot position, used to anchor the entire trajectory.
        """
        self.solver = GurobiSolver()
        self.X_WB_e_0 = np.zeros(2)

        self.dim_l = 2  # dimension of landmarks.
        self.dim_X = 2  # dimension of poses.
        self.dim_measurements = 2

        # current beliefs
        self.X_WB_e_dict = {0: self.X_WB_e_0}
        self.X_WB_e_var_dict = {}
        self.l_xy_e_dict = dict()

        # Information matrix of current robot poses and landmarks, which are
        # ordered as follows:
        # [X_WB_e0, ... X_WB_e_k, l_xy_e0, ... l_xy_eK]
        # The landmarks are ordered as self.l_xy_e_dict.keys().
        self.I_all = None

        # past measurements
        self.odometry_measurements = dict()

        # key (int): landmark index
        # value: dict of {t: {"distance":d, "bearing": b}}.
        # t: index of robot poses from which the landmark is visible.
        # d: range measurement for the landmark from the pose indexed by t.
        # b: bearing measurement for the landmark from the pose indexed by t.
        self.landmark_measurements = dict()

        # Taking stuff from frontend
        self.frontend = frontend
        self.vis = frontend.vis
        self.l_xy = frontend.l_xy
        # TODO: load from config file?
        self.sigma_odometry = frontend.sigma_odometry
        self.sigma_range = frontend.sigma_range
        self.sigma_bearing = frontend.sigma_bearing

        self.sigma_dynamics = self.sigma_odometry

        self.r_range_max = frontend.r_range_max
        self.r_range_min = frontend.r_range_min

        # weights/selection matrices for the cost function.
        self.M_u = np.ones(self.dim_l) * 0.1

        self.beta = 0.15
        self.alpha_LB = 0.6
        self.is_uncertainty_reduction_only = False

    def update_landmark_measurements(self, t, idx_visible_l_list,
                                     d_l_measured_list, b_l_measured_list):
        """
        inputs are the output of Frontend.get_range_measurements()
        :return: None.
        """
        for i, i_l in enumerate(idx_visible_l_list):
            d_l = d_l_measured_list[i]
            b_l = b_l_measured_list[i]
            if i_l not in self.landmark_measurements:
                self.landmark_measurements[i_l] = dict()
            self.landmark_measurements[i_l][t] = \
                {"distance": d_l, "bearing": b_l}

    def update_odometry_measruements(self, t, odometry_measurement):
        """

        :param t: current time step (starting from 0).
        :param odometry_measurement: odometry measurement between t and t-1.
        :return:
        """
        self.odometry_measurements[t] = odometry_measurement

    def get_X_WB_belief(self):
        n = len(self.X_WB_e_dict)
        X_WB_e = np.zeros((n, self.dim_X))

        for i in range(n):
            X_WB_e[i] = self.X_WB_e_dict[i]

        return X_WB_e

    def get_l_xy_belief(self):
        n = len(self.l_xy_e_dict)
        l_xy_e = np.zeros((n, self.dim_l))

        for i, k in enumerate(self.l_xy_e_dict.keys()):
            l_xy_e[i] = self.l_xy_e_dict[k]

        return l_xy_e

    def get_odometry_measurements(self):
        n_o = len(self.odometry_measurements)
        odometry_measurements = np.zeros((n_o, self.dim_X))

        for i in range(n_o):
            odometry_measurements[i] = self.odometry_measurements[i + 1]

        return odometry_measurements

    def get_X_WB_initial_guess_array(self):
        """
        Use self.X_WB_e_dict to create an initial guess for the next bundle
            adjustment.
        Let t be the current exploration time step. Since self.X_WB_e_dict only
            holds estimates up to time step t - 1, it does not include an
            estimate for the pose of the current time step.

        The initial guess for the current time step is computed as
            self.X_WB_e_dict[t - 1] + self.odometry_measurements[t].
        :return:
        """
        n_o = len(self.odometry_measurements)
        X_WB_e = np.zeros((n_o + 1, self.dim_X))
        for i in range(n_o):
            X_WB_e[i] = self.X_WB_e_dict[i]
        if n_o > 0:
            X_WB_e[n_o] = X_WB_e[n_o - 1] + self.odometry_measurements[n_o]

        # First ground truth is given.
        X_WB_e[0] = self.X_WB_e_0

        return X_WB_e

    def get_l_xy_initial_guess_array(self):
        nl = len(self.landmark_measurements)
        l_xy_e = np.ones((nl, 2))

        for i_k, k in enumerate(self.landmark_measurements.keys()):
            if k in self.l_xy_e_dict:
                l_xy_e[i_k] = self.l_xy_e_dict[k]
        return l_xy_e

    def marginalize_info_matrix(self, Omega, n_p, n_l):
        nX = n_p * self.dim_X
        m, n = Omega.shape
        assert m == n
        assert m == nX + n_l * self.dim_l

        A = Omega[:nX, :nX]
        B = Omega[:nX, nX:]
        C = Omega[nX:, nX:]
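        # The Schur complement below marginalizes out the landmark block C.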

        return A - B.dot(np.linalg.inv(C)).dot(B.T)

    def calc_range_derivatives(self,
                               J,
                               i_row,
                               j_start_x,
                               j_start_l,
                               d_xy,
                               sigma=1.0):
        """

        :param i_row:
        :param j_start_x:
        :param j_start_l:
        :param d_xy: X_WB - l_xy
        :return:
        """
        if torch.is_tensor(J):
            norm = torch.norm
        else:
            norm = np.linalg.norm

        d = norm(d_xy)
        J[i_row, j_start_x:j_start_x + 2] = d_xy / (d * sigma)
        J[i_row, j_start_l:j_start_l + self.dim_l] = -d_xy / (d * sigma)

        return d

    def calc_bearing_derivatives(self,
                                 J,
                                 i_row,
                                 j_start_x,
                                 j_start_l,
                                 X_WB,
                                 l_xy,
                                 sigma=1.0):
        xb = X_WB[0]
        yb = X_WB[1]
        xl = l_xy[0]
        yl = l_xy[1]
        dx = xl - xb
        dy = yl - yb
        d_arctan_D_dx = 1 / (1 + (dy / dx)**2) * (-dy / dx**2)
        d_arctan_D_dy = 1 / (1 + (dy / dx)**2) * (1 / dx)

        J[i_row, j_start_x] = -d_arctan_D_dx / sigma
        J[i_row, j_start_x + 1] = -d_arctan_D_dy / sigma

        J[i_row, j_start_l] = d_arctan_D_dx / sigma
        J[i_row, j_start_l + 1] = d_arctan_D_dy / sigma

        return dx, dy

    @staticmethod
    def calc_num_landmark_measurements(landmark_measurements):
        nl_measurements = 0  # number of landmark measurements
        for visible_landmarks_i in landmark_measurements.values():
            nl_measurements += len(visible_landmarks_i)
        return nl_measurements

    def calc_jacobian_and_b(self, X_WB_e, l_xy_e):
        """
        :param X_WB_e (n_o + 1, 3)
        :param l_xy_e (nl, 2): coordinates of landmarks, ordered the same way as
            self.landmark_measurements.
        :param landmark_measurements:
        :return:
        """
        dim_l = self.dim_l  # dimension of landmarks.
        dim_X = self.dim_X  # dimension of poses.

        nl = len(self.landmark_measurements)  # number of landmarks
        n_o = len(self.odometry_measurements)  # number of odometries

        # number of landmark measurements
        nl_measurements = self.calc_num_landmark_measurements(
            self.landmark_measurements)

        n_rows = (n_o + 1) * dim_X + nl_measurements * self.dim_measurements
        n_cols = (n_o + 1) * dim_X + nl * dim_l

        J = np.zeros((n_rows, n_cols))
        b = np.zeros(n_rows)
        sigmas = np.zeros(n_rows)

        # prior on first pose.
        J[-self.dim_X:, :self.dim_X] = np.eye(self.dim_X)
        sigmas[-self.dim_X:] = (self.sigma_odometry / 10)

        # odometry
        for i in range(n_o):
            i0 = i * dim_X
            i1 = i0 + dim_X

            J[i0:i1, i0:i1] = -np.eye(dim_X)
            J[i0:i1, i0 + dim_X:i1 + dim_X] = np.eye(dim_X)
            bi = b[i0:i1]
            sigmas_i = sigmas[i0:i1]
            # displacement
            bi[:2] = X_WB_e[i + 1, :2] - X_WB_e[i, :2] - \
                self.odometry_measurements[i + 1][:2]
            sigmas_i[:2] = self.sigma_odometry

        # range and bearing measurements.
        i_row = n_o * dim_X
        for idx_j, (j, visible_robot_poses) in enumerate(
                self.landmark_measurements.items()):
            for i in visible_robot_poses.keys():
                # i: index of robot poses visible from
                d_ik_m = visible_robot_poses[i]["distance"]
                b_ik_m = visible_robot_poses[i]["bearing"]

                # range.
                d_xy = X_WB_e[i, :2] - l_xy_e[idx_j]

                j_start_x = i * dim_X
                j_start_l = (n_o + 1) * dim_X + idx_j * dim_l
                d = self.calc_range_derivatives(J, i_row, j_start_x, j_start_l,
                                                d_xy)

                b[i_row] = d - d_ik_m
                sigmas[i_row] = self.sigma_range

                # bearing.
                i_row += 1
                dx, dy = self.calc_bearing_derivatives(J, i_row, j_start_x,
                                                       j_start_l, X_WB_e[i],
                                                       l_xy_e[idx_j])

                b[i_row] = calc_angle_diff(b_ik_m, np.arctan2(dy, dx))
                sigmas[i_row] = self.sigma_bearing

                i_row += 1

        return J, b, sigmas

    def calc_info_matrix(self, X_WB_e, l_xy_e):
        J, b, sigmas = self.calc_jacobian_and_b(X_WB_e, l_xy_e)
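        # Gauss-Newton approximation: information matrix I = J' Omega J and
        # information vector q = J' Omega b, with Omega the diagonal matrix of
        # inverse measurement variances.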
        Omega = 1 / sigmas**2
        I = (J.T * Omega).dot(J)
        q = J.T.dot(Omega * b)
        c = (b * Omega).dot(b)

        return I, q, c

    def calc_A_lower_half(self, dX_WB, l_xy_e):
        """
        Algorithm 4 in the paper.
        :param dX_WB: [l, self.dim_X]. Input for time steps [l : ].
        :return:
        """
        # X_WB_e: belief. e stands for "estimated".
        # X_WB_p: prediction. corresponds to quantities with an overbar.
        n_p = len(self.X_WB_e_dict)
        l = len(dX_WB)

        # future predictions.
        X_WB_p = self.calc_pose_predictions(dX_WB)

        # Find the visible landmarks for every X_WB_p[i].
        # Store in {i: [landmark order indices.]}, i.e. the same order as
        #   self.landmark_measurements.
        # future_landmark_measurements = self.find_visible_landmarks(X_WB_p)
        nl = len(self.landmark_measurements)  # number of landmarks
        # number of new landmark measurements
        nl_measurements = l * nl

        # Structure of new "state"
        # [X_WB_0, ... X_WB_{k},
        #  l_xy (all landmarks),
        #  X_WB_{k+1}, ... X_WB_{k+l}]
        n_Xk = n_p * self.dim_X + nl * self.dim_l
        n_rows_F = l * self.dim_X
        n_rows_H = 2 * nl_measurements
        n_cols = l * self.dim_X + n_Xk

        # F_whitened is divided by sigma_w.
        if torch.is_tensor(dX_WB):
            F_whitened = torch.zeros((n_rows_F, n_cols))
            # H is the original Jacobian, before divided by sigma_v.
            H = torch.zeros((n_rows_H, n_cols))
            # as defined in Eq. (33)
            Omega_v_bar_sqrt = torch.zeros(n_rows_H)
            Omega_v_sqrt = torch.zeros(n_rows_H)
            I_X = torch.eye(self.dim_X)

            l_xy_e = torch.from_numpy(l_xy_e)
            norm = torch.norm
            vstack = torch.vstack
        else:
            F_whitened = np.zeros((n_rows_F, n_cols), dtype=dX_WB.dtype)
            H = np.zeros((n_rows_H, n_cols), dtype=dX_WB.dtype)
            Omega_v_bar_sqrt = np.zeros(n_rows_H)
            Omega_v_sqrt = np.zeros(n_rows_H)

            I_X = np.eye(self.dim_X)
            norm = np.linalg.norm
            vstack = np.vstack

        # dynamics, F_whitened.
        for i in range(l):
            i0 = i * self.dim_X
            i1 = i0 + self.dim_X

            F_whitened[i0:i1, n_Xk + i0:n_Xk + i1] = I_X / self.sigma_dynamics

            if i == 0:
                j0 = (n_p - 1) * self.dim_X
                j1 = n_p * self.dim_X
                F_whitened[i0: i1, j0: j1] = \
                    -I_X / self.sigma_dynamics
            else:
                j0 = n_Xk + i0 - self.dim_X
                j1 = n_Xk + i1 - self.dim_X
                F_whitened[i0: i1, j0: j1] = \
                    -I_X / self.sigma_dynamics

        # observations, H.
        i_row = 0
        for idx_j in range(nl):
            for i in range(l):
                d_xy = X_WB_p[i] - l_xy_e[idx_j]
                j_start_x = n_Xk + i * self.dim_X
                j_start_l = n_p * self.dim_X + idx_j * self.dim_l

                d = self.calc_range_derivatives(H,
                                                i_row,
                                                j_start_x,
                                                j_start_l,
                                                d_xy,
                                                sigma=1)

                self.calc_bearing_derivatives(H,
                                              i_row + 1,
                                              j_start_x,
                                              j_start_l,
                                              X_WB_p[i],
                                              l_xy_e[idx_j],
                                              sigma=1)

                if d > 5 * self.r_range_max:
                    p_visible = 0.
                else:
                    p_visible = 1.

                Omega_v_bar_sqrt[i_row] = p_visible / self.sigma_range
                Omega_v_bar_sqrt[i_row + 1] = p_visible / self.sigma_bearing
                Omega_v_sqrt[i_row] = 1 / self.sigma_range
                Omega_v_sqrt[i_row + 1] = 1 / self.sigma_bearing

                i_row += 2

        # A2 is the lower half of \mathcal{A} in Eq. (32),
        #   i.e. [F_whitened; H whitened row-wise by Omega_v_bar_sqrt].
        A2 = vstack([F_whitened, (H.T * Omega_v_bar_sqrt).T])

        return {
            "A2": A2,
            "X_WB_p": X_WB_p,
            "H": H,
            "F_whitened": F_whitened,
            "Omega_v_bar_sqrt": Omega_v_bar_sqrt,
            "Omega_v_sqrt": Omega_v_sqrt
        }

    def calc_inner_layer(self, dX_WB, l_xy_e, I_Xk):
        """
        Algorithm 4 in the paper.
        :param dX_WB:
        :param X_WB_e:
        :param l_xy_e:
        :param I_Xk: information matrix of current trajectory and landmarks.
        :return:
        """
        if torch.is_tensor(dX_WB):
            I_Xk = torch.from_numpy(I_Xk)

        n_Xk = I_Xk.shape[0]  # size of states up to now.

        def calc_I(A2, dtype):
            n_Xl = A2.shape[1] - n_Xk  # size of states into the future.
            n_X = n_Xk + n_Xl

            A21 = A2[:, :n_Xk]
            A22 = A2[:, n_Xk:]

            # I_X{k+l} = A.T.dot(A)
            if torch.is_tensor(A2):
                I = torch.zeros((n_X, n_X))
                I[:n_Xk, :n_Xk] = I_Xk + torch.mm(A21.T, A21)
                I[n_Xk:, n_Xk:] = torch.mm(A22.T, A22)
                I[:n_Xk, n_Xk:] = torch.mm(A21.T, A22)
            else:
                I = np.zeros((n_X, n_X), dtype=dtype)
                I[:n_Xk, :n_Xk] = I_Xk + A21.T.dot(A21)
                I[n_Xk:, n_Xk:] = A22.T.dot(A22)
                I[:n_Xk, n_Xk:] = A21.T.dot(A22)

            I[n_Xk:, :n_Xk] = I[:n_Xk, n_Xk:].T

            return I

        # lower half of A
        result = self.calc_A_lower_half(dX_WB, l_xy_e)
        # including landmarks.
        result["I_e"] = calc_I(result["A2"], dtype=object)
        # excluding landmarks.
        result["I_p"] = calc_I(result["F_whitened"], dtype=np.float)

        return result

    def calc_pose_predictions(self, dX_WB):
        L = len(dX_WB)
        k = len(self.X_WB_e_dict) - 1  # current time step.

        # robot configuration for time steps k + 1 : (k + L).
        if torch.is_tensor(dX_WB):
            X_WB_p = torch.zeros((L, self.dim_X))
            X_WB_e_k = torch.from_numpy(self.X_WB_e_dict[k])
        else:
            X_WB_p = np.zeros((L, self.dim_X), dtype=dX_WB.dtype)
            X_WB_e_k = self.X_WB_e_dict[k]

        for i in range(L):
            if i == 0:
                X_WB_p[i] = X_WB_e_k + dX_WB[i]
            else:
                X_WB_p[i] = X_WB_p[i - 1] + dX_WB[i]

        return X_WB_p

    @staticmethod
    def get_inverse(x):
        if torch.is_tensor(x):
            return torch.inverse
        else:
            return inv_pydrake

    def calc_objective(self, dX_WB, X_WB_e, l_xy_e, X_WB_g, alpha):
        """
        Algorithm 3 in the paper.
        :return:
        """
        L = len(dX_WB)
        I_k, _, _ = self.calc_info_matrix(X_WB_e, l_xy_e)

        if torch.is_tensor(dX_WB):
            M_u = torch.from_numpy(self.M_u)
            X_WB_g = torch.from_numpy(X_WB_g)
        else:
            M_u = self.M_u

        inv = self.get_inverse(dX_WB)

        # (a) in eq. (41).
        c_a = (dX_WB * M_u * dX_WB).sum()

        # (b) in eq. (41).
        c_b = 0.
        for l in range(L):
            result_l = self.calc_inner_layer(dX_WB[:l + 1], l_xy_e, I_k)
            Cov_e_l = inv(result_l["I_e"])
            c_b += Cov_e_l[-self.dim_X:, -self.dim_X:].diagonal().sum()

        Cov_e_L = Cov_e_l
        Cov_p_L = inv(result_l["I_p"])
        X_WB_p_L = result_l["X_WB_p"]
        A2_L = result_l["A2"]
        Omega_v_sqrt = result_l["Omega_v_sqrt"]
        Omega_v_bar_sqrt = result_l["Omega_v_bar_sqrt"]

        # (c) in eq. (41)
        c_c1 = ((X_WB_p_L[-1] - X_WB_g)**2).sum()

        H_whitened_kL = A2_L[L * self.dim_X:]
        H_kL = result_l["H"]

        # B.shape = (self.dim_X, m2),
        #   where m2 is the number of range and bearing measurements of
        #   the trajectory X_WB_p_L.
        if torch.is_tensor(dX_WB):
            B = torch.mm(Cov_e_L[-self.dim_X:],
                         H_whitened_kL.T) * Omega_v_bar_sqrt
            Q_kL = torch.mm(B.T, B)
            D = torch.mm(torch.mm(H_kL, Cov_p_L), H_kL.T)

        else:
            B = Cov_e_L[-self.dim_X:].dot(H_whitened_kL.T) * Omega_v_bar_sqrt
            Q_kL = B.T.dot(B)
            D = H_kL.dot(Cov_p_L).dot(H_kL.T)

        D[np.diag_indices_from(D)] += 1 / Omega_v_sqrt**2

        if torch.is_tensor(dX_WB):
            c_c2 = torch.mm(Q_kL, D).diagonal().sum()
        else:
            c_c2 = Q_kL.dot(D).diagonal().sum()

        c = c_a + alpha * (c_b * 50) + (1 - alpha) * (c_c1 + c_c2)
        # c = c_c1
        return {
            "c": c,
            "c_a": c_a,
            "c_b": c_b,
            "c_c1": c_c1,
            "c_c2": c_c2,
            "predicted_uncertainty_L":
                Cov_p_L[-self.dim_X:, -self.dim_X:].diagonal().sum()
        }

    def calc_alpha(self, dX_WB, X_WB_e, l_xy_e):
        inv = self.get_inverse(dX_WB)
        I_k, _, _ = self.calc_info_matrix(X_WB_e, l_xy_e)
        result_L = self.calc_inner_layer(dX_WB, l_xy_e, I_k)
        Cov_p_L = inv(result_L["I_p"])

        uncertainty_L = Cov_p_L[-self.dim_X:, -self.dim_X:].diagonal().sum()
        alpha = min(1, uncertainty_L / self.beta)

        if self.is_uncertainty_reduction_only:
            if alpha < self.alpha_LB:
                self.is_uncertainty_reduction_only = False
            else:
                alpha = 1
        else:
            if alpha == 1:
                self.is_uncertainty_reduction_only = True

        return float(alpha)

    def calc_objective_gradient_finite_difference(self, dX_WB, X_WB_e, l_xy_e,
                                                  X_WB_g, alpha):
        """
        Algorithm 2 in the paper.
        :param dX_WB:
        :param X_WB_e:
        :param l_xy_e:
        :param X_WB_g: goal.
        :return:
        """
        Dc = np.zeros_like(dX_WB.ravel())

        h = 1e-3
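        # Central finite difference: Dc[i] ~ (c(x + h*e_i) - c(x - h*e_i)) / (2*h).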
        for i in range(len(Dc)):
            dX_WB_new = dX_WB.copy()
            dX_WB_new.ravel()[i] += h
            c_new1 = self.calc_objective(dX_WB_new, X_WB_e, l_xy_e, X_WB_g,
                                         alpha)["c"]

            dX_WB_new.ravel()[i] -= 2 * h
            c_new2 = self.calc_objective(dX_WB_new, X_WB_e, l_xy_e, X_WB_g,
                                         alpha)["c"]

            Dc[i] = (c_new1 - c_new2) / h / 2

        Dc.resize(dX_WB.shape)

        return Dc

    def run_gradient_descent(self,
                             dX_WB0,
                             X_WB_e,
                             l_xy_e,
                             X_WB_g,
                             alpha=None,
                             backprop=True):
        """
        Algorithm 1 in the paper.
        :return:
        """
        dX_WB = dX_WB0.copy()
        dX_WB_torch = torch.from_numpy(dX_WB)
        dX_WB_torch.requires_grad_()
        iter_count = 0

        a = 0.4
        b = 0.5
        epsilon = 2e-3

        if alpha is None:
            alpha = self.calc_alpha(dX_WB, X_WB_e, l_xy_e)

        while True:
            if backprop:
                if dX_WB_torch.grad is not None:
                    dX_WB_torch.grad.data.zero_()
                result = self.calc_objective(dX_WB_torch, X_WB_e, l_xy_e,
                                             X_WB_g, alpha)
                c0 = result["c"]
                c0.backward()
                D_dX_WB = dX_WB_torch.grad.numpy()

                result = self.convert_cost_from_torch_tensor_to_float(result)
            else:
                result = self.calc_objective(dX_WB, X_WB_e, l_xy_e, X_WB_g,
                                             alpha)
                D_dX_WB = self.calc_objective_gradient_finite_difference(
                    dX_WB, X_WB_e, l_xy_e, X_WB_g, alpha)
                c0 = result["c"]

            print("\nIteration {}, cost \n".format(iter_count), result)

            # line search
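            # Backtracking line search: accept the step when the Armijo
            # sufficient-decrease condition c(x - t*g) < c(x) - a*t*||g||^2
            # holds, otherwise shrink t by the factor b (with a cap on the
            # number of backtracking steps).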
            t = 1
            line_search_count = 0
            while True:
                result = self.calc_objective(dX_WB - t * D_dX_WB, X_WB_e,
                                             l_xy_e, X_WB_g, alpha)
                c_t = result["c"]
                if (c_t < c0 - a * t * (D_dX_WB**2).sum()
                        or line_search_count > 10):
                    break
                # print("count {}, c_t {}, ".format(line_search_count, c_t))
                t *= b
                line_search_count += 1

            dX_WB -= t * D_dX_WB

            print("Line search steps {}, gradient {}".format(
                line_search_count, np.linalg.norm(D_dX_WB)))
            print(result)
            iter_count += 1

            # termination
            is_cost_reduction_small = abs((c_t - c0) / c_t) < epsilon
            is_gradient_small = np.linalg.norm(D_dX_WB) < epsilon
            if is_cost_reduction_small or is_gradient_small or iter_count > 200:
                break

        print("alpha = {}".format(alpha), self.is_uncertainty_reduction_only)
        result["alpha"] = alpha
        return dX_WB, result

    @staticmethod
    def convert_cost_from_torch_tensor_to_float(result):
        result_no_torch = dict()
        for key, value in result.items():
            result_no_torch[key] = float(value)
        return result_no_torch

    def initialize_dX_WB_with_goal(self, X_WB_0, X_WB_g, L, max_step=0.2):
        """

        :param X_WB_0: current robot pose.
        :param X_WB_g: goal robot pose.
        :param L: number of time steps.
        :return:
        """
        step = (X_WB_g - X_WB_0) / L
        step_size = np.linalg.norm(step)
        step /= step_size
        step *= min(step_size, max_step)

        dX_WB = np.zeros((L, X_WB_0.size))
        dX_WB[:] = step

        return dX_WB

    def initialize_dX_WB_with_fixed_input(self, X_WB_0, dX_WB0, L):
        """

        :param X_WB_0: current robot pose.
        :param dX_WB0: fixed input.
        :param L: number of time steps.
        :return:
        """
        dX_WB = np.zeros((L, X_WB_0.size))
        dX_WB[:] = dX_WB0
        return dX_WB

    def run_bundle_adjustment(self, is_printing=False):
        dim_l = self.dim_l  # dimension of landmarks.
        dim_X = self.dim_X  # dimension of poses.

        X_WB_e0 = self.get_X_WB_initial_guess_array()
        l_xy_e0 = self.get_l_xy_initial_guess_array()
        X_WB_e = X_WB_e0.copy()
        l_xy_e = l_xy_e0.copy()

        n_o = len(self.odometry_measurements)
        n_p = n_o + 1  # number of poses.
        n_l = len(self.landmark_measurements)

        steps_counter = 0
        while True:
            I_k, q, c = self.calc_info_matrix(X_WB_e, l_xy_e)
            prog = mp.MathematicalProgram()

            dX_WB_e = prog.NewContinuousVariables(n_p, dim_X, "dX_WB_e")
            dl_xy_e = prog.NewContinuousVariables(n_l, dim_l, "dl_xy_e")
            dx = np.hstack((dX_WB_e.ravel(), dl_xy_e.ravel()))

            nx = len(dx)
            prog.AddQuadraticCost(I_k, q, 0.5 * c, dx)
            prog.AddQuadraticCost(np.eye(nx) * 2, np.zeros(nx), dx)

            result = self.solver.Solve(prog)
            optimal_cost = result.get_optimal_cost()
            assert result.get_solution_result() == \
                   mp.SolutionResult.kSolutionFound

            dX_WB_e_values = result.GetSolution(dX_WB_e)
            dl_xy_e_values = result.GetSolution(dl_xy_e)
            dx_values = result.GetSolution(dx)

            X_WB_e += dX_WB_e_values
            l_xy_e += dl_xy_e_values

            steps_counter += 1
            if steps_counter > 500 or \
                    np.linalg.norm(dx_values) / (n_p + n_l) < 5e-3:
                break

        self.update_beliefs(X_WB_e, l_xy_e, I_k)

        if is_printing:
            print("\nStep ", n_o)
            print("optimal cost: ", optimal_cost)
            _, b, _ = self.calc_jacobian_and_b(X_WB_e, l_xy_e)
            print("b norm recalculated: ", np.linalg.norm(b))
            print("total gradient steps: ", steps_counter)
            print("dx norm: ", np.linalg.norm(dx_values))
            Omega_pose = self.marginalize_info_matrix(I_k, n_p, n_l)
            print("Marginalized covariance diagonal\n",
                  np.linalg.inv(Omega_pose).diagonal())
            Cov = np.linalg.inv(I_k)
            print("Sqrt covariance trace\n", np.sqrt(Cov.diagonal().sum()))
            print("\n")

        return X_WB_e, l_xy_e

    def update_beliefs(self, X_WB_e_values, l_xy_e_values, I_all):
        Cov = np.linalg.inv(I_all)

        for i, X_WB_e in enumerate(X_WB_e_values):
            i0 = i * self.dim_X
            i1 = i0 + self.dim_X
            self.X_WB_e_dict[i] = X_WB_e
            Cov_i = Cov[i0:i1, i0:i1]
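            # Eigen-decompose the pose covariance block: the eigenvectors give
            # the orientation and the sqrt-eigenvalues the semi-axis lengths of
            # the uncertainty ellipsoid drawn in meshcat.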
            e_values, e_vectors = np.linalg.eig(Cov_i)
            X_WB_ellipsoid = np.eye(4)
            X_WB_ellipsoid[:2, :2] = e_vectors
            X_WB_ellipsoid[:2, 3] = X_WB_e
            sigmas = np.sqrt(e_values)
            self.X_WB_e_var_dict[i] = {
                "sigmas": np.hstack([np.real(sigmas), 0.01]),
                "transform": X_WB_ellipsoid,
                "cov": Cov_i
            }

        self.l_xy_e_dict.clear()
        for k, l_xy_e_value in zip(self.landmark_measurements.keys(),
                                   l_xy_e_values):
            self.l_xy_e_dict[k] = l_xy_e_value

        self.I_all = I_all

    def calc_sqrt_postion_cov_trace(self):
        postion_cov_trace = 0.
        for a in self.X_WB_e_var_dict.values():
            postion_cov_trace += a["cov"].diagonal().sum()

        return np.sqrt(postion_cov_trace)

    def find_visible_landmarks(self, X_WB_p):
        future_landmark_measurements = dict()
        for i, X_WB in enumerate(X_WB_p):
            for j in self.l_xy_e_dict.keys():
                d_xy = self.l_xy_e_dict[j] - X_WB
                d = np.linalg.norm(d_xy)
                if self.r_range_min < d:
                    if j not in future_landmark_measurements:
                        future_landmark_measurements[j] = dict()
                    b = np.arctan2(d_xy[1], d_xy[0])
                    future_landmark_measurements[j][i] = {
                        "distance": d,
                        "bearing": b
                    }

        return future_landmark_measurements

    def draw_estimated_path(self):
        n_p = len(self.X_WB_e_dict)
        t_xy_e = np.zeros((n_p, 2))
        for i in range(n_p):
            t_xy_e[i] = self.X_WB_e_dict[i][:2]
        self.draw_robot_path(t_xy_e,
                             color=[1, 0, 0],
                             prefix="robot_poses_e",
                             idx_segment=0,
                             size=0.2)

    def draw_estimated_path_segment(self,
                                    L: int,
                                    idx_segment: int,
                                    covariance_scale=1.):
        n_p = len(self.X_WB_e_dict)
        name = "robot_poses_e/{}".format(idx_segment)
        material = meshcat.geometry.MeshLambertMaterial(color=0x778899,
                                                        opacity=0.5)

        if L is None:
            L = n_p

        points = np.zeros((L, 3))
        for i in range(L):
            idx = n_p - L + i
            # Draw covariance ellipses.
            sigmas = self.X_WB_e_var_dict[idx]["sigmas"]
            sigmas[:2] *= covariance_scale
            self.vis[name]["points"][str(i)].set_object(
                meshcat.geometry.Ellipsoid(sigmas), material)
            self.vis[name]["points"][str(i)].set_transform(
                self.X_WB_e_var_dict[idx]["transform"])

            points[i, :2] = self.X_WB_e_dict[idx]

        # Draw lines.
        self.vis[name]["path"].set_object(
            meshcat.geometry.Line(meshcat.geometry.PointsGeometry(points.T),
                                  material))

    def draw_estimated_landmarks(self):
        nl = len(self.l_xy_e_dict)
        l_xy_e = np.zeros((nl, 3))

        for i, (k, l_xy_e_i) in enumerate(self.l_xy_e_dict.items()):
            l_xy_e[i][:2] = l_xy_e_i
            points = np.zeros((2, 3))
            points[0, :2] = l_xy_e_i
            points[1, :2] = self.l_xy[k]
            self.vis["landmarks_e"]["corrspondances"][str(k)].set_object(
                meshcat.geometry.Line(meshcat.geometry.PointsGeometry(
                    points.T)))

            # draw landmark
            prefix = "landmarks_e/points/{}".format(i)
            self.frontend.draw_cylinder(prefix, 0.05, 0.11, 0x00ffff, l_xy_e_i)

    def draw_robot_path(self,
                        X_WBs,
                        color,
                        prefix: str,
                        idx_segment: int,
                        size=0.2):
        t_xy = X_WBs[:, :2]
        for i in range(1, len(t_xy)):
            name = prefix + "/{}/points/{}".format(idx_segment, i)
            self.frontend.draw_box(name, [size, size, 0.15], color, t_xy[i])

            points = np.zeros((2, 3))
            points[0, :2] = t_xy[i - 1]
            points[1, :2] = t_xy[i]
            self.vis[prefix][str(idx_segment)]["path"][str(i)].set_object(
                meshcat.geometry.Line(meshcat.geometry.PointsGeometry(
                    points.T)))
Example #24
def quadratic_program(H, f, A, b, C=None, d=None, tol=1.e-5, **kwargs):
    """
    Solves the strictly convex (H > 0) quadratic program min .5 x' H x + f' x s.t. A x <= b, C x  = d.

    Arguments
    ----------
    H : numpy.ndarray
        Positive definite Hessian of the cost function.
    f : numpy.ndarray
        Gradient of the cost function.
    A : numpy.ndarray
        Left-hand side of the inequality constraints.
    b : numpy.ndarray
        Right-hand side of the inequality constraints.
    C : numpy.ndarray
        Left-hand side of the equality constraints.
    d : numpy.ndarray
        Right-hand side of the equality constraints.
    tol : float
        Maximum value of a residual of an inequality to consider the related constraint active.

    Returns
    ----------
    sol : dict
        Dictionary with the solution of the QP.

        Fields
        ----------
        min : float
            Minimum of the QP (None if the problem is unfeasible).
        argmin : numpy.ndarray
            Argument that minimizes the QP (None if the problem is unfeasible).
        active_set : list of int
            Indices of the active inequalities {i | A_i argmin = b_i} (None if the problem is unfeasible).
        multiplier_inequality : numpy.ndarray
            Lagrange multipliers for the inequality constraints (None if the problem is unfeasible).
        multiplier_equality : numpy.ndarray
            Lagrange multipliers for the equality constraints (None if the problem is unfeasible or without equality constraints).
    """

    # check equalities
    if (C is None) != (d is None):
        raise ValueError('missing C or d.')

    # problem size
    n_ineq, n_x = A.shape
    if C is not None:
        n_eq = C.shape[0]
    else:
        n_eq = 0

    # build program
    prog = MathematicalProgram()
    x = prog.NewContinuousVariables(n_x)
    [prog.AddLinearConstraint(A[i].dot(x) <= b[i]) for i in range(n_ineq)]
    [prog.AddLinearConstraint(C[i].dot(x) == d[i]) for i in range(n_eq)]
    prog.AddQuadraticCost(.5*x.dot(H).dot(x) + f.dot(x))

    # solve
    solver = GurobiSolver()
    prog.SetSolverOption(solver.solver_type(), 'OutputFlag', 0)
    [prog.SetSolverOption(solver.solver_type(), parameter, value) for parameter, value in kwargs.items()]
    result = prog.Solve()

    # initialize output
    sol = {
        'min': None,
        'argmin': None,
        'active_set': None,
        'multiplier_inequality': None,
        'multiplier_equality': None
    }

    if result == SolutionResult.kSolutionFound:
        sol['argmin'] = prog.GetSolution(x)
        sol['min'] = .5*sol['argmin'].dot(H).dot(sol['argmin']) + f.dot(sol['argmin'])
        sol['active_set'] = np.where(A.dot(sol['argmin']) - b > -tol)[0].tolist()

        # retrieve multipliers through KKT conditions
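        # With active rows "lhs" and right-hand side "rhs", stationarity
        # H x + f + lhs' m = 0 together with lhs x = rhs gives
        # m = -(lhs H^-1 lhs')^-1 (lhs H^-1 f + rhs), computed below.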
        lhs = A[sol['active_set']]
        rhs = b[sol['active_set']]
        if n_eq > 0:
            lhs = np.vstack((lhs, C))
            rhs = np.concatenate((rhs, d))
        H_inv = np.linalg.inv(H)
        M = lhs.dot(H_inv).dot(lhs.T)
        m = - np.linalg.inv(M).dot(lhs.dot(H_inv).dot(f) + rhs)
        sol['multiplier_inequality'] = np.zeros(n_ineq)
        for i, j in enumerate(sol['active_set']):
            sol['multiplier_inequality'][j] = m[i]
        if n_eq > 0:
            sol['multiplier_equality'] = m[len(sol['active_set']):]

    return sol
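
# A minimal usage sketch (not part of the original example): it assumes numpy is
# imported as np and a Gurobi license is available; the *_demo names are
# illustrative only. It solves min ||x||^2 s.t. x >= 1 element-wise, whose
# optimum is x = [1, 1] with both inequalities active.
if __name__ == '__main__':
    H_demo = 2. * np.eye(2)       # Hessian of ||x||^2
    f_demo = np.zeros(2)          # no linear cost term
    A_demo = -np.eye(2)           # -x <= -1  <=>  x >= 1
    b_demo = -np.ones(2)
    sol_demo = quadratic_program(H_demo, f_demo, A_demo, b_demo)
    print(sol_demo['argmin'])                 # approximately [1., 1.]
    print(sol_demo['min'])                    # approximately 2.
    print(sol_demo['multiplier_inequality'])  # approximately [2., 2.]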
Example #25
	z = prog.NewBinaryVariables(num_regions, "z") # Integer variables that represent the region the point will be in
	prog.AddLinearConstraint(np.sum(z) == 1) # only one is set
	
	# Create M (TODO: calculate this value)
	M = 100

	# Constrain the points to the regions
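	# Big-M logic: when z[i] == 1 the constraint below reduces to A[i] x <= b[i]
	# (the point must lie in region i); when z[i] == 0 it relaxes to
	# A[i] x <= b[i] + M, which is inactive for sufficiently large M.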
	for i in range(num_regions):
		for j in range(A[i].shape[0]):
			prog.AddLinearConstraint(A[i][j][0]*x[0]+A[i][j][1]*x[1] + M*z[i] <= b[i][j] + M)

	# Add objective
	prog.AddQuadraticCost((x[0]-x_goal[0])**2 + (x[1]-x_goal[1])**2) # distance of x to the goal point

	# Solve problem
	solver = GurobiSolver()
	assert(solver.available())
	assert(solver.solver_type()==mp.SolverType.kGurobi)
	result = solver.Solve(prog)
	assert(result == mp.SolutionResult.kSolutionFound)
	print("Goal: " + str(x_goal))
	finalx = prog.GetSolution(x)
	print("Final Solution: " + str(finalx))

	# ********* GRAPH PROBLEM *********
	# Create figure
	fig = plt.figure(1, (20, 10))
	plt.title("Minimize distance of point within " + str(num_regions) + " " + str(dim) + "-D Polytopes to Goal Point")

	# Plot regions
	for j in range(num_regions):
Example #26
class TestMathematicalProgram(unittest.TestCase):
    def test_program_construction(self):
        prog = mp.MathematicalProgram()
        vars = prog.NewContinuousVariables(5, "x")
        self.assertEqual(vars.dtype, sym.Variable)
        vars_all = prog.decision_variables()
        self.assertEqual(vars_all.shape, (5, ))

    def test_program_attributes_and_solver_selection(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")

        # Add linear equality constraints; make sure the solver works.
        prog.AddLinearConstraint(x[0] + x[1] == 0)
        prog.AddLinearConstraint(2 * x[0] - x[1] == 1)
        solver_id = mp.ChooseBestSolver(prog)
        self.assertEqual(solver_id.name(), "Linear system")
        solver = mp.MakeSolver(solver_id)
        self.assertEqual(solver.solver_id().name(), "Linear system")
        self.assertTrue(solver.AreProgramAttributesSatisfied(prog))
        result = solver.Solve(prog, None, None)
        self.assertTrue(result.is_success())

        # With an inequality constraint added, the "Linear system" solver
        # doesn't work anymore.
        prog.AddLinearConstraint(x[0] >= 0)
        self.assertFalse(solver.AreProgramAttributesSatisfied(prog))
        with self.assertRaises(ValueError):
            solver.Solve(prog, None, None)

        # A different solver will work, though.  We re-use the result object
        # (as a mutable output argument), and make sure that it changes.
        solver_id = mp.ChooseBestSolver(prog)
        self.assertNotEqual(solver_id.name(), "Linear system")
        solver = mp.MakeSolver(solver_id)
        solver.Solve(prog, None, None, result)
        self.assertTrue(result.is_success())
        self.assertEqual(result.get_solver_id().name(), solver_id.name())

    def test_module_level_solve_function_and_result_accessors(self):
        qp = TestQP()
        x_expected = np.array([1, 1])
        result = mp.Solve(qp.prog)
        self.assertTrue(result.is_success())
        self.assertTrue(np.allclose(result.get_x_val(), x_expected))
        self.assertEqual(result.get_solution_result(),
                         mp.SolutionResult.kSolutionFound)
        self.assertEqual(result.get_optimal_cost(), 3.0)
        self.assertTrue(result.get_solver_id().name())
        self.assertTrue(np.allclose(result.GetSolution(), x_expected))
        self.assertAlmostEqual(result.GetSolution(qp.x[0]), 1.0)
        self.assertTrue(np.allclose(result.GetSolution(qp.x), x_expected))
        self.assertTrue(
            result.GetSolution(sym.Expression(qp.x[0])).EqualTo(
                result.GetSolution(qp.x[0])))
        m = np.array([sym.Expression(qp.x[0]), sym.Expression(qp.x[1])])
        self.assertTrue(
            result.GetSolution(m)[1, 0].EqualTo(result.GetSolution(qp.x[1])))


    # TODO(jwnimmer-tri) MOSEK is also able to solve mixed integer programs;
    # perhaps we should test both of them?
    @unittest.skipUnless(GurobiSolver().available(), "Requires Gurobi")
    def test_mixed_integer_optimization(self):
        prog = mp.MathematicalProgram()
        x = prog.NewBinaryVariables(3, "x")
        c = np.array([-1.0, -1.0, -2.0])
        prog.AddLinearCost(c.dot(x))
        a = np.array([1.0, 2.0, 3.0])
        prog.AddLinearConstraint(a.dot(x) <= 4)
        prog.AddLinearConstraint(x[0] + x[1], 1, np.inf)
        prog.AddConstraint(
            LinearConstraint(np.array([[1., 1.]]), np.array([1]),
                             np.array([np.inf])), [x[0], x[1]])
        solver = GurobiSolver()
        result = solver.Solve(prog, None, None)
        self.assertTrue(result.is_success())

        # Test that we got the right solution for all x
        x_expected = np.array([1.0, 0.0, 1.0])
        self.assertTrue(np.all(np.isclose(result.GetSolution(x), x_expected)))

        # Also test by asking for the value of each element of x
        for i in range(3):
            self.assertAlmostEqual(result.GetSolution(x[i]), x_expected[i])

    def test_qp(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")
        # N.B. Scalar-wise logical ops work for Expression, but array ops need
        # the workaround overloads from `pydrake.math`.
        prog.AddLinearConstraint(ge(x, 1))
        prog.AddQuadraticCost(np.eye(2), np.zeros(2), x)
        prog.AddQuadraticCost(np.eye(2), np.zeros(2), 1, x)
        # Redundant cost just to check the spelling.
        prog.AddQuadraticErrorCost(vars=x, Q=np.eye(2), x_desired=np.zeros(2))
        prog.AddL2NormCost(A=np.eye(2), b=np.zeros(2), vars=x)

        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

        x_expected = np.array([1, 1])
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))

    def test_symbolic_qp(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")
        prog.AddConstraint(x[0], 1., 100.)
        prog.AddConstraint(x[1] >= 1)
        prog.AddQuadraticCost(x[0]**2 + x[1]**2)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

        x_expected = np.array([1, 1])
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))

    def test_bindings(self):
        qp = TestQP()
        prog = qp.prog
        x = qp.x

        self.assertEqual(prog.FindDecisionVariableIndices(vars=[x[0], x[1]]),
                         [0, 1])
        self.assertEqual(prog.decision_variable_index()[x[0].get_id()], 0)
        self.assertEqual(prog.decision_variable_index()[x[1].get_id()], 1)

        for binding in prog.GetAllCosts():
            self.assertIsInstance(binding.evaluator(), mp.Cost)
        for binding in prog.GetLinearConstraints():
            self.assertIsInstance(binding.evaluator(), mp.Constraint)
        for binding in prog.GetAllConstraints():
            self.assertIsInstance(binding.evaluator(), mp.Constraint)

        self.assertTrue(prog.linear_costs())
        for (i, binding) in enumerate(prog.linear_costs()):
            cost = binding.evaluator()
            self.assertTrue(np.allclose(cost.a(), np.ones((1, 2))))
            self.assertIsNone(cost.gradient_sparsity_pattern())

        self.assertTrue(prog.quadratic_costs())
        for (i, binding) in enumerate(prog.quadratic_costs()):
            cost = binding.evaluator()
            self.assertTrue(np.allclose(cost.Q(), np.eye(2)))
            self.assertTrue(np.allclose(cost.b(), np.zeros(2)))
            self.assertIsNone(cost.gradient_sparsity_pattern())

        self.assertTrue(prog.bounding_box_constraints())
        for (i, binding) in enumerate(prog.bounding_box_constraints()):
            constraint = binding.evaluator()
            self.assertEqual(
                prog.FindDecisionVariableIndex(var=binding.variables()[0]),
                prog.FindDecisionVariableIndex(var=x[i]))
            self.assertIsNone(constraint.gradient_sparsity_pattern())
            num_constraints = constraint.num_constraints()
            if num_constraints == 1:
                self.assertEqual(constraint.A(), 1)
                self.assertEqual(constraint.lower_bound(), 1)
                self.assertEqual(constraint.upper_bound(), np.inf)
            else:
                self.assertTrue(np.allclose(constraint.A(), np.eye(2)))
                self.assertTrue(
                    np.allclose(constraint.lower_bound(), [1, -np.inf]))
                self.assertTrue(
                    np.allclose(constraint.upper_bound(), [np.inf, 2]))

        self.assertTrue(prog.linear_constraints())
        for (i, binding) in enumerate(prog.linear_constraints()):
            constraint = binding.evaluator()
            self.assertIsNone(constraint.gradient_sparsity_pattern())
            self.assertEqual(
                prog.FindDecisionVariableIndex(var=binding.variables()[0]),
                prog.FindDecisionVariableIndex(var=x[0]))
            self.assertEqual(
                prog.FindDecisionVariableIndex(var=binding.variables()[1]),
                prog.FindDecisionVariableIndex(var=x[1]))
            self.assertTrue(np.allclose(constraint.A(), [3, -1]))
            self.assertTrue(np.allclose(constraint.lower_bound(), -2))
            self.assertTrue(np.allclose(constraint.upper_bound(), np.inf))

        self.assertTrue(prog.linear_equality_constraints())
        for (i, binding) in enumerate(prog.linear_equality_constraints()):
            constraint = binding.evaluator()
            self.assertIsNone(constraint.gradient_sparsity_pattern())
            self.assertEqual(
                prog.FindDecisionVariableIndex(var=binding.variables()[0]),
                prog.FindDecisionVariableIndex(var=x[0]))
            self.assertEqual(
                prog.FindDecisionVariableIndex(var=binding.variables()[1]),
                prog.FindDecisionVariableIndex(var=x[1]))
            self.assertTrue(np.allclose(constraint.A(), [1, 2]))
            self.assertTrue(np.allclose(constraint.lower_bound(), 3))
            self.assertTrue(np.allclose(constraint.upper_bound(), 3))

        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

        x_expected = np.array([1, 1])
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))

    def test_constraint_api(self):
        prog = mp.MathematicalProgram()
        x0, = prog.NewContinuousVariables(1, "x")
        c = prog.AddLinearConstraint(x0 >= 2).evaluator()
        ce = prog.AddLinearEqualityConstraint(2 * x0, 1).evaluator()

        self.assertTrue(c.CheckSatisfied([2.], tol=1e-3))
        self.assertFalse(c.CheckSatisfied([AutoDiffXd(1.)]))
        self.assertIsInstance(c.CheckSatisfied([x0]), sym.Formula)

        ce.set_description("my favorite constraint")
        self.assertEqual(ce.get_description(), "my favorite constraint")

        def check_bounds(c, A, lb, ub):
            self.assertTrue(np.allclose(c.A(), A))
            self.assertTrue(np.allclose(c.lower_bound(), lb))
            self.assertTrue(np.allclose(c.upper_bound(), ub))

        check_bounds(c, [1.], [2.], [np.inf])
        c.UpdateLowerBound([3.])
        check_bounds(c, [1.], [3.], [np.inf])
        c.UpdateUpperBound([4.])
        check_bounds(c, [1.], [3.], [4.])
        c.set_bounds([-10.], [10.])
        check_bounds(c, [1.], [-10.], [10.])
        c.UpdateCoefficients([10.], [-20.], [-30.])
        check_bounds(c, [10.], [-20.], [-30.])

        check_bounds(ce, [2.], [1.], [1.])
        ce.UpdateCoefficients([10.], [20.])
        check_bounds(ce, [10.], [20.], [20.])

    def test_cost_api(self):
        prog = mp.MathematicalProgram()
        x0, = prog.NewContinuousVariables(1, "x")
        lc = prog.AddLinearCost([1], 2, [x0]).evaluator()
        qc = prog.AddQuadraticCost(0.5 * x0**2 + 2 * x0 + 3).evaluator()

        def check_linear_cost(cost, a, b):
            self.assertTrue(np.allclose(cost.a(), a))
            self.assertTrue(np.allclose(cost.b(), b))

        check_linear_cost(lc, [1.], 2.)
        lc.UpdateCoefficients([10.])
        check_linear_cost(lc, [10.], 0.)

        def check_quadratic_cost(cost, Q, b, c):
            self.assertTrue(np.allclose(cost.Q(), Q))
            self.assertTrue(np.allclose(cost.b(), b))
            self.assertTrue(np.allclose(cost.c(), c))

        check_quadratic_cost(qc, [1.], [2.], 3.)
        qc.UpdateCoefficients([10.], [20.])
        check_quadratic_cost(qc, [10.], [20.], 0)

    def test_eval_binding(self):
        qp = TestQP()
        prog = qp.prog

        x = qp.x
        x_expected = np.array([1., 1.])

        costs = qp.costs
        cost_values_expected = [2., 1.]
        constraints = qp.constraints
        constraint_values_expected = [1., 1., 2., 3.]

        result = mp.Solve(prog)
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))

        enum = zip(constraints, constraint_values_expected)
        for (constraint, value_expected) in enum:
            value = result.EvalBinding(constraint)
            self.assertTrue(np.allclose(value, value_expected))

        enum = zip(costs, cost_values_expected)
        for (cost, value_expected) in enum:
            value = result.EvalBinding(cost)
            self.assertTrue(np.allclose(value, value_expected))

        self.assertIsInstance(result.EvalBinding(costs[0]), np.ndarray)

        # Bindings for `Eval`.
        x_list = (float(1.), AutoDiffXd(1.), sym.Variable("x"))
        T_y_list = (float, AutoDiffXd, sym.Expression)
        evaluator = costs[0].evaluator()
        for x_i, T_y_i in zip(x_list, T_y_list):
            y_i = evaluator.Eval(x=[x_i, x_i])
            self.assertIsInstance(y_i[0], T_y_i)

    def test_get_binding_variable_values(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(3)
        binding1 = prog.AddBoundingBoxConstraint(-1, 1, x[0])
        binding2 = prog.AddLinearEqualityConstraint(x[1] + 2 * x[2], 2)
        x_val = np.array([-2., 1., 2.])
        np.testing.assert_allclose(
            prog.GetBindingVariableValues(binding1, x_val), np.array([-2]))
        np.testing.assert_allclose(
            prog.GetBindingVariableValues(binding2, x_val), np.array([1, 2]))

    def test_matrix_variables(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, 2, "x")
        for i in range(2):
            for j in range(2):
                prog.AddLinearConstraint(x[i, j] == 2 * i + j)
        result = mp.Solve(prog)
        xval = result.GetSolution(x)
        for i in range(2):
            for j in range(2):
                self.assertAlmostEqual(xval[i, j], 2 * i + j)
                self.assertEqual(xval[i, j], result.GetSolution(x[i, j]))
        # Just check spelling.
        y = prog.NewIndeterminates(2, 2, "y")

    def test_sdp(self):
        prog = mp.MathematicalProgram()
        S = prog.NewSymmetricContinuousVariables(3, "S")
        prog.AddLinearConstraint(S[0, 1] >= 1)
        prog.AddPositiveSemidefiniteConstraint(S)
        prog.AddPositiveSemidefiniteConstraint(S + S)
        prog.AddLinearCost(np.trace(S))
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())
        S = result.GetSolution(S)
        eigs = np.linalg.eigvals(S)
        tol = 1e-8
        self.assertTrue(np.all(eigs >= -tol))
        self.assertTrue(S[0, 1] >= -tol)

    def test_sos(self):
        # Find a,b,c,d subject to
        # a(0) + a(1)*x,
        # b(0) + 2*b(1)*x + b(2)*x^2 is SOS,
        # c(0)*x^2 + 2*c(1)*x*y + c(2)*y^2 is SOS,
        # d(0)*x^2 is SOS.
        # d(1)*x^2 is SOS.
        # d(0) + d(1) = 1
        prog = mp.MathematicalProgram()
        x = prog.NewIndeterminates(1, "x")
        self.assertEqual(prog.indeterminates_index()[x[0].get_id()], 0)
        poly = prog.NewFreePolynomial(sym.Variables(x), 1)
        (poly,
         binding) = prog.NewSosPolynomial(indeterminates=sym.Variables(x),
                                          degree=2)
        y = prog.NewIndeterminates(1, "y")
        self.assertEqual(prog.indeterminates_index()[y[0].get_id()], 1)
        (poly,
         binding) = prog.NewSosPolynomial(monomial_basis=(sym.Monomial(x[0]),
                                                          sym.Monomial(y[0])))
        d = prog.NewContinuousVariables(2, "d")
        prog.AddSosConstraint(d[0] * x.dot(x))
        prog.AddSosConstraint(d[1] * x.dot(x), [sym.Monomial(x[0])])
        prog.AddLinearEqualityConstraint(d[0] + d[1] == 1)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

    def test_equality_between_polynomials(self):
        prog = mp.MathematicalProgram()
        x = prog.NewIndeterminates(1, "x")
        a = prog.NewContinuousVariables(2, "a")
        prog.AddEqualityConstraintBetweenPolynomials(
            sym.Polynomial(2 * a[0] * x[0] + a[1] + 2, x),
            sym.Polynomial(2 * x[0] + 4, x))
        result = mp.Solve(prog)
        a_val = result.GetSolution(a)
        self.assertAlmostEqual(a_val[0], 1)
        self.assertAlmostEqual(a_val[1], 2)

    def test_log_determinant(self):
        # Find the minimal ellipsoid that covers some given points.
        prog = mp.MathematicalProgram()
        X = prog.NewSymmetricContinuousVariables(2)
        pts = np.array([[1, 1], [1, -1], [-1, 1]])
        for i in range(3):
            pt = pts[i, :]
            prog.AddLinearConstraint(pt.dot(X.dot(pt)) <= 1)
        prog.AddMaximizeLogDeterminantSymmetricMatrixCost(X)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

    def test_maximize_geometric_mean(self):
        # Find the smallest axis-aligned ellipsoid that covers some given
        # points.
        prog = mp.MathematicalProgram()
        a = prog.NewContinuousVariables(2)
        pts = np.array([[1, 1], [1, -1], [-1, 1]])
        for i in range(3):
            pt = pts[i, :]
            prog.AddLinearConstraint(pt.dot(a * pt) <= 1)
        prog.AddMaximizeGeometricMeanCost(a, 1)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

    def test_max_geometric_mean_trivial(self):
        # Solve the trivial problem.
        # max (2x+3)*(3x+2)
        # s.t 2x+3 >= 0
        #     3x+2 >= 0
        #     x <= 10
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1)
        prog.AddLinearConstraint(x[0] <= 10)
        A = np.array([2, 3])
        b = np.array([3, 2])
        prog.AddMaximizeGeometricMeanCost(A, b, x)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

    def test_lcp(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, 'x')
        M = np.array([[1, 3], [4, 1]])
        q = np.array([-16, -15])
        binding = prog.AddLinearComplementarityConstraint(M, q, x)
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())
        self.assertIsInstance(binding.evaluator(),
                              mp.LinearComplementarityConstraint)

    def test_linear_constraints(self):
        # TODO(eric.cousineau): Add more general tests
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, 'x')
        lb = [0., 0.]
        ub = [1., 1.]
        prog.AddBoundingBoxConstraint(lb, ub, x)
        prog.AddBoundingBoxConstraint(0., 1., x[0])
        prog.AddBoundingBoxConstraint(0., 1., x)
        prog.AddLinearConstraint(A=np.eye(2),
                                 lb=np.zeros(2),
                                 ub=np.ones(2),
                                 vars=x)
        prog.AddLinearConstraint(e=x[0], lb=0, ub=1)
        prog.AddLinearConstraint(v=x, lb=[0, 0], ub=[1, 1])
        prog.AddLinearConstraint(f=(x[0] == 0))

        prog.AddLinearEqualityConstraint(np.eye(2), np.zeros(2), x)
        prog.AddLinearEqualityConstraint(x[0] == 1)
        prog.AddLinearEqualityConstraint(x[0] + x[1], 1)

    def test_constraint_gradient_sparsity(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")

        def cost(x):
            return x[0]**2

        def constraint(x):
            return x[1]**2

        cost_binding = prog.AddCost(cost, vars=x)
        constraint_binding = prog.AddConstraint(constraint, [0], [1], vars=x)
        cost_evaluator = cost_binding.evaluator()
        constraint_evaluator = constraint_binding.evaluator()
        self.assertIsNone(cost_evaluator.gradient_sparsity_pattern())
        self.assertIsNone(constraint_evaluator.gradient_sparsity_pattern())
        # Now set the sparsity
        cost_evaluator.SetGradientSparsityPattern([(0, 0)])
        self.assertEqual(cost_evaluator.gradient_sparsity_pattern(), [(0, 0)])
        constraint_binding.evaluator().SetGradientSparsityPattern([(0, 1)])
        self.assertEqual(constraint_evaluator.gradient_sparsity_pattern(),
                         [(0, 1)])

    def test_pycost_and_pyconstraint(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1, 'x')

        def cost(x):
            return (x[0] - 1.) * (x[0] - 1.)

        def constraint(x):
            return x

        cost_binding = prog.AddCost(cost, vars=x)
        constraint_binding = prog.AddConstraint(constraint,
                                                lb=[0.],
                                                ub=[2.],
                                                vars=x)
        result = mp.Solve(prog)
        xstar = result.GetSolution(x)
        self.assertAlmostEqual(xstar[0], 1.)

        # Verify that they can be evaluated.
        self.assertAlmostEqual(cost_binding.evaluator().Eval(xstar), 0.)
        self.assertAlmostEqual(constraint_binding.evaluator().Eval(xstar), 1.)

    def test_addcost_symbolic(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1, 'x')
        prog.AddCost((x[0] - 1.)**2)
        prog.AddConstraint(0 <= x[0])
        prog.AddConstraint(x[0] <= 2)
        result = mp.Solve(prog)
        self.assertAlmostEqual(result.GetSolution(x)[0], 1.)

    def test_initial_guess(self):
        prog = mp.MathematicalProgram()
        count = 6
        shape = (2, 3)
        x = prog.NewContinuousVariables(count, 'x')
        x_matrix = x.reshape(shape)
        x0 = np.arange(count)
        x0_matrix = x0.reshape(shape)
        all_nan = np.full(x.shape, np.nan)
        self.assertTrue(np.isnan(prog.GetInitialGuess(x)).all())

        def check_and_reset():
            self.assertTrue((prog.GetInitialGuess(x) == x0).all())
            self.assertTrue(
                (prog.GetInitialGuess(x_matrix) == x0_matrix).all())
            prog.SetInitialGuess(x, all_nan)
            self.assertTrue(np.isnan(prog.GetInitialGuess(x)).all())

        # Test setting individual variables
        for i in range(count):
            prog.SetInitialGuess(x[i], x0[i])
            self.assertEqual(prog.GetInitialGuess(x[i]), x0[i])
        check_and_reset()

        # Test setting matrix values using both
        # 1d and 2d np arrays.
        prog.SetInitialGuess(x, x0)
        check_and_reset()
        prog.SetInitialGuess(x_matrix, x0_matrix)
        check_and_reset()

        # Test setting all values at once.
        prog.SetInitialGuessForAllVariables(x0)
        check_and_reset()

        # Check an extrinsic guess.  We sanity check changes to the guess using
        # loose "any" and "all" predicates rather than specific indices because
        # we should not presume how variables map into indices.
        guess = np.ndarray(count)
        guess.fill(np.nan)
        self.assertTrue(all([np.isnan(i) for i in guess]))
        prog.SetDecisionVariableValueInVector(x[0], x0[0], guess)
        self.assertFalse(all([np.isnan(i) for i in guess]))
        self.assertTrue(any([np.isnan(i) for i in guess]))
        prog.SetDecisionVariableValueInVector(x_matrix, x0_matrix, guess)
        self.assertFalse(any([np.isnan(i) for i in guess]))

    @unittest.skipIf(SNOPT_NO_GUROBI,
                     "SNOPT is unable to solve this problem (#10653).")
    def test_lorentz_cone_constraint(self):
        # Set Up Mathematical Program
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(2, "x")
        z = prog.NewContinuousVariables(1, "z")
        prog.AddCost(z[0])

        # Add LorentzConeConstraints
        prog.AddLorentzConeConstraint(
            np.array([0 * x[0] + 1, x[0] - 1, x[1] - 1]))
        prog.AddLorentzConeConstraint(np.array([z[0], x[0], x[1]]))

        # Test result
        result = mp.Solve(prog)
        self.assertTrue(result.is_success())

        # Check answer
        x_expected = np.array([1 - 2**(-0.5), 1 - 2**(-0.5)])
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))

    def test_solver_options(self):
        prog = mp.MathematicalProgram()

        prog.SetSolverOption(SolverType.kGurobi, "double_key", 1.0)
        prog.SetSolverOption(GurobiSolver().solver_id(), "int_key", 2)
        prog.SetSolverOption(SolverType.kGurobi, "string_key", "3")

        options = prog.GetSolverOptions(SolverType.kGurobi)
        self.assertDictEqual(options, {
            "double_key": 1.0,
            "int_key": 2,
            "string_key": "3"
        })
        options = prog.GetSolverOptions(GurobiSolver().solver_id())
        self.assertDictEqual(options, {
            "double_key": 1.0,
            "int_key": 2,
            "string_key": "3"
        })

        # For now, just make sure the constructor exists.  Once we bind more
        # accessors, we can test them here.
        options_object = SolverOptions()
        solver_id = SolverId("dummy")
        self.assertEqual(solver_id.name(), "dummy")
        options_object.SetOption(solver_id, "double_key", 1.0)
        options_object.SetOption(solver_id, "int_key", 2)
        options_object.SetOption(solver_id, "string_key", "3")
        options = options_object.GetOptions(solver_id)
        self.assertDictEqual(options, {
            "double_key": 1.0,
            "int_key": 2,
            "string_key": "3"
        })

        prog.SetSolverOptions(options_object)
        prog_options = prog.GetSolverOptions(solver_id)
        self.assertDictEqual(prog_options, {
            "double_key": 1.0,
            "int_key": 2,
            "string_key": "3"
        })

    def test_infeasible_constraints(self):
        prog = mp.MathematicalProgram()
        x = prog.NewContinuousVariables(1)
        result = mp.Solve(prog)
        infeasible = mp.GetInfeasibleConstraints(prog=prog,
                                                 result=result,
                                                 tol=1e-4)
        self.assertEqual(len(infeasible), 0)

    def test_add_indeterminates_and_decision_variables(self):
        prog = mp.MathematicalProgram()
        x0 = sym.Variable("x0")
        x1 = sym.Variable("x1")
        a0 = sym.Variable("a0")
        a1 = sym.Variable("a1")
        prog.AddIndeterminates(np.array([x0, x1]))
        prog.AddDecisionVariables(np.array([a0, a1]))
        numpy_compare.assert_equal(prog.decision_variables()[0], a0)
        numpy_compare.assert_equal(prog.decision_variables()[1], a1)
        numpy_compare.assert_equal(prog.indeterminates()[0], x0)
        numpy_compare.assert_equal(prog.indeterminate(1), x1)
Example #27
    def solveProgram(self):
        if (not self.upToDate):
            prog = mp.MathematicalProgram()
            M = 100

            # SET UP FOOTSTEP VARIABLES
            f = []
            for fNum in range(FootstepPlanner.MAXFOOTSTEPS):
                fnR = prog.NewContinuousVariables(self.dim, "fr" +
                                                  str(fNum))  # Right Footstep
                fnL = prog.NewContinuousVariables(self.dim, "fl" +
                                                  str(fNum))  # Left Footstep
                f.append(fnR)
                f.append(fnL)
            n = prog.NewBinaryVariables(
                2 * FootstepPlanner.MAXFOOTSTEPS,
                "n")  # binary variables for nominal regions

            # CONSTRAIN WITH REACHABLE REGIONS
            # Start position
            prog.AddLinearConstraint(f[0][0] == self.startR[0])
            prog.AddLinearConstraint(f[0][1] == self.startR[1])
            prog.AddLinearConstraint(f[1][0] == self.startL[0])
            prog.AddLinearConstraint(f[1][1] == self.startL[1])
            # All other footsteps
            for fNum in range(1, FootstepPlanner.MAXFOOTSTEPS):
                for i in range(self.reachableA.shape[0]):
                    # Constrain footsteps
                    prog.AddLinearConstraint(
                        self.reachableA[i][0] *
                        (f[2 * fNum][0] - f[2 * fNum - 1][0]) +
                        self.reachableA[i][1] *
                        (f[2 * fNum][1] - (f[2 * fNum - 1][1] - self.yOffset))
                        <= self.reachableb[i]
                    )  # Right Footstep (2*fNum) to previous left (2*fNum-1)
                    prog.AddLinearConstraint(
                        self.reachableA[i][0] *
                        (f[2 * fNum + 1][0] - f[2 * fNum][0]) +
                        self.reachableA[i][1] *
                        (f[2 * fNum + 1][1] -
                         (f[2 * fNum][1] + self.yOffset)) <= self.reachableb[i]
                    )  # Left Footstep (2*fNum+1) to previous right (2*fNum)
                    if (self.hasNominal):  # Nominal Regions
                        prog.AddLinearConstraint(
                            self.reachableA[i][0] *
                            (f[2 * fNum][0] - f[2 * fNum - 1][0]) +
                            self.reachableA[i][1] *
                            (f[2 * fNum][1] -
                             (f[2 * fNum - 1][1] - self.yOffset)) +
                            n[2 * fNum] * M <=
                            self.nominal * self.reachableb[i] + M
                        )  # Right Footstep (2*fNum) to previous left (2*fNum-1)
                        prog.AddLinearConstraint(
                            self.reachableA[i][0] *
                            (f[2 * fNum + 1][0] - f[2 * fNum][0]) +
                            self.reachableA[i][1] *
                            (f[2 * fNum + 1][1] -
                             (f[2 * fNum][1] + self.yOffset)) +
                            n[2 * fNum + 1] * M <=
                            self.nominal * self.reachableb[i] + M
                        )  # Left Footstep (2*fNum+1) to previous right (2*fNum)

            # CONSTRAIN TO OBSTACLE FREE REGIONS
            if (self.hasObstacleFree):
                H = []
                for fNum in range(1, FootstepPlanner.MAXFOOTSTEPS):
                    hnR = prog.NewBinaryVariables(self.num_regions, "hr" +
                                                  str(fNum))  # Right Footstep
                    hnL = prog.NewBinaryVariables(self.num_regions, "hl" +
                                                  str(fNum))  # Left Footstep

                    # Constrain each footstep to exactly one convex region
                    prog.AddLinearConstraint(
                        np.sum(hnR) == 1)  # only one is set
                    prog.AddLinearConstraint(
                        np.sum(hnL) == 1)  # only one is set

                    H.append(hnR)
                    H.append(hnL)
                # Constrain the footsteps to the regions
                for fNum in range(1, FootstepPlanner.MAXFOOTSTEPS - 1):
                    for i in range(self.num_regions):
                        for j in range(self.oA[i].shape[0]):
                            prog.AddLinearConstraint(
                                self.oA[i][j][0] * f[2 * fNum][0] +
                                self.oA[i][j][1] * f[2 * fNum][1] +
                                M * H[2 * fNum][i] <=
                                self.ob[i][j] + M)  # Right footstep constraint
                            prog.AddLinearConstraint(
                                self.oA[i][j][0] * f[2 * fNum + 1][0] +
                                self.oA[i][j][1] * f[2 * fNum + 1][1] +
                                M * H[2 * fNum + 1][i] <=
                                self.ob[i][j] + M)  # Left footstep constraint

            # OPTIMAL NUMBER OF FOOTSTEPS
            z = prog.NewBinaryVariables(FootstepPlanner.MAXFOOTSTEPS, "z")
            for fNum in range(FootstepPlanner.MAXFOOTSTEPS - 1):
                prog.AddLinearConstraint(z[fNum] <= z[fNum + 1])
            prog.AddLinearConstraint(z[FootstepPlanner.MAXFOOTSTEPS - 1] -
                                     z[0] == 1)

            for fNum in range(
                    FootstepPlanner.MAXFOOTSTEPS
            ):  # if z[i], then the ith footstep is the same as the final footstep
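                # Each pair of <= constraints below is a big-M encoding of
                # f[k] <= f_final + M*(1 - z[k]) and -f[k] <= -f_final + M*(1 - z[k]),
                # i.e. both components of f[k] equal the final footstep when z[k] == 1.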
                prog.AddLinearConstraint(
                    f[2 * fNum][0] +
                    M * z[fNum] <= f[2 * FootstepPlanner.MAXFOOTSTEPS - 2][0] +
                    M)
                prog.AddLinearConstraint(
                    f[2 * fNum][1] +
                    M * z[fNum] <= f[2 * FootstepPlanner.MAXFOOTSTEPS - 2][1] +
                    M)
                prog.AddLinearConstraint(
                    -f[2 * fNum][0] + M *
                    z[fNum] <= -f[2 * FootstepPlanner.MAXFOOTSTEPS - 2][0] + M)
                prog.AddLinearConstraint(
                    -f[2 * fNum][1] + M *
                    z[fNum] <= -f[2 * FootstepPlanner.MAXFOOTSTEPS - 2][1] + M)
                prog.AddLinearConstraint(
                    f[2 * fNum + 1][0] +
                    M * z[fNum] <= f[2 * FootstepPlanner.MAXFOOTSTEPS - 1][0] +
                    M)
                prog.AddLinearConstraint(
                    f[2 * fNum + 1][1] +
                    M * z[fNum] <= f[2 * FootstepPlanner.MAXFOOTSTEPS - 1][1] +
                    M)
                prog.AddLinearConstraint(
                    -f[2 * fNum + 1][0] + M *
                    z[fNum] <= -f[2 * FootstepPlanner.MAXFOOTSTEPS - 1][0] + M)
                prog.AddLinearConstraint(
                    -f[2 * fNum + 1][1] + M *
                    z[fNum] <= -f[2 * FootstepPlanner.MAXFOOTSTEPS - 1][1] + M)

            # ADD COSTS
            # Cost of consecutive footsteps (with nominal regions considered)
            for fNum in range(1, 2 * FootstepPlanner.MAXFOOTSTEPS - 1):
                prog.AddQuadraticCost(((f[fNum][0] - f[fNum + 1][0])**2 +
                                       (f[fNum][1] - f[fNum + 1][1])**2) + 2 *
                                      (1 - n[fNum]))

            # Cost of number of footsteps
            prog.AddLinearCost(-np.sum(z) * 5)

            # Cost of distance of final position to goal
            prog.AddQuadraticCost(
                1 *
                ((f[2 * FootstepPlanner.MAXFOOTSTEPS - 1][0] - self.goal[0])**2
                 + (f[2 * FootstepPlanner.MAXFOOTSTEPS - 1][1] - self.goal[1])
                 **2))
            prog.AddQuadraticCost(
                1 *
                ((f[2 * FootstepPlanner.MAXFOOTSTEPS - 2][0] - self.goal[0])**2
                 + (f[2 * FootstepPlanner.MAXFOOTSTEPS - 2][1] - self.goal[1])
                 **2))

            # SOLVE PROBLEM
            solver = GurobiSolver()
            assert (solver.available())
            assert (solver.solver_type() == mp.SolverType.kGurobi)
            result = solver.Solve(prog)
            assert (result == mp.SolutionResult.kSolutionFound)

            # SAVE SOLUTION
            self.footsteps = []
            self.numFootsteps = 0
            for fNum in range(FootstepPlanner.MAXFOOTSTEPS):
                self.numFootsteps += 1
                self.footsteps.append(prog.GetSolution(f[2 * fNum]))
                self.footsteps.append(prog.GetSolution(f[2 * fNum + 1]))
                if (prog.GetSolution(z[fNum]) == 1):
                    break
            self.footsteps = np.array(self.footsteps)
            self.upToDate = True