Example #1
def generate_quadratically_penalized_objective_gradient(robot_arm):
    n = robot_arm.n
    s = robot_arm.s

    objective_gradient = generate_objective_gradient_function(robot_arm)

    constraints_func = generate_constraints_function(robot_arm)
    constraint_gradients_func = generate_constraint_gradients_function(robot_arm)

    def quadratic_constraint_gradients(thetas):
        if thetas.shape != (n * s,):
            raise ValueError('thetas must be a 1D vector of shape (n*s,), '
                             'got shape: ' + str(thetas.shape))

        constraint_gradients = constraint_gradients_func(thetas)
        constraints = constraints_func(thetas)
        assert constraint_gradients.shape == (n*s, 2*s,)
        assert constraints.shape == (2*s,)
        return constraints.reshape((1, 2*s,)) * constraint_gradients


    def quadratically_penalized_objective_gradient(thetas, mu):
        if thetas.shape != (n * s,):
            raise ValueError('thetas must be a 1D vector of shape (n*s,), '
                             'got shape: ' + str(thetas.shape))

        grad = objective_gradient(thetas)
        constraint_gradients = quadratic_constraint_gradients(thetas)
        # The penalty term is 0.5 * mu * sum_i c_i(thetas)**2, so its gradient
        # contributes mu * sum_i c_i(thetas) * grad c_i(thetas): mu is applied
        # exactly once here.
        return grad + mu * np.sum(constraint_gradients, axis=1)


    return quadratically_penalized_objective_gradient
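The factory above only builds and returns the closure. A minimal, hypothetical usage sketch (RobotArm arguments copied from the test fixture further down; the penalty weight, step size and iteration count are illustrative values, not taken from the original code):

import numpy as np

# Hypothetical usage: a few fixed-step gradient-descent iterations on the
# quadratically penalized objective.
robot_arm = RobotArm((3, 2, 2),
                     ((5, 4, 6, 4, 5), (0, 2, 0.5, -2, -1)),
                     (np.pi, np.pi / 2, 0))
penalized_gradient = generate_quadratically_penalized_objective_gradient(robot_arm)

thetas = np.ones(robot_arm.n * robot_arm.s)
mu = 10.0
for _ in range(1000):
    thetas -= 1e-3 * penalized_gradient(thetas, mu)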
def generate_extended_constraints_gradient_function(robot_arm):
    constraints_gradient_function = generate_constraint_gradients_function(
        robot_arm)
    n = robot_arm.n
    s = robot_arm.s

    def extended_constraints_gradient_function(thetas_slack):
        thetas = thetas_slack[:n * s]
        constraints_gradient = constraints_gradient_function(thetas)
        assert constraints_gradient.shape == (n * s, 2 * s)
        downward_extension = np.zeros((2 * n * s, 2 * s))
        constraints_gradient = np.concatenate(
            (constraints_gradient, downward_extension))
        assert constraints_gradient.shape == (3 * n * s, 2 * s)
        additional_constraints_gradient = np.zeros((3 * n * s, 2 * n * s))
        odd_additional_constraints_gradient_upper = np.identity(n * s)
        even_additional_constraints_gradient_upper = -np.identity(n * s)
        # Identity blocks are interleaved column-wise: +I goes into the
        # even-indexed columns, -I into the odd-indexed columns.
        additional_constraints_gradient[:n * s, 0::2] = (
            odd_additional_constraints_gradient_upper)
        additional_constraints_gradient[:n * s, 1::2] = (
            even_additional_constraints_gradient_upper)
        additional_constraints_gradient[n * s:, :] = -np.identity(2 * n * s)
        return np.concatenate(
            (constraints_gradient, additional_constraints_gradient), axis=1)

    return extended_constraints_gradient_function
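For orientation, the extended Jacobian has one row per theta plus one row per extra entry of thetas_slack, and one column per original constraint plus one per added constraint. A hypothetical shape check, assuming RobotArm and the factory are in scope and using the n = 3, s = 5 arm from the test fixture below:

import numpy as np

robot_arm = RobotArm(lengths=(3, 2, 2),
                     destinations=((5, 4, 6, 4, 5), (0, 2, 0.5, -2, -1)))
extended_gradient = generate_extended_constraints_gradient_function(robot_arm)

n, s = robot_arm.n, robot_arm.s
thetas_slack = np.ones(3 * n * s)  # n*s thetas followed by 2*n*s extra entries
jacobian = extended_gradient(thetas_slack)
assert jacobian.shape == (3 * n * s, 2 * s + 2 * n * s)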
Example #3
    def setUp(self):
        self.lengths = (3, 2, 2)
        self.destinations = (
            (5, 4, 6, 4, 5),      # x-coordinates of the five destinations
            (0, 2, 0.5, -2, -1),  # y-coordinates of the five destinations
        )
        self.theta = (np.pi, np.pi / 2, 0)
        self.thetas = np.ones((3 * 5,))
        self.robot_arm = RobotArm(self.lengths, self.destinations, self.theta)
        self.constraints_func = generate_constraints_function(self.robot_arm)
        self.constraint_gradients_func = generate_constraint_gradients_function(
            self.robot_arm)
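A hypothetical companion test for this fixture (n = 3 links, s = 5 destinations), assuming the enclosing class is a unittest.TestCase; it only checks the shapes promised by the assertions in the factories above:

    def test_constraint_function_shapes(self):
        # Hypothetical test method: constraints have shape (2*s,), their
        # Jacobian has shape (n*s, 2*s), with n = 3 and s = 5 here.
        constraints = self.constraints_func(self.thetas)
        constraint_gradients = self.constraint_gradients_func(self.thetas)
        self.assertEqual(constraints.shape, (2 * 5,))
        self.assertEqual(constraint_gradients.shape, (3 * 5, 2 * 5))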
def generate_augmented_lagrangian_objective_gradient(robot_arm,
                                                     lagrange_multiplier, mu):
    s = robot_arm.s

    constraints_function = generate_constraints_function(robot_arm)
    objective_gradient_function = generate_objective_gradient_function(
        robot_arm)
    constraint_gradient_function = generate_constraint_gradients_function(
        robot_arm)

    def augmented_lagrangian_objective_gradient(thetas):
        constraints = constraints_function(thetas)
        objective_gradient = objective_gradient_function(thetas)
        constraint_gradient = constraint_gradient_function(thetas)

        second_part = 0
        for i in range(2 * s):
            second_part += (lagrange_multiplier[i] -
                            mu * constraints[i]) * constraint_gradient[:, i]

        return objective_gradient - second_part

    return augmented_lagrangian_objective_gradient
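The loop computes grad f(thetas) - sum_i (lambda_i - mu * c_i(thetas)) * grad c_i(thetas), the gradient of the augmented Lagrangian. Since constraint_gradient has shape (n*s, 2*s), the loop collapses to one matrix-vector product; a hedged sketch of a vectorized replacement for the inner closure, meant to live inside the same factory (so constraints_function, lagrange_multiplier and mu refer to the enclosing names):

    def augmented_lagrangian_objective_gradient(thetas):
        # Vectorized equivalent of the explicit loop over the 2*s constraints.
        constraints = constraints_function(thetas)
        objective_gradient = objective_gradient_function(thetas)
        constraint_gradient = constraint_gradient_function(thetas)
        return objective_gradient - constraint_gradient @ (
            lagrange_multiplier - mu * constraints)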
Example #5
    def test_constraint_values2(self):
        robot_arm = RobotArm(lengths=(1, 2),
                             destinations=((1, -1), (1, 1)))
        thetas = np.array((0, np.pi / 2, np.pi / 2, np.pi / 4))

        constraints_func = generate_constraints_function(robot_arm)
        constraint_values = constraints_func(thetas)
        correct_constraint_values = np.array(
            (0, 1, 1 - np.sqrt(2), np.sqrt(2)))
        testing.assert_array_almost_equal(constraint_values,
                                          correct_constraint_values)

        constraint_gradients_func = generate_constraint_gradients_function(
            robot_arm)
        correct = (
            (-2, 1, 0, 0),
            (-2, 0, 0, 0),
            (0, 0, -1 - np.sqrt(2), -np.sqrt(2)),
            (0, 0, -np.sqrt(2), -np.sqrt(2)),
        )
        constraint_grads = constraint_gradients_func(thetas)
        testing.assert_array_almost_equal(constraint_grads, np.array(correct))
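The expected values can be reproduced by hand. Assuming cumulative joint angles and constraints defined as end-effector position minus target, the first destination's arm (lengths 1 and 2, angles 0 and pi/2) ends at (1, 2), giving the residual (0, 1) seen above. A small hypothetical check of just that pair:

import numpy as np

# Hypothetical hand-check of the first destination's constraint values.
lengths = np.array([1.0, 2.0])
joint_angles = np.cumsum([0.0, np.pi / 2])  # cumulative angles for destination 1
end_effector = np.array([np.sum(lengths * np.cos(joint_angles)),
                         np.sum(lengths * np.sin(joint_angles))])
print(end_effector - np.array([1.0, 1.0]))  # prints [0. 1.], matching the test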