import numpy as np

# Helper generators (generate_objective_function, generate_constraints_function,
# BFGS, path_figure, RobotArm, ...) are provided by the surrounding project.


def augmented_lagrangian_method(initial_lagrange_multiplier,
                                initial_penalty,
                                initial_tolerance,
                                global_tolerance,
                                max_iter,
                                robot,
                                generate_initial_guess=True,
                                convergence_analysis=False):
    objective_function = generate_objective_function(robot)
    lagrange_multiplier_function = generate_lagrange_multiplier(robot)

    # The initial guess can come from the robot model, be drawn at random,
    # or default to the zero configuration.
    if generate_initial_guess is True:
        thetas = robot.generate_initial_guess()
    elif generate_initial_guess == "random":
        thetas = np.random.rand(robot.n * robot.s) * 2 * np.pi
    else:
        thetas = np.zeros(robot.n * robot.s)

    mu = initial_penalty
    tolerance = initial_tolerance
    lagrange_multiplier = initial_lagrange_multiplier

    # Saving the iterates if convergence analysis is active
    if convergence_analysis is True:
        iterates = thetas.copy().reshape((robot.n * robot.s, 1))

    print("Starting augmented lagrangian method")
    for i in range(max_iter):
        augmented_lagrangian_objective = generate_augmented_lagrangian_objective(
            robot, lagrange_multiplier, mu)
        augmented_lagrangian_objective_gradient = generate_augmented_lagrangian_objective_gradient(
            robot, lagrange_multiplier, mu)
        # Approximately minimize the augmented Lagrangian for the current
        # multiplier estimate and penalty, then update both.
        thetas = BFGS(thetas, augmented_lagrangian_objective,
                      augmented_lagrangian_objective_gradient, tolerance)
        lagrange_multiplier = lagrange_multiplier_function(
            thetas, lagrange_multiplier, mu)
        mu *= 1.4  # increase the penalty parameter
        tolerance *= 0.7  # tighten the subproblem tolerance

        # Adding the new thetas to iterates used for convergence analysis
        if convergence_analysis is True:
            iterates = np.concatenate(
                (iterates, thetas.reshape(robot.n * robot.s, 1)), axis=1)

        # Convergence is checked on the gradient norm of the current
        # augmented Lagrangian objective.
        current_norm = np.linalg.norm(
            augmented_lagrangian_objective_gradient(thetas))
        if current_norm < global_tolerance:
            path_figure(thetas.reshape((robot.n, robot.s), order='F'),
                        robot,
                        show=True)
            print('The objective function evaluates to:',
                  objective_function(thetas))

            print("Augmented lagrangian method successful")
            if convergence_analysis is True:
                return iterates, generate_objective_function(robot)
            else:
                return thetas

    print("Augmented lagrangian method unsuccessful")
def generate_augmented_lagrangian_objective(robot_arm, lagrange_multiplier,
                                            mu):
    objective_function = generate_objective_function(robot_arm)
    constraint_function = generate_constraints_function(robot_arm)

    def augmented_lagrangian_objective(thetas):
        # L_A(theta) = f(theta) - lambda . c(theta) + (mu / 2) * ||c(theta)||^2
        objective = objective_function(thetas)
        constraints = constraint_function(thetas)
        return objective - np.sum(
            lagrange_multiplier *
            constraints) + mu / 2 * np.sum(constraints * constraints)

    return augmented_lagrangian_objective
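
A minimal usage sketch of augmented_lagrangian_method above, assuming a RobotArm constructed as in the test examples further down; the multiplier size and the numeric settings are illustrative guesses, not values taken from the original project.

# Hedged usage sketch; all parameter values below are illustrative only.
robot = RobotArm(lengths=(1, 2), destinations=((1, -1), (1, 1)))
# Assumption: one multiplier per constraint component (two per destination).
initial_multiplier = np.zeros(2 * robot.s)
thetas_opt = augmented_lagrangian_method(initial_lagrange_multiplier=initial_multiplier,
                                         initial_penalty=1.0,
                                         initial_tolerance=1e-2,
                                         global_tolerance=1e-4,
                                         max_iter=100,
                                         robot=robot)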
Example #3
def generate_extended_objective_function(robot_arm, barrier_penalty):
    objective_function = generate_objective_function(robot_arm)
    n = robot_arm.n
    s = robot_arm.s

    def extended_objective(thetas_slack):
        # The variable vector stacks the joint angles and the slack variables;
        # the slacks enter the objective through a logarithmic barrier term.
        thetas = thetas_slack[:n * s]
        slack = thetas_slack[n * s:]
        # slack += 1e-2*np.isclose(slack, np.zeros(2*robot_arm.n*robot_arm.s))
        # while np.any(slack <= 0):
        #     slack += (slack <= 0)*1e-2
        return objective_function(thetas) - barrier_penalty * np.sum(
            np.log(slack))

    return extended_objective
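
A hedged sketch of evaluating the barrier-extended objective above on a stacked variable vector, given some RobotArm instance robot_arm; the slack block size (2 * n * s) is inferred from the commented-out lines inside the function and is an assumption.

# Illustrative only: the slack dimension is assumed to be 2 * n * s.
extended = generate_extended_objective_function(robot_arm, barrier_penalty=0.1)
thetas_guess = np.zeros(robot_arm.n * robot_arm.s)
slack_guess = np.ones(2 * robot_arm.n * robot_arm.s)  # slacks must stay strictly positive for np.log
barrier_value = extended(np.concatenate((thetas_guess, slack_guess)))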
Example #4
    def setUp(self):
        self.thetas = np.array((0.5, 4, 2, 5, 3, 6))
        self.thetas.setflags(write=False)
        lengths = (1, 1)
        destinations = ((0, 0, 0), (0, 0, 0))
        self.robot_arm = RobotArm(lengths, destinations)
        self.objective = generate_objective_function(self.robot_arm)
Example #5
def generate_quadratically_penalized_objective(robot_arm):
    '''
    Given a RobotArm object with valid joint lengths and destinations,
    this function returns a quadratically penalized objective function
    taking two parameters: thetas and mu.
    Function: R^(ns) x R --> R
    '''
    n = robot_arm.n
    s = robot_arm.s
    objective = generate_objective_function(robot_arm)
    constraints_func = generate_constraints_function(robot_arm)

    def quadratically_penalized_objective(thetas, mu):
        if thetas.shape != (n * s,):
            raise ValueError('thetas is not given as a 1D vector, but as: ' +
                             str(thetas.shape))

        return objective(thetas) + \
               0.5 * mu * np.sum(constraints_func(thetas) ** 2)

    return quadratically_penalized_objective
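
A minimal quadratic-penalty outer loop built on the generator above, sketched with scipy.optimize.minimize as a stand-in solver (the project uses its own BFGS routine); the penalty schedule and iteration count are illustrative.

from scipy.optimize import minimize  # stand-in for the project's own BFGS

penalized = generate_quadratically_penalized_objective(robot_arm)
thetas = np.zeros(robot_arm.n * robot_arm.s)
mu = 1.0
for _ in range(20):
    # Minimize for the current penalty weight, warm-starting from the previous
    # iterate, then increase mu as in a standard quadratic-penalty method.
    result = minimize(penalized, thetas, args=(mu,), method='BFGS')
    thetas = result.x
    mu *= 10.0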
Example #6
    def test_objective_value2(self):
        robot_arm = RobotArm(lengths=(1, 2),
                             destinations=((1, -1), (1, 1)))
        thetas = np.array((0, np.pi / 2, np.pi / 2, np.pi / 4))

        objective_func = generate_objective_function(robot_arm)
        objective_value = objective_func(thetas)
        self.assertEqual(objective_value, np.pi**2 * 5 / 16)

        objective_gradient_func = generate_objective_gradient_function(
            robot_arm)
        objective_gradient = objective_gradient_func(thetas)
        testing.assert_array_equal(
            objective_gradient,
            np.array((-np.pi, np.pi / 2, np.pi, -np.pi / 2)))
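
The expected values in this test are consistent with a cyclic smoothness objective f(theta) = 1/2 * sum_i ||theta_{i+1} - theta_i||^2 with theta_{s+1} = theta_1; that reading is inferred from the numbers above, not stated in the code. A hedged numerical check:

# Columns are joint configurations, matching the order='F' reshape used elsewhere.
theta_columns = np.array((0, np.pi / 2, np.pi / 2, np.pi / 4)).reshape((2, 2), order='F')
difference = theta_columns[:, 1] - theta_columns[:, 0]
cyclic_value = 0.5 * 2 * np.sum(difference**2)  # two identical cyclic terms when s == 2
assert np.isclose(cyclic_value, np.pi**2 * 5 / 16)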