def augmented_lagrangian_method(initial_lagrange_multiplier,
                                initial_penalty,
                                initial_tolerance,
                                global_tolerance,
                                max_iter,
                                robot,
                                generate_initial_guess=True,
                                convergence_analysis=False):
    """Solve the robot path-planning problem with an augmented Lagrangian loop.

    Each outer iteration minimises the augmented Lagrangian with BFGS, then
    updates the multiplier estimate, grows the penalty (x1.4) and tightens the
    inner tolerance (x0.7). Stops once the augmented objective's gradient norm
    drops below ``global_tolerance``.

    Returns the final flat theta vector on success, ``(iterates, objective)``
    when ``convergence_analysis`` is True, or ``None`` after ``max_iter``
    unsuccessful outer iterations.
    """
    objective_function = generate_objective_function(robot)
    lagrange_multiplier_function = generate_lagrange_multiplier(robot)

    num_vars = robot.n * robot.s
    # Starting point: project-provided guess, uniform random angles, or zeros.
    if generate_initial_guess is True:
        thetas = robot.generate_initial_guess()
    elif generate_initial_guess == "random":
        thetas = np.random.rand(num_vars) * 2 * np.pi
    else:
        thetas = np.zeros(num_vars)

    mu = initial_penalty
    tolerance = initial_tolerance
    lagrange_multiplier = initial_lagrange_multiplier

    # Record every outer iterate (one column each) for convergence analysis.
    if convergence_analysis is True:
        iterates = thetas.copy().reshape((num_vars, 1))

    print("Starting augmented lagrangian method")
    for _ in range(max_iter):
        inner_objective = generate_augmented_lagrangian_objective(
            robot, lagrange_multiplier, mu)
        inner_gradient = generate_augmented_lagrangian_objective_gradient(
            robot, lagrange_multiplier, mu)
        thetas = BFGS(thetas, inner_objective, inner_gradient, tolerance)
        lagrange_multiplier = lagrange_multiplier_function(
            thetas, lagrange_multiplier, mu)
        # Tighten the penalty and inner-solver tolerance for the next pass.
        mu *= 1.4
        tolerance *= 0.7

        if convergence_analysis is True:
            iterates = np.concatenate(
                (iterates, thetas.reshape(num_vars, 1)), axis=1)

        gradient_norm = np.linalg.norm(inner_gradient(thetas))
        if gradient_norm < global_tolerance:
            path_figure(thetas.reshape((robot.n, robot.s), order='F'),
                        robot,
                        show=True)
            print('The objective function evaluates to:',
                  objective_function(thetas))

            print("Augmented lagrangian method successful")
            if convergence_analysis is True:
                return iterates, generate_objective_function(robot)
            return thetas

    print("Augmented lagrangian method unsuccessful")
    def test_plot_pure_functon(self):
        """path_figure must be a pure function: it may not mutate its inputs."""
        # Snapshot the inputs so any in-place mutation is detectable.
        destinations_before = self.robot_arm.destinations.copy()
        thetas_before = self.theta_matrix.copy()

        # Invoke the plotting helper under test (without showing the figure).
        path_figure(self.theta_matrix, self.robot_arm, show=False)

        # Both arguments must be bit-for-bit unchanged afterwards.
        np.testing.assert_array_equal(destinations_before,
                                      self.robot_arm.destinations)
        np.testing.assert_array_equal(thetas_before, self.theta_matrix)
    def generate_initial_guess(self, show=False, first_configuration=None):
        """Build a feasible (but not effort-optimal) set of joint angles.

        Solves the inverse-kinematics problem destination by destination,
        warm-starting each solve from the previous configuration, and stores
        the result in ``self.initial_guess``. Returns the angles flattened
        column-major as a vector of length ``n * s``.
        """
        # Configuration the arm holds before moving to the first destination.
        if first_configuration is None:
            last_theta = self.theta
        else:
            assert first_configuration.shape == (self.n,)
            last_theta = first_configuration

        # One column of joint angles per destination point.
        self.initial_guess = np.empty((self.n, self.s,))
        assert self.initial_guess.shape == (self.n, self.s,)
        for column, target in enumerate(self.destinations.T):
            last_theta = self.calculate_joint_angles(target, last_theta)
            self.initial_guess[:, column] = last_theta

        # Visualise the guess; only display the window when asked to.
        fig = path_figure(self.initial_guess, self, show=False)
        fig.suptitle('Initial guess calculated by BFGS')
        if show is True:
            plt.show()

        return self.initial_guess.reshape((self.n * self.s,), order='F')
 def plot_movement(self):
     # Draw the arm's joint positions over the planned movement and show it.
     # NOTE(review): `destinations` on the path_figure call below is not
     # defined in this scope — this looks like a NameError at runtime;
     # presumably an attribute such as self...destinations was intended.
     # TODO: confirm against the enclosing class (not visible here).
     joint_positions = self.position(joints=True)
     path = plotting.path_figure(joint_positions, destinations)
     path.show()
# --- 示例 #5 ("Example #5", rating 0) — separator left over from the site this code was scraped from ---
 def test_quadratic_penalty_method(self):
     """Smoke test: the quadratic penalty solver's output can be plotted."""
     solution = quadratic_penalty_method(self.robot_arm)
     theta_matrix = solution.reshape((self.n, self.s,), order='F')
     path_figure(theta_matrix, self.robot_arm, show=False)
# --- 示例 #6 ("Example #6", rating 0) — separator left over from the site this code was scraped from ---
def _fill_slack_variables(thetas_slack, num_angles, angular_constraint):
    """Initialise the interleaved slack variables from the angle part.

    The layout after the first ``num_angles`` entries is: even offsets hold
    ``theta + angular_constraint``, odd offsets hold
    ``angular_constraint - theta`` (one pair per angle).
    """
    angles = thetas_slack[:num_angles]
    thetas_slack[num_angles::2] = angles + angular_constraint
    thetas_slack[num_angles + 1::2] = angular_constraint - angles


def extended_augmented_lagrangian_method(initial_lagrange_multiplier,
                                         initial_penalty,
                                         initial_tolerance,
                                         global_tolerance,
                                         max_iter,
                                         robot,
                                         barrier_penalty,
                                         generate_initial_guess=False,
                                         convergence_analysis=False):
    """Augmented Lagrangian method with slack variables and a barrier term.

    The optimisation variable ``thetas_slack`` has length ``3 * n * s``:
    the first ``n * s`` entries are joint angles, the rest are interleaved
    slack variables for the angular inequality constraints.

    Returns the final ``thetas_slack`` vector (or ``(iterates, objective)``
    when ``convergence_analysis`` is True); returns ``None`` early when a
    destination lies outside the configuration space.
    """
    # Abort early when any destination is unreachable by the arm.
    for i in range(robot.s):
        if not is_close_to_config_space(robot.destinations[:, i],
                                        robot.neighbor_tree, robot.lengths):
            print("Destination points not in configuration space")
            return None

    lagrange_multiplier_function = generate_extended_lagrange_multiplier(robot)

    num_angles = robot.n * robot.s
    # Starting point: project-provided guess, random angles, or zeros.
    thetas_slack = np.zeros(3 * num_angles, )
    if generate_initial_guess is True:
        thetas_slack[:num_angles] = robot.generate_initial_guess()
        _fill_slack_variables(thetas_slack, num_angles,
                              robot.angular_constraint)
    elif generate_initial_guess == "random":
        thetas_slack[:num_angles] = np.random.rand(num_angles) * 2 * np.pi
        _fill_slack_variables(thetas_slack, num_angles,
                              robot.angular_constraint)

    mu = initial_penalty
    tolerance = initial_tolerance
    lagrange_multiplier = initial_lagrange_multiplier

    # Saving the iterates if convergence analysis is active
    if convergence_analysis is True:
        iterates = thetas_slack.copy().reshape((3 * num_angles, 1))

    print("Starting augmented lagrangian method")
    for i in range(max_iter):
        augmented_lagrangian_objective = generate_extended_augmented_objective(
            robot, barrier_penalty, lagrange_multiplier, mu)
        augmented_lagrangian_objective_gradient = generate_extended_augmented_gradient(
            robot, barrier_penalty, lagrange_multiplier, mu)
        previous_thetas_slack = thetas_slack
        thetas_slack = BFGS(thetas_slack, augmented_lagrangian_objective,
                            augmented_lagrangian_objective_gradient, tolerance)

        lagrange_multiplier = lagrange_multiplier_function(
            thetas_slack, lagrange_multiplier, mu)
        # Grow the penalty and tighten the inner tolerance each outer step.
        mu *= 5
        tolerance *= 0.9

        # Adding the new thetas to iterates used for convergence analysis.
        # BUGFIX: thetas_slack has 3 * n * s entries; reshaping it to
        # (n * s, 1) raised a ValueError whenever this branch ran.
        if convergence_analysis is True:
            iterates = np.concatenate(
                (iterates, thetas_slack.reshape(3 * num_angles, 1)), axis=1)

        current_norm = np.linalg.norm(
            augmented_lagrangian_objective_gradient(thetas_slack))
        # Also stop when the iterate has essentially stalled (step < 0.1).
        if current_norm < global_tolerance or np.linalg.norm(
                previous_thetas_slack - thetas_slack) < 0.1:
            path_figure(thetas_slack[:num_angles].reshape(
                (robot.n, robot.s), order='F'),
                        robot,
                        show=True)

            print("Augmented lagrangian method successful")
            if convergence_analysis is True:
                # Apply the factory to this robot, matching the return shape
                # of augmented_lagrangian_method (it previously returned the
                # factory itself, uncalled).
                return iterates, generate_extended_objective_function(robot)
            else:
                return thetas_slack

    path_figure(thetas_slack[:num_angles].reshape((robot.n, robot.s),
                                                  order='F'),
                robot,
                show=True)
    print("Augmented lagrangian method unsuccessful")