def augmented_lagrangian_method(initial_lagrange_multiplier, initial_penalty,
                                initial_tolerance, global_tolerance, max_iter,
                                robot, generate_initial_guess=True,
                                convergence_analysis=False):
    """Solve the constrained path-planning problem with the augmented Lagrangian method."""
    objective_function = generate_objective_function(robot)
    lagrange_multiplier_function = generate_lagrange_multiplier(robot)

    # Choose the starting point: the robot's own heuristic, a random guess, or zeros.
    if generate_initial_guess is True:
        thetas = robot.generate_initial_guess()
    elif generate_initial_guess == "random":
        thetas = np.random.rand(robot.n * robot.s) * 2 * np.pi
    else:
        thetas = np.zeros(robot.n * robot.s)

    mu = initial_penalty
    tolerance = initial_tolerance
    lagrange_multiplier = initial_lagrange_multiplier

    # Saving the iterates if convergence analysis is active
    if convergence_analysis is True:
        iterates = thetas.copy().reshape((robot.n * robot.s, 1))

    print("Starting augmented Lagrangian method")
    for i in range(max_iter):
        # Build the augmented Lagrangian and its gradient for the current
        # multiplier estimate and penalty parameter.
        augmented_lagrangian_objective = generate_augmented_lagrangian_objective(
            robot, lagrange_multiplier, mu)
        augmented_lagrangian_objective_gradient = generate_augmented_lagrangian_objective_gradient(
            robot, lagrange_multiplier, mu)

        # Minimise the subproblem, update the multipliers, then increase the
        # penalty and tighten the subproblem tolerance.
        thetas = BFGS(thetas, augmented_lagrangian_objective,
                      augmented_lagrangian_objective_gradient, tolerance)
        lagrange_multiplier = lagrange_multiplier_function(
            thetas, lagrange_multiplier, mu)
        mu *= 1.4
        tolerance *= 0.7

        # Adding the new thetas to iterates used for convergence analysis
        if convergence_analysis is True:
            iterates = np.concatenate(
                (iterates, thetas.reshape(robot.n * robot.s, 1)), axis=1)

        current_norm = np.linalg.norm(
            augmented_lagrangian_objective_gradient(thetas))
        if current_norm < global_tolerance:
            path_figure(thetas.reshape((robot.n, robot.s), order='F'),
                        robot, show=True)
            print('The objective function evaluates to:',
                  objective_function(thetas))
            print("Augmented Lagrangian method successful")
            if convergence_analysis is True:
                return iterates, generate_objective_function(robot)
            else:
                return thetas

    print("Augmented Lagrangian method unsuccessful")
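
# Example usage (a rough sketch only -- the robot construction and all parameter
# values below are placeholder assumptions, not values taken from this project;
# the multiplier shape in particular depends on how the constraints are defined
# elsewhere in the code):
#
#   robot = Robot(...)  # hypothetical: built with the project's robot class
#   thetas = augmented_lagrangian_method(
#       initial_lagrange_multiplier=np.zeros(2 * robot.s),  # assumed shape
#       initial_penalty=1.0,
#       initial_tolerance=1e-2,
#       global_tolerance=1e-4,
#       max_iter=50,
#       robot=robot,
#   )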
def extended_augmented_lagrangian_method(initial_lagrange_multiplier,
                                         initial_penalty, initial_tolerance,
                                         global_tolerance, max_iter, robot,
                                         barrier_penalty,
                                         generate_initial_guess=False,
                                         convergence_analysis=False):
    """Augmented Lagrangian method for the extended problem with slack variables
    and a barrier term for the angular inequality constraints."""
    # Abort early if any destination point lies outside the reachable
    # configuration space.
    for i in range(robot.s):
        if not is_close_to_config_space(robot.destinations[:, i],
                                        robot.neighbor_tree, robot.lengths):
            print("Destination points not in configuration space")
            return None

    lagrange_multiplier_function = generate_extended_lagrange_multiplier(robot)

    # The variable vector holds the n*s joint angles followed by 2*n*s
    # interleaved slack variables for the two-sided angular constraint.
    if generate_initial_guess is True:
        thetas_slack = np.zeros(3 * robot.n * robot.s)
        thetas_slack[:robot.n * robot.s] = robot.generate_initial_guess()
        thetas_slack[robot.n * robot.s::2] = (
            thetas_slack[:robot.n * robot.s] + robot.angular_constraint)
        thetas_slack[robot.n * robot.s + 1::2] = (
            robot.angular_constraint - thetas_slack[:robot.n * robot.s])
    elif generate_initial_guess == "random":
        thetas_slack = np.zeros(3 * robot.n * robot.s)
        thetas_slack[:robot.n * robot.s] = np.random.rand(robot.n * robot.s) * 2 * np.pi
        thetas_slack[robot.n * robot.s::2] = (
            thetas_slack[:robot.n * robot.s] + robot.angular_constraint)
        thetas_slack[robot.n * robot.s + 1::2] = (
            robot.angular_constraint - thetas_slack[:robot.n * robot.s])
    else:
        thetas_slack = np.zeros(3 * robot.n * robot.s)

    mu = initial_penalty
    tolerance = initial_tolerance
    lagrange_multiplier = initial_lagrange_multiplier

    # Saving the iterates if convergence analysis is active
    if convergence_analysis is True:
        iterates = thetas_slack.copy().reshape((3 * robot.n * robot.s, 1))

    print("Starting extended augmented Lagrangian method")
    for i in range(max_iter):
        augmented_lagrangian_objective = generate_extended_augmented_objective(
            robot, barrier_penalty, lagrange_multiplier, mu)
        augmented_lagrangian_objective_gradient = generate_extended_augmented_gradient(
            robot, barrier_penalty, lagrange_multiplier, mu)

        previous_thetas_slack = thetas_slack
        thetas_slack = BFGS(thetas_slack, augmented_lagrangian_objective,
                            augmented_lagrangian_objective_gradient, tolerance)
        lagrange_multiplier = lagrange_multiplier_function(
            thetas_slack, lagrange_multiplier, mu)
        mu *= 5
        tolerance *= 0.9

        # Adding the new thetas to iterates used for convergence analysis
        # (the full vector has 3 * n * s entries, matching the initial shape).
        if convergence_analysis is True:
            iterates = np.concatenate(
                (iterates, thetas_slack.reshape(3 * robot.n * robot.s, 1)),
                axis=1)

        current_norm = np.linalg.norm(
            augmented_lagrangian_objective_gradient(thetas_slack))
        # Stop when the gradient is small or the iterates have stagnated.
        if current_norm < global_tolerance or np.linalg.norm(
                previous_thetas_slack - thetas_slack) < 0.1:
            path_figure(thetas_slack[:robot.n * robot.s].reshape(
                (robot.n, robot.s), order='F'), robot, show=True)
            print("Extended augmented Lagrangian method successful")
            if convergence_analysis is True:
                # Note: unlike the basic method above, this returns the
                # objective generator itself rather than an evaluated objective.
                return iterates, generate_extended_objective_function
            else:
                return thetas_slack

    path_figure(thetas_slack[:robot.n * robot.s].reshape(
        (robot.n, robot.s), order='F'), robot, show=True)
    print("Extended augmented Lagrangian method unsuccessful")
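

# A minimal, self-contained sketch of the interleaved slack layout used above.
# The sizes and the pi/2 angular bound are assumptions chosen for illustration,
# not values taken from the robot class.
if __name__ == "__main__":
    import numpy as np  # assumed to match the module-level numpy import

    n_times_s = 4                    # hypothetical number of joints * steps
    angular_constraint = np.pi / 2   # hypothetical joint-angle bound
    thetas = np.random.rand(n_times_s) * 2 * np.pi

    # Layout: [thetas | interleaved slacks], total length 3 * n * s.
    thetas_slack = np.zeros(3 * n_times_s)
    thetas_slack[:n_times_s] = thetas
    thetas_slack[n_times_s::2] = thetas + angular_constraint      # s_i^+ = theta_i + c
    thetas_slack[n_times_s + 1::2] = angular_constraint - thetas  # s_i^- = c - theta_i

    # The two slacks of joint i encode the two sides of -c <= theta_i <= c:
    # s_i^+ + s_i^- = 2c for every joint, independent of theta_i.
    assert np.allclose(
        thetas_slack[n_times_s::2] + thetas_slack[n_times_s + 1::2],
        2 * angular_constraint)
    print("Slack layout consistent, vector length:", thetas_slack.shape[0])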