def augmented_lagrangian_method(initial_lagrange_multiplier,
                                initial_penalty,
                                initial_tolerance,
                                global_tolerance,
                                max_iter,
                                robot,
                                generate_initial_guess=True,
                                convergence_analysis=False):
    objective_function = generate_objective_function(robot)
    lagrange_multiplier_function = generate_lagrange_multiplier(robot)

    if generate_initial_guess is True:
        thetas = robot.generate_initial_guess()
    elif generate_initial_guess == "random":
        thetas = np.random.rand(robot.n * robot.s) * 2 * np.pi
    else:
        thetas = np.zeros(robot.n * robot.s)

    mu = initial_penalty
    tolerance = initial_tolerance
    lagrange_multiplier = initial_lagrange_multiplier

    # Saving the iterates if convergence analysis is active
    if convergence_analysis is True:
        iterates = thetas.copy().reshape((robot.n * robot.s, 1))

    print("Starting augmented lagrangian method")
    for i in range(max_iter):
        augmented_lagrangian_objective = generate_augmented_lagrangian_objective(
            robot, lagrange_multiplier, mu)
        augmented_lagrangian_objective_gradient = generate_augmented_lagrangian_objective_gradient(
            robot, lagrange_multiplier, mu)
        thetas = BFGS(thetas, augmented_lagrangian_objective,
                      augmented_lagrangian_objective_gradient, tolerance)
        lagrange_multiplier = lagrange_multiplier_function(
            thetas, lagrange_multiplier, mu)
        mu *= 1.4
        tolerance *= 0.7

        # Adding the new thetas to iterates used for convergence analysis
        if convergence_analysis is True:
            iterates = np.concatenate(
                (iterates, thetas.reshape(robot.n * robot.s, 1)), axis=1)

        current_norm = np.linalg.norm(
            augmented_lagrangian_objective_gradient(thetas))
        if current_norm < global_tolerance:
            path_figure(thetas.reshape((robot.n, robot.s), order='F'),
                        robot,
                        show=True)
            print('The objective function evaluates to:',
                  objective_function(thetas))

            print("Augmented lagrangian method successful")
            if convergence_analysis is True:
                return iterates, generate_objective_function(robot)
            else:
                return thetas

    print("Augmented lagrangian method unsuccessful")
Example #2
    def test_history(self):
        x0 = -np.array([1.3, 2.7])
        opt = BFGS(self.oracle,
                   x0,
                   line_search_options={
                       'method': 'Constant',
                       'c': 1.0
                   },
                   tolerance=1e-6)
        opt.run(10)
        x_min = opt.hist['x_star']
        func_steps = [25.635000000000005, 22.99, -9.48707349065929, -9.5]
        grad_norm_steps = [11.629703349613008, 11.4, 0.22738961577617722, 0.0]
        time_steps = [0.0] * 4  # Dummy values
        x_steps = [
            np.array([-1.3, -2.7]),
            np.array([1.0, 8.7]),
            np.array([1.0, 2.88630519]),
            np.array([1., 3.])
        ]
        true_history = dict(grad_norm=grad_norm_steps,
                            time=time_steps,
                            x=x_steps,
                            func=func_steps)
        check_equal_histories(opt.hist, true_history)
Example #3
    def test_quality(self):
        opt = BFGS(self.oracle, self.x0, tolerance=1e-5)
        opt.run(5)
        x_min = opt.hist['x_star']
        # x_min, message, _ = LBFGS(self.oracle, self.x0, tolerance=1e-5)
        f_min = self.oracle.func(x_min)

        g_k_norm_sqr = norm(self.A.dot(x_min) - self.b, 2)**2
        g_0_norm_sqr = norm(self.A.dot(self.x0) - self.b, 2)**2
        self.assertLessEqual(g_k_norm_sqr, 1e-5 * g_0_norm_sqr)
        self.assertLessEqual(abs(f_min - self.f_star), 1e-5 * g_0_norm_sqr)
Example #4
def quadratic_penalty_method(robot_arm):

    Q = generate_quadratically_penalized_objective(robot_arm)
    grad_Q = generate_quadratically_penalized_objective_gradient(robot_arm)

    thetas = robot_arm.generate_initial_guess(show=False)

    mu = 1
    tau = 1e-4

    def get_f(mu):
        return partial(Q, mu=mu)

    def get_grad_f(mu):
        return partial(grad_Q, mu=mu)

    print('Starting quadratic penalty method')
    for iteration in range(1000):
        f = get_f(mu)
        grad_f = get_grad_f(mu)
        try:
            thetas = BFGS(thetas, f, grad_f, tau)
        except MaximumIterationError:
            print('Reached MaximumIterationError in iteration {}'.format(iteration))
            break
        mu = 1.5 * mu
        tau = 0.5 * tau

    return thetas
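
The loop above follows the standard quadratic penalty schedule: solve an unconstrained subproblem, then grow the penalty mu and shrink the subproblem tolerance tau. The same pattern on a toy equality-constrained problem, using scipy.optimize.minimize in place of the project's BFGS, is sketched below; the toy objective and all constants are assumptions for illustration.

import numpy as np
from scipy.optimize import minimize

# Toy problem: minimize x0 + x1 subject to x0**2 + x1**2 = 2,
# whose constrained minimizer is (-1, -1).
def penalized(x, mu):
    constraint = x[0]**2 + x[1]**2 - 2.0
    return x[0] + x[1] + 0.5 * mu * constraint**2

x, mu, tau = np.zeros(2), 1.0, 1e-4
for _ in range(30):
    # Solve the unconstrained subproblem to tolerance tau
    x = minimize(penalized, x, args=(mu,), method='BFGS',
                 options={'gtol': tau}).x
    mu *= 1.5   # penalize constraint violation more heavily
    tau *= 0.5  # demand a more accurate subproblem solution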
Example #5
    def test_line_search_options(self):
        """Check if argument `line_search_options` is supported."""
        BFGS(self.oracle,
             self.x0,
             line_search_options={
                 'method': 'Wolfe',
                 'c1': 1e-4,
                 'c2': 0.9
             })
Example #6
    def calculate_joint_angles(self, p, theta=None):
        '''
        Takes a destination point p and an initial guess theta, and returns a
        theta that maps the arm as closely as possible to p.
        '''
        # If theta is not provided, use the zero vector as the initial guess
        if theta is None:
            theta = np.zeros(self.n)
        assert theta.ndim == 1

        # Check if the goal is out of reach
        p_length = np.linalg.norm(p)

        if p_length >= self.reach:
            print("Point outside interior of configuration space. Minimum found analytically.")
            return self.move_closest_to_out_of_reach(p)
        elif self.annulus and p_length <= self.inner_reach:
            print("Point outside interior of configuration space. Minimum found analytically.")
            return self.move_closest_to_out_of_reach(p, internal=True)
        else:
            f = self.generate_f(p)
            gradient = self.generate_gradient(p)
            return BFGS(theta, f, gradient, self.precision, initial_guess=True)
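
Given the docstring, usage might look like the following; the RobotArm constructor name and its arguments are assumptions for illustration.

import numpy as np

# Hypothetical usage -- the `RobotArm` constructor and its arguments are assumed.
arm = RobotArm(lengths=np.array([3.0, 2.0, 2.0]))
p = np.array([4.0, 1.5])               # desired end-effector position
theta = arm.calculate_joint_angles(p)  # the zero vector is used as initial guess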
Example #7
    def test_max_iter(self):
        """Check if argument `max_iter` is supported."""
        opt = BFGS(self.oracle, self.x0)
        opt.run(max_iter=0)
Example #8
    def test_tolerance(self):
        """Check if argument `tolerance` is supported."""
        BFGS(self.oracle, self.x0, tolerance=1e-5)
Example #9
    def test_default(self):
        """Check if everything works correctly with default parameters."""
        opt = BFGS(self.oracle, self.x0)
        opt.run()
Example #10
def run_all_methods(oracle,
                    sketch_sizes,
                    mat,
                    max_iter,
                    output_folder,
                    x_0=None,
                    sigma_tolerance=1e-10,
                    method_tolerance=1e-16,
                    stopping_criteria='func_abs',
                    add_text='',
                    random_state=None,
                    linesearch_methods=None,
                    methods=None,
                    overwrite=False):

    import os
    os.makedirs(output_folder, exist_ok=True)

    if methods is None:
        methods = ['svd', 'svd-no-sigma', 'gauss', 'coord', 'bfgs', 'nesterov']
    if linesearch_methods is None:
        linesearch_methods = ['Wolfe']

    np.random.seed(random_state)
    if x_0 is None:
        x_0 = np.random.normal(loc=0., scale=1., size=mat.shape[1])

    add_text = '_{}'.format(add_text) if add_text != '' else ''

    def run_rbfgs(mat_distr, line_search_options, distr_name, **kwargs):
        output_file = '{}/rbfgs_{}_linesearch={}{}.pkl'.format(
            output_folder, distr_name, line_search_options['method'].lower(),
            add_text)
        run_rbfgs_experiment(oracle,
                             x_0,
                             mat_distr=mat_distr,
                             sketch_sizes=sketch_sizes,
                             max_iter=max_iter,
                             tolerance=method_tolerance,
                             stopping_criteria=stopping_criteria,
                             output_file=output_file,
                             overwrite=overwrite,
                             **kwargs)

    if 'svd' in methods or 'svd-no-sigma' in methods:
        try:
            with open('{}/svd.pkl'.format(output_folder), 'rb') as file:
                U, sigma_diag, Vh = pickle.load(file)
                print('Read SVD from {}/svd.pkl'.format(output_folder))
        except FileNotFoundError:
            print('Computing SVD...', end='')
            U, sigma_diag, Vh = scipy.linalg.svd(mat.T, full_matrices=False)
            print('Done')
            with open('{}/svd.pkl'.format(output_folder), 'wb') as file:
                pickle.dump((U, sigma_diag, Vh), file)
        nondeg_count = (sigma_diag > sigma_tolerance).sum()
        print('Singular values above tolerance: {}'.format(nondeg_count))
        print()

    if 'svd' in methods:
        print('RBFGS-SVD sketch... ', end='')
        mat_distr = CustomDiscrete(U[:, :nondeg_count], sort_ids=True)
        for ls in linesearch_methods:
            run_rbfgs(mat_distr, {'method': ls}, 'svd')
        print('Done')

    if 'svd-no-sigma' in methods:
        print('RBFGS-SVD sketch no sigma... ', end='')
        mat_distr = CustomDiscrete(U, sort_ids=True)
        for ls in linesearch_methods:
            run_rbfgs(mat_distr, {'method': ls}, 'svd-no-sigma')
        print('Done')

    if 'gauss' in methods:
        print('RBFGS-gauss... ', end='')
        mat_distr = Gaussian(-1., 1., [mat.shape[1], 1])
        for ls in linesearch_methods:
            run_rbfgs(mat_distr, {'method': ls}, 'gauss')
        print('Done')

    if 'coord' in methods:
        print('RBFGS-coord... ', end='')
        mat_distr = CustomDiscrete(np.eye(mat.shape[1]), sort_ids=True)
        for ls in linesearch_methods:
            run_rbfgs(mat_distr, {'method': ls}, 'coord')
        print('Done')

    if 'bfgs' in methods:
        print('BFGS... ', end='')
        for ls in linesearch_methods:
            if overwrite or 'bfgs_linesearch={}{}.pkl'\
                .format(ls.lower(), add_text) not in os.listdir(output_folder):

                method = BFGS(oracle,
                              x_0,
                              tolerance=method_tolerance,
                              stopping_criteria=stopping_criteria,
                              line_search_options={'method': ls})
                method.run(max_iter)
                method.oracle = None
                method.H_k = None
                method.x_0 = None
                with open('{}/bfgs_linesearch={}{}.pkl'\
                          .format(output_folder, ls.lower(), add_text), 'wb') as file:
                    pickle.dump(method, file)
        print('Done')

    print()
    print('All runs completed.')
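
An invocation of run_all_methods might look like the sketch below; `oracle` and `data_matrix` stand in for objects defined elsewhere in the project, and all other values are illustrative assumptions.

# Hypothetical invocation -- `oracle` and `data_matrix` are placeholders for
# project-defined objects; the remaining values are illustrative only.
run_all_methods(oracle,
                sketch_sizes=[1, 10, 50],
                mat=data_matrix,
                max_iter=1000,
                output_folder='results/experiment1',
                random_state=42,
                methods=['svd', 'gauss', 'bfgs'])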
Example #11
def extended_augmented_lagrangian_method(initial_lagrange_multiplier,
                                         initial_penalty,
                                         initial_tolerance,
                                         global_tolerance,
                                         max_iter,
                                         robot,
                                         barrier_penalty,
                                         generate_initial_guess=False,
                                         convergence_analysis=False):
    for i in range(robot.s):
        if not is_close_to_config_space(robot.destinations[:, i],
                                        robot.neighbor_tree, robot.lengths):
            print("Destination points not in configuration space")
            return None

    lagrange_multiplier_function = generate_extended_lagrange_multiplier(robot)

    dof = robot.n * robot.s
    thetas_slack = np.zeros(3 * dof)
    if generate_initial_guess is True:
        thetas_slack[:dof] = robot.generate_initial_guess()
    elif generate_initial_guess == "random":
        thetas_slack[:dof] = np.random.rand(dof) * 2 * np.pi
    if generate_initial_guess is True or generate_initial_guess == "random":
        # Initialize the slack variables consistently with the angular constraint
        thetas_slack[dof::2] = thetas_slack[:dof] + robot.angular_constraint
        thetas_slack[dof + 1::2] = robot.angular_constraint - thetas_slack[:dof]

    mu = initial_penalty
    tolerance = initial_tolerance
    lagrange_multiplier = initial_lagrange_multiplier

    # Saving the iterates if convergence analysis is active
    if convergence_analysis is True:
        iterates = thetas_slack.copy().reshape((3 * dof, 1))

    print("Starting augmented lagrangian method")
    for i in range(max_iter):
        augmented_lagrangian_objective = generate_extended_augmented_objective(
            robot, barrier_penalty, lagrange_multiplier, mu)
        augmented_lagrangian_objective_gradient = generate_extended_augmented_gradient(
            robot, barrier_penalty, lagrange_multiplier, mu)
        previous_thetas_slack = thetas_slack
        thetas_slack = BFGS(thetas_slack, augmented_lagrangian_objective,
                            augmented_lagrangian_objective_gradient, tolerance)

        lagrange_multiplier = lagrange_multiplier_function(
            thetas_slack, lagrange_multiplier, mu)
        mu *= 5
        tolerance *= 0.9

        # Adding the new thetas to iterates used for convergence analysis
        if convergence_analysis is True:
            iterates = np.concatenate(
                (iterates, thetas_slack.reshape(3 * dof, 1)), axis=1)

        current_norm = np.linalg.norm(
            augmented_lagrangian_objective_gradient(thetas_slack))
        if current_norm < global_tolerance or np.linalg.norm(
                previous_thetas_slack - thetas_slack) < 0.1:
            path_figure(thetas_slack[:dof].reshape((robot.n, robot.s), order='F'),
                        robot,
                        show=True)

            print("Augmented lagrangian method successful")
            if convergence_analysis is True:
                return iterates, generate_extended_objective_function
            else:
                return thetas_slack

    path_figure(thetas_slack[:dof].reshape((robot.n, robot.s), order='F'),
                robot,
                show=True)
    print("Extended augmented Lagrangian method unsuccessful")