def cyclic_coordinate_method(loss_function: rosenbrock,
                             start: point,
                             method='golden_search',
                             epsilon=10e-1,
                             k_max=10000) -> list:
    """
    Cyclic coordinate descent: each iteration performs a line search along a
    single coordinate axis, cycling through the axes in turn.
    :param loss_function: objective to minimize; must provide a gradient method g(x)
    :param start: initial point
    :param method: line-search routine: 'golden_search', 'fibonacci_search' or 'dichotomous_search'
    :param epsilon: tolerance on the gradient norm used as the stopping criterion
    :param k_max: maximum number of iterations
    :return: list of iterates, starting with start and ending at the approximate minimizer
    """
    x, M, k = [start], len(start), 0

    while True:
        # stop once the termination conditions are met
        gradient = loss_function.g(x[k])
        if k > k_max or np.linalg.norm(gradient) < epsilon: break
        # find the new x
        direction = [0] * M
        direction[np.mod(k, M)] = 1
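        # the direction is the unit vector along coordinate (k mod M), so the
        # search cycles through the axes one at a time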
        if method == 'golden_search':
            step = golden_search(loss_function, x[k], direction)
        elif method == 'fibonacci_search':
            step = fibonacci_search(loss_function, x[k], direction)
        elif method == 'dichotomous_search':
            step = dichotomous_search(loss_function, x[k], direction)
        else:
            return x
        x.append(x[k] + point(direction[0] * step, direction[1] * step))
        k += 1

    return x


def DFP(loss_function: rosenbrock,
        start: point,
        method='golden_search',
        epsilon=10e-2,
        k_max=10000) -> list:
    """
    Davidon-Fletcher-Powell quasi-Newton method: maintains an approximation D
    of the inverse Hessian, updated from the observed gradient changes.
    :param loss_function: objective to minimize; must provide a gradient method g(x)
    :param start: initial point
    :param method: line-search routine: 'golden_search', 'fibonacci_search' or 'dichotomous_search'
    :param epsilon: tolerance on the gradient norm used as the stopping criterion
    :param k_max: maximum number of iterations
    :return: list of iterates, starting with start and ending at the approximate minimizer
    """
    x, k = [start], 0
    D = np.identity(len(start))  # initial approximation of the inverse Hessian

    while True:
        # stop once the termination conditions are met
        gradient = loss_function.g(x[k])
        if k > k_max or np.linalg.norm(gradient) < epsilon: break

        # find the new x
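        # quasi-Newton direction d = -D * g, where D approximates the inverse
        # Hessian (the gradient is first normalized to unit length)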
        gradient = gradient / np.linalg.norm(gradient)
        direction = -np.matmul(D, gradient)
        if method == 'golden_search':
            step = golden_search(loss_function, x[k], direction)
        elif method == 'fibonacci_search':
            step = fibonacci_search(loss_function, x[k], direction)
        elif method == 'dichotomous_search':
            step = dichotomous_search(loss_function, x[k], direction)
        else:
            return x
        p = step * direction
        x.append(x[k] + point(p[0], p[1]))
        # update the D
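        # DFP rank-two update of the inverse-Hessian approximation:
        #   D_{k+1} = D_k + (p p^T)/(p^T y) - (D_k y y^T D_k)/(y^T D_k y)
        # where p is the step in x and y is the change in the (normalized) gradient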
        g_new = loss_function.g(x[k + 1])
        yk = g_new / np.linalg.norm(g_new) - gradient  # change in the (normalized) gradient
        pk = p  # step taken in x
        D = D + (np.outer(pk, pk) / np.dot(pk, yk) -
                 D @ np.outer(yk, yk) @ D / np.dot(yk, D @ yk))
        k += 1

    return x


def BFGS(loss_function: rosenbrock,
         start: point,
         method='golden_search',
         epsilon=10e-2,
         k_max=10000) -> list:
    """
    Broyden-Fletcher-Goldfarb-Shanno quasi-Newton method: maintains an
    approximation B of the Hessian, updated from the observed gradient changes.
    :param loss_function: objective to minimize; must provide a gradient method g(x)
    :param start: initial point
    :param method: line-search routine: 'golden_search', 'fibonacci_search' or 'dichotomous_search'
    :param epsilon: tolerance on the gradient norm used as the stopping criterion
    :param k_max: maximum number of iterations
    :return: list of iterates, starting with start and ending at the approximate minimizer
    """
    x, k = [start], 0
    B = np.identity(len(start))  # initial approximation of the Hessian

    while True:
        # stop once the termination conditions are met
        gradient = loss_function.g(x[k])
        if k > k_max or np.linalg.norm(gradient) < epsilon: break

        # find the new x
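        # BFGS approximates the Hessian itself, so the direction solves B * d = -g
        # (computed here via an explicit inverse of B)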
        gradient = gradient / np.linalg.norm(gradient)
        direction = -np.matmul(np.linalg.inv(B), gradient)
        if method == 'golden_search':
            step = golden_search(loss_function, x[k], direction)
        elif method == 'fibonacci_search':
            step = fibonacci_search(loss_function, x[k], direction)
        elif method == 'dichotomous_search':
            step = dichotomous_search(loss_function, x[k], direction)
        else:
            return x
        p = step * direction
        x.append(x[k] + point(p[0], p[1]))
        # update the B
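        # BFGS rank-two update of the Hessian approximation:
        #   B_{k+1} = B_k + (y y^T)/(y^T p) - (B_k p p^T B_k)/(p^T B_k p)
        # where p is the step in x and y is the change in the (normalized) gradient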
        g_new = loss_function.g(x[k + 1])
        yk = g_new / np.linalg.norm(g_new) - gradient  # change in the (normalized) gradient
        pk = p  # step taken in x
        B = B + (np.outer(yk, yk) / np.dot(yk, pk) -
                 B @ np.outer(pk, pk) @ B / np.dot(pk, B @ pk))
        k += 1

    return x


def conjugate_gradient(loss_function: rosenbrock,
                       start: point,
                       method='golden_search',
                       epsilon=10e-2,
                       k_max=10000) -> list:
    """
    Nonlinear conjugate gradient method with a Fletcher-Reeves style update of
    the search direction.
    :param loss_function: objective to minimize; must provide a gradient method g(x)
    :param start: initial point
    :param method: line-search routine: 'golden_search', 'fibonacci_search' or 'dichotomous_search'
    :param epsilon: tolerance on the gradient norm used as the stopping criterion
    :param k_max: maximum number of iterations
    :return: list of iterates, starting with start and ending at the approximate minimizer
    """
    x, k = [start], 0
    # initial search direction: steepest descent, normalized to unit length
    direction = -loss_function.g(start) / np.linalg.norm(loss_function.g(start))

    while True:
        # stop once the termination conditions are met
        gradient_old = loss_function.g(x[k]) / np.linalg.norm(
            loss_function.g(x[k]))
        if np.linalg.norm(loss_function.g(x[k])) < epsilon or k > k_max: break

        # find the new x
        if method == 'golden_search':
            step = golden_search(loss_function, x[k], direction)
        elif method == 'fibonacci_search':
            step = fibonacci_search(loss_function, x[k], direction)
        elif method == 'dichotomous_search':
            step = dichotomous_search(loss_function, x[k], direction)
        else:
            return x
        x.append(x[k] + point(direction[0] * step, direction[1] * step))

        # update the direction
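        # alpha is the Fletcher-Reeves ratio ||g_{k+1}||^2 / ||g_k||^2; note that
        # with the unit-normalized gradients used here the ratio evaluates to 1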
        gradient_new = loss_function.g(x[k + 1]) / np.linalg.norm(
            loss_function.g(x[k + 1]))
        alpha = np.dot(gradient_new, gradient_new) / np.dot(
            gradient_old, gradient_old)
        direction = -gradient_new + alpha * direction
        k += 1

    return x


def Newton_method(loss_function: rosenbrock,
                  start: point,
                  method='golden_search',
                  epsilon=10e-2,
                  k_max=10000) -> list:
    """
    Newton's method: the search direction is -H(x)^{-1} g(x), combined with a
    line search for the step length.
    :param loss_function: objective to minimize; must provide gradient g(x) and Hessian H(x)
    :param start: initial point
    :param method: line-search routine: 'golden_search', 'fibonacci_search' or 'dichotomous_search'
    :param epsilon: tolerance on the gradient norm used as the stopping criterion
    :param k_max: maximum number of iterations
    :return: list of iterates, starting with start and ending at the approximate minimizer
    """
    x, k = [start], 0

    while True:
        # stop once the termination conditions are met
        gradient = loss_function.g(x[k])
        if k > k_max or np.linalg.norm(gradient) < epsilon: break

        # find the new x
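        # Newton direction: d = -H(x_k)^{-1} * g(x_k), using the exact Hessian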
        inverse = np.linalg.inv(loss_function.H(x[k]))
        direction = -np.matmul(inverse, gradient)
        if method == 'golden_search':
            step = golden_search(loss_function, x[k], direction)
        elif method == 'fibonacci_search':
            step = fibonacci_search(loss_function, x[k], direction)
        elif method == 'dichotomous_search':
            step = dichotomous_search(loss_function, x[k], direction)
        else:
            return x
        p = step * direction
        x.append(x[k] + point(p[0], p[1]))
        k += 1

    return x
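

if __name__ == '__main__':
    # Minimal usage sketch (assumptions: rosenbrock() takes no constructor
    # arguments and point(x1, x2) takes two coordinates, as the calls above suggest).
    f = rosenbrock()
    x0 = point(-1.2, 1.0)
    for solver in (cyclic_coordinate_method, DFP, BFGS, conjugate_gradient,
                   Newton_method):
        trace = solver(f, x0, method='golden_search')
        print(solver.__name__, 'took', len(trace) - 1, 'iterations, final point:',
              trace[-1])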