def mk_costs(ndim=2):
    """Build the benchmark cost functions and a shared set of starting points.

    Each entry of ``costs`` maps a problem name to a ``(f, f_prime, hessian)``
    triple; ``starting_points`` holds random initial guesses in the box
    ``[-2, 2]`` along every dimension.
    """
    costs = {
        "Well-conditioned quadratic": mk_quad(0.7, ndim=ndim),
        "Ill-conditioned quadratic": mk_quad(0.02, ndim=ndim),
        "Well-conditioned Gaussian": mk_gauss(0.7, ndim=ndim),
        "Ill-conditioned Gaussian": mk_gauss(0.02, ndim=ndim),
        "Rosenbrock  ": (rosenbrock, rosenbrock_prime, rosenbrock_hessian),
    }

    # Fixed seed so every optimizer is benchmarked from the same starting points
    rng = np.random.RandomState(0)
    starting_points = 4 * rng.rand(20, ndim) - 2
    if ndim > 100:
        # Keep the benchmark affordable in high dimension
        starting_points = starting_points[:10]
    return costs, starting_points
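

# Illustrative sketch (not part of the original script): how the values returned
# by mk_costs() are typically consumed -- every problem exposes the same
# (f, f_prime, hessian) interface, so optimizers can be benchmarked uniformly.
# The helper name below is hypothetical and added only for demonstration.
def _print_start_values(ndim=2):
    costs, starting_points = mk_costs(ndim=ndim)
    for name, (f, f_prime, f_hessian) in costs.items():
        x0 = starting_points[0]
        print(f"{name.strip()}: f(x0) = {f(x0):.3g}")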


def nelder_mead(x0, f, f_prime, hessian=None):
    all_x_i = [x0[0]]
    all_y_i = [x0[1]]
    all_f_i = [f(x0)]

    def store(X):
        # Callback: record each iterate and its cost
        x, y = X
        all_x_i.append(x)
        all_y_i.append(y)
        all_f_i.append(f(X))

    # Nelder-Mead takes its function tolerance as "fatol", not "ftol"
    optimize.minimize(f, x0, method="Nelder-Mead", callback=store, options={"fatol": 1e-12})
    return all_x_i, all_y_i, all_f_i
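

# Illustrative sketch (not in the original script): trace Nelder-Mead on the
# well-conditioned quadratic from one fixed starting point, using the wrapper
# above and mk_quad() from earlier in the script.  The helper name below is
# hypothetical and used only for demonstration.
def _demo_nelder_mead_path():
    f, f_prime, hessian = mk_quad(0.7)
    all_x_i, all_y_i, all_f_i = nelder_mead(np.array([1.6, 1.1]), f, f_prime, hessian)
    print(f"{len(all_f_i)} recorded iterates, final cost {all_f_i[-1]:.3g}")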




###############################################################################
# Run different optimizers on these problems
levels = dict()

for index, ((f, f_prime, hessian), optimizer) in enumerate((
                (mk_quad(0.7), gradient_descent),
                (mk_quad(0.7), gradient_descent_adaptative),
                (mk_quad(0.02), gradient_descent),
                (mk_quad(0.02), gradient_descent_adaptative),
                (mk_gauss(0.02), gradient_descent_adaptative),
                ((rosenbrock, rosenbrock_prime, rosenbrock_hessian), gradient_descent_adaptative),
                (mk_gauss(0.02), conjugate_gradient),
                ((rosenbrock, rosenbrock_prime, rosenbrock_hessian), conjugate_gradient),
                (mk_quad(0.02), newton_cg),
                (mk_gauss(0.02), newton_cg),
                ((rosenbrock, rosenbrock_prime, rosenbrock_hessian), newton_cg),
                (mk_quad(0.02), bfgs),
                (mk_gauss(0.02), bfgs),