import numpy as np
from numpy.linalg import norm, solve, eigvalsh

# gradiente(f, x) and hessiano(f, x) (numerical gradient and Hessian) are
# project helpers used throughout this file; hypothetical stand-ins are
# sketched at the end of this section.

def eval_mejora_m(f, xk, dk):
    # Trust-region improvement ratio rho_k = ared / pred for the step dk.
    # Quadratic model: q_k(x) = f(xk) + g_k^T (x - xk) + 0.5 (x - xk)^T B_k (x - xk)
    qk = lambda x: (f(xk) + gradiente(f, xk).T @ (x - xk)
                    + 0.5 * (x - xk).T @ (hessiano(f, xk) @ (x - xk)))
    mk = lambda d: qk(xk + d)  # model as a function of the step d
    ared = f(xk) - f(xk + dk)               # actual reduction
    pred = mk(np.zeros_like(dk)) - mk(dk)   # predicted reduction
    return ared / pred
def punto_cauchy(f, x, Dk):
    # Cauchy point: minimizer of the quadratic model along -g_k,
    # restricted to the trust region of radius Dk.
    gk = gradiente(f, x)
    Bk = hessiano(f, x)
    gkTBkgk = gk.T @ (Bk @ gk)
    if gkTBkgk <= 0:
        tk = 1  # model is not convex along -g_k: step to the boundary
    else:
        # unconstrained minimizer along -g_k, capped at the boundary; the
        # division is guarded so gkTBkgk <= 0 never reaches it
        min_cuadratica = norm(gk)**3 / (Dk * gkTBkgk)
        tk = min(min_cuadratica, 1)
    dk = -tk * (Dk * gk / norm(gk))
    return dk
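# The two routines above are the building blocks of a basic trust-region
# method. A minimal sketch of the outer loop follows, assuming the textbook
# radius-update scheme; the name `region_de_confianza` and the constants
# (D0, Dmax, eta, 0.25, 0.75) are illustrative assumptions, not part of the
# original code.

def region_de_confianza(f, x0, e, kmax, D0=1.0, Dmax=10.0, eta=0.15):
    # Hypothetical driver: take the Cauchy step, then grow or shrink the
    # radius according to the improvement ratio rho = ared / pred.
    x, Dk, k = x0, D0, 0
    while norm(gradiente(f, x)) > e and k < kmax:
        dk = punto_cauchy(f, x, Dk)
        rho = eval_mejora_m(f, x, dk)
        if rho < 0.25:
            Dk = 0.25 * Dk               # poor model fit: shrink the region
        elif rho > 0.75 and np.isclose(norm(dk), Dk):
            Dk = min(2 * Dk, Dmax)       # good fit at the boundary: expand
        if rho > eta:                    # accept the step only if f improves enough
            x = x + dk
        k = k + 1
    return x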
def metodo_de_newton(f, x0, e, kmax, metodo):
    # Newton's method with a line search: solve B_k d = -g_k at each step.
    # `metodo` is a step-size rule called as metodo(f, x, d).
    k = 0
    x = x0
    while norm(gradiente(f, x)) > e and k < kmax:
        d = solve(hessiano(f, x), -gradiente(f, x))  # Newton direction
        t = metodo(f, x, d)
        x = x + t * d
        k = k + 1
    return x
def metodo_de_Levenberg_Marquardt(f, x0, e, kmax, g, metodo):
    # Modified Newton (Levenberg-Marquardt regularization): if the Hessian
    # is not positive definite, shift it by (-mu + g) * I so its smallest
    # eigenvalue becomes g > 0, guaranteeing a descent direction.
    k = 0
    x = x0
    while norm(gradiente(f, x)) > e and k < kmax:
        B = hessiano(f, x)
        mu = np.min(eigvalsh(B))  # eigvalsh: B is symmetric, eigenvalues are real
        if mu <= 0:
            B = B + (-mu + g) * np.eye(B.shape[0])
        d = solve(B, -gradiente(f, x))
        t = metodo(f, x, d)
        x = x + t * d
        k = k + 1
    return x
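# The helpers gradiente/hessiano and the step-size rule `metodo` are used
# above but not defined in this section. A minimal sketch follows so the code
# runs standalone, assuming central finite differences and an Armijo
# backtracking search; the function `backtracking` and the parameter values
# (h, s, beta, sigma) are illustrative assumptions.

def gradiente(f, x, h=1e-6):
    # central-difference approximation of the gradient
    n = x.size
    g = np.zeros(n)
    for i in range(n):
        ei = np.zeros(n)
        ei[i] = h
        g[i] = (f(x + ei) - f(x - ei)) / (2 * h)
    return g

def hessiano(f, x, h=1e-4):
    # central-difference approximation of the Hessian, symmetrized
    n = x.size
    H = np.zeros((n, n))
    for i in range(n):
        ei = np.zeros(n)
        ei[i] = h
        H[:, i] = (gradiente(f, x + ei) - gradiente(f, x - ei)) / (2 * h)
    return 0.5 * (H + H.T)

def backtracking(f, x, d, s=1.0, beta=0.5, sigma=1e-4):
    # Armijo backtracking line search: shrink t until sufficient decrease
    t = s
    gTd = gradiente(f, x).T @ d  # directional derivative, computed once
    while f(x + t * d) > f(x) + sigma * t * gTd:
        t = beta * t
    return t

# Example usage on the Rosenbrock function:
# f = lambda x: (1 - x[0])**2 + 100 * (x[1] - x[0]**2)**2
# x = metodo_de_Levenberg_Marquardt(f, np.array([-1.2, 1.0]), 1e-6, 200, 1e-3, backtracking)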