import numpy as np
from numpy.linalg import norm, solve, eigvals


def metodo_cuasi_newton(f, x0, H0, e, kmax, metodo):
    # Quasi-Newton method with the DFP update on the inverse-Hessian
    # approximation H.
    k = 0
    xk = x0
    H = H0
    while norm(gradiente(f, xk)) > e and k < kmax:
        d = -H @ gradiente(f, xk)
        tk = metodo(f, xk, d)
        x_next = xk + tk * d
        p = x_next - xk
        q = gradiente(f, x_next) - gradiente(f, xk)
        if norm(q) >= e:  # q is large enough for the update to be well defined
            H = (H + np.outer(p, p) / (p.T @ q)
                 - (H @ np.outer(q, q) @ H) / (q.T @ (H @ q)))
            k = k + 1
            xk = x_next
        else:
            # q is negligible: the requested precision has been reached.
            print("precision reached:", e)
            return k, xk
    return k, xk
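# Every routine in this file calls `gradiente` and `hessiano`, which are
# assumed to be defined elsewhere in the project. A minimal sketch of
# compatible central-difference versions (the step sizes h are hypothetical
# choices, not taken from the original code):
def gradiente(f, x, h=1e-6):
    # Central-difference approximation of the gradient of f at x.
    x = np.asarray(x, dtype=float)
    g = np.zeros_like(x)
    for i in range(x.size):
        ei = np.zeros_like(x)
        ei[i] = h
        g[i] = (f(x + ei) - f(x - ei)) / (2 * h)
    return g


def hessiano(f, x, h=1e-4):
    # Finite-difference Hessian from gradient differences, symmetrized so
    # that eigvals returns real values.
    x = np.asarray(x, dtype=float)
    n = x.size
    H = np.zeros((n, n))
    for i in range(n):
        ei = np.zeros(n)
        ei[i] = h
        H[:, i] = (gradiente(f, x + ei) - gradiente(f, x - ei)) / (2 * h)
    return 0.5 * (H + H.T)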
def metodo_del_gradiente(f, x0, e, kmax, metodo):
    # Steepest descent: move along d = -grad f with a step chosen by the
    # line search `metodo`.
    k = 0
    x = x0
    while norm(gradiente(f, x)) > e and k < kmax:
        d = -gradiente(f, x)
        t = metodo(f, x, d)
        x = x + t * d
        k = k + 1
    return k, x
def metodo_de_newton(f, x0, e, kmax, metodo):
    # Newton's method: the direction solves hessiano(f, x) @ d = -gradiente(f, x).
    k = 0
    x = x0
    while norm(gradiente(f, x)) > e and k < kmax:
        d = solve(hessiano(f, x), -gradiente(f, x))
        t = metodo(f, x, d)
        x = x + t * d
        k = k + 1
    return k, x
def Armijo(f, x, d, g=0.7, n=0.45):
    # Armijo backtracking: shrink t by the factor g until the
    # sufficient-decrease condition holds.
    t = 1
    while f(x + t * d) > f(x) + n * t * gradiente(f, x).T @ d:
        t = g * t
    return t
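# A quick sanity check of the descent methods above, assuming the
# finite-difference helpers sketched earlier are in scope. The quadratic
# f_demo, the starting point and the tolerances are illustrative choices:
f_demo = lambda x: (x[0] - 1) ** 2 + 4 * (x[1] + 2) ** 2
x0_demo = np.array([5.0, 5.0])
k, x = metodo_del_gradiente(f_demo, x0_demo, 1e-5, 10000, Armijo)
print("gradiente:", k, x)      # expected minimizer: (1, -2)
k, x = metodo_de_newton(f_demo, x0_demo, 1e-5, 100, Armijo)
print("newton:", k, x)         # one exact Newton step suffices on a quadratic
k, x = metodo_cuasi_newton(f_demo, x0_demo, np.eye(2), 1e-5, 1000, Armijo)
print("cuasi-newton:", k, x)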
def eval_mejora_m(f, xk, dk):
    # Ratio between the actual reduction of f and the reduction predicted
    # by the quadratic model qk around xk.
    qk = lambda x: (f(xk) + gradiente(f, xk).T @ (x - xk)
                    + 0.5 * (x - xk).T @ (hessiano(f, xk) @ (x - xk)))
    mk = lambda d: qk(xk + d)
    ared = f(xk) - f(xk + dk)  # actual reduction
    pred = mk(0) - mk(dk)      # predicted reduction
    return ared / pred
def metodo_de_Levenberg_Marquardt(f, x0, e, kmax, g, metodo):
    # Levenberg-Marquardt correction: shift the Hessian so that it is
    # positive definite before solving the Newton system.
    k = 0
    x = x0
    while norm(gradiente(f, x)) > e and k < kmax:
        B = hessiano(f, x)
        mu = np.min(eigvals(B))
        if mu <= 0:
            B += (-mu + g) * np.eye(B.shape[0])
        d = solve(B, -gradiente(f, x))
        t = metodo(f, x, d)
        x = x + t * d
        k = k + 1
    return k, x
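# Example run of the Levenberg-Marquardt correction on a nonconvex function
# whose Hessian is indefinite at the starting point; the function, the shift
# g = 0.1 and the tolerances are illustrative choices:
f_nc = lambda x: x[0] ** 4 - 2 * x[0] ** 2 + x[1] ** 2
k, x = metodo_de_Levenberg_Marquardt(f_nc, np.array([0.1, 1.0]), 1e-5, 200, 0.1, Armijo)
print("levenberg-marquardt:", k, x)  # expected: a local minimizer near (1, 0)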
def punto_cauchy(f, x, Dk):
    # Cauchy point: minimizer of the quadratic model along -gk within the
    # trust region of radius Dk.
    gk = gradiente(f, x)
    Bk = hessiano(f, x)
    gkTBkgk = gk.T @ (Bk @ gk)
    if gkTBkgk <= 0:
        tk = 1  # the model is not convex along gk: go to the boundary
    else:
        tk = min(norm(gk) ** 3 / (Dk * gkTBkgk), 1)
    dk = -tk * (Dk * gk / norm(gk))
    return dk
def Wolfe(f, x, d, c1=0.5, c2=0.75):
    # Wolfe line search by bisection: [a, B] brackets an acceptable step.
    a = 0
    t = 1
    B = np.inf
    k = 0
    while True:
        if k > 10000:  # safeguard against non-termination
            break
        k = k + 1
        if f(x + t * d) > f(x) + c1 * t * gradiente(f, x).T @ d:
            # Sufficient decrease fails: shrink the step.
            B = t
            t = 0.5 * (a + B)
        elif gradiente(f, x + t * d).T @ d < c2 * gradiente(f, x).T @ d:
            # Curvature condition fails: enlarge the step.
            a = t
            t = 2 * a if B == np.inf else 0.5 * (a + B)
        else:
            break
    return t
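# Quick check of the Wolfe search on an illustrative quadratic: at the
# returned step both Wolfe conditions hold.
f_q = lambda x: x[0] ** 2 + 10 * x[1] ** 2
x_w = np.array([1.0, 1.0])
d_w = -gradiente(f_q, x_w)
t_w = Wolfe(f_q, x_w, d_w)
print("wolfe step:", t_w)  # a step around 0.03 satisfies both conditions here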
def Region_de_confianza(f, x0, e, kmax, D0=1, n=0.2):
    # Trust-region method with Cauchy-point steps; Dk is the region radius.
    k = 0
    Dk = D0
    xk = x0
    while norm(gradiente(f, xk)) > e and k < kmax:
        dk = punto_cauchy(f, xk, Dk)
        rhok = eval_mejora_m(f, xk, dk)
        if rhok > n:
            xk = xk + dk  # the model agrees well enough: accept the step
        if rhok < 0.25:
            Dk *= 0.5  # poor agreement: shrink the region
        elif rhok > 0.75 and np.isclose(norm(dk), Dk):
            Dk *= 2.0  # very good agreement on the boundary: expand
        k = k + 1
    return k, xk
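# Trust-region run reusing f_nc from the Levenberg-Marquardt example; the
# tolerance and iteration budget are illustrative:
k, x = Region_de_confianza(f_nc, np.array([0.1, 1.0]), 1e-4, 5000)
print("region de confianza:", k, x)  # expected: a local minimizer near (1, 0)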
def metodo_gradiente_conjugados(f, x0, e, kmax, metodo=Wolfe):
    # Nonlinear conjugate gradient, restarted (B = 0) every n iterations.
    d = -gradiente(f, x0)
    k = 0
    xk = x0
    n = len(xk)
    while norm(gradiente(f, xk)) > e and k < kmax:
        tk = metodo(f, xk, d)
        xk_next = xk + tk * d
        if np.remainder(k + 1, n) != 0:
            # Fletcher-Reeves formula
            B = (gradiente(f, xk_next).T @ gradiente(f, xk_next)
                 / (gradiente(f, xk).T @ gradiente(f, xk)))
        else:
            B = 0
        d = -gradiente(f, xk_next) + B * d
        k = k + 1
        xk = xk_next
    return k, xk
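# Fletcher-Reeves conjugate gradient with its default Wolfe line search on an
# illustrative convex quadratic; the expected minimizer is the origin:
f_cg = lambda x: x[0] ** 2 + 10 * x[1] ** 2 + x[0] * x[1]
k, x = metodo_gradiente_conjugados(f_cg, np.array([3.0, -2.0]), 1e-5, 1000)
print("gradientes conjugados:", k, x)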