# newton_method_with_gradient_descent
# Demo: first minimize f with gradient descent (drive f'(x) toward 0),
# then search for a root of f with Newton's method.
import exemplos
from diff.autodiff import var, df

err = 0.01              # convergence tolerance
f = lambda x: x**2 - 9
x = var(1)              # initial guess
lr = 0.01               # step size (learning rate)
n = 1000                # maximum number of iterations

# Gradient descent: step against the derivative until |f'(x)| <= err.
for i in range(n):
    y = f(x.a)
    dy = df(f, x)           # derivative of f at the current point
    x = var(x - dy * lr)    # move opposite the gradient
    if abs(dy) <= err:
        print("stop at it: {} x = {} : f(x) = {}".format(i + 1, x, y))
        break
    if i == (n - 1):
        print("não houve convergência>: it{}".format(n))

max_iter = 1000
# Shift the point so Newton does not start exactly at the GD result.
x = x + 1

# Newton's method: x <- x - f(x)/f'(x) until |f(x)| <= err.
for it in range(max_iter):
    y = f(x.a)
    dy = df(f, x)
    x = var(x - y / dy)
    if abs(y) <= err:
        print("stop at iter {}: x = {} f(x) = {}".format(it, x, y))
        # BUG FIX: without this break the converged message was printed on
        # every remaining iteration (the sibling Newton script does break).
        break
# Newton's method to solve for the present value pv of an annuity:
# pv * (i / (1 - (1 + i)^-n)) = 289.62 (payment), with i and n held constant.
import exemplos
from diff.autodiff import var, df, const

err = 0.01                          # convergence tolerance on |f(pv)|
f = lambda pv, i, n: pv * (i / (1 - (1 / (1 + i)**n))) - 289.62
pv = var(10)                        # initial guess for the present value
i = const(4.20297 / 100)            # interest rate per period (constant)
n = const(6)                        # number of periods (constant)
max_iter = 1000

for it in range(max_iter):
    y = f(pv.a, i.a, n.a)
    dy = df(f, pv, i, n)            # derivative w.r.t. pv (the only var)
    pv = var(pv - y / dy)           # Newton step
    if abs(y) <= err:
        print(pv, it)
        break
else:
    # BUG FIX: the original tested `iter == max_iter`, which never holds
    # (range stops at max_iter - 1), so this message was unreachable.
    # The for/else runs exactly when the loop finishes without break.
    print("max_iter is complete: x = {}".format(pv))
# Newton's method to find a root of f(x) = x^2 - 9 (expected root near x = 3).
import exemplos
from diff.autodiff import var, df

err = 0.01              # convergence tolerance on |f(x)|
f = lambda x: x**2 - 9
x = var(1)              # initial guess
max_iter = 1000

for it in range(max_iter):
    y = f(x.a)
    dy = df(f, x)
    x = var(x - y / dy)     # Newton step: x <- x - f(x)/f'(x)
    if abs(y) <= err:
        # BUG FIX: the format string has three placeholders but the original
        # passed only two arguments, raising IndexError exactly on success.
        print("stop at iter {}: x = {} f(x) = {}".format(it, x, y))
        break
else:
    # BUG FIX: `iter == max_iter` never holds inside the loop (range stops
    # at max_iter - 1); for/else fires only when the loop never breaks.
    print("max_iter is complete: x = {}".format(x))
# Gradient descent demo: walk toward the stationary point of
# f(x) = x^2 - 3x + 1 by driving f'(x) to zero.
import exemplos
from diff.autodiff import var, const, df

f = lambda x: x**2 - 3 * x + 1

x = var(0)      # initial guess
lr = 0.01       # step size
err = 0.001     # stop once |f'(x)| <= err
n = 1000        # iteration budget

for step in range(n):
    grad = df(f, x)             # derivative of f at the current point
    x = var(x - grad * lr)      # move against the gradient
    if abs(grad) <= err:        # close enough to a stationary point?
        print("stop at it: {} x = {}".format(step + 1, x))
        break
    if step == (n - 1):         # budget exhausted without converging
        print("não houve convergência>: it{}".format(n))