Example #1
import exemplos
from diff.revediff import var, const, df
import numpy as np
f = lambda x: 2 * x + 1
xi = var(1)
x = np.array([xi, xi + 1, xi + 2])  # array of differentiable values
dx = df(f, x)  # derivative of f evaluated at x
print(f(x))
print(dx)
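
As a quick sanity check independent of diff.revediff: the derivative of f(x) = 2*x + 1 is 2 at every point, so df should report 2 for each entry of x. A minimal finite-difference sketch in plain Python (the names g, h, and fd are local to this check):

g = lambda x: 2 * x + 1
h = 1e-6
fd = lambda x: (g(x + h) - g(x - h)) / (2 * h)  # central difference
print([fd(v) for v in (1.0, 2.0, 3.0)])  # ~[2.0, 2.0, 2.0]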
Example #2
import exemplos
from diff.revediff import var, const, df
xs = [i / 10 for i in range(11)]
ys = [x**2 + 0.7 * x + 0.1 for x in xs]  # data from a known quadratic

# squared error of the quadratic a*x**2 + b*x + c at one data point
loss = lambda x, y, a, b, c: (a * x**2 + b * x + c - y)**2
# total error over the whole data set
err = lambda a, b, c: sum(
    loss(xs[i], ys[i], a, b, c) for i in range(len(xs)))

a = var(1)
b = var(0)
c = var(1)

lr = 0.01  # learning rate
for k in range(10000):
    da, db, dc = df(err, a, b, c)  # gradient of err at (a, b, c)
    # gradient step; re-wrap in var() so each iteration starts from a
    # fresh leaf variable (the same pattern as Example #4)
    a = var(a - lr * da)
    b = var(b - lr * db)
    c = var(c - lr * dc)
print(a, b, c)
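
Since ys is generated from the quadratic x**2 + 0.7*x + 0.1, the fit should approach a ≈ 1, b ≈ 0.7, c ≈ 0.1. A plain-Python cross-check using the hand-derived gradient instead of the autodiff (aa, bb, cc, and r are names local to this sketch):

# For loss = (a*x**2 + b*x + c - y)**2 the partials are
#   d/da = 2*(a*x**2 + b*x + c - y)*x**2
#   d/db = 2*(a*x**2 + b*x + c - y)*x
#   d/dc = 2*(a*x**2 + b*x + c - y)
xs = [i / 10 for i in range(11)]
ys = [x**2 + 0.7 * x + 0.1 for x in xs]
aa, bb, cc = 1.0, 0.0, 1.0
for _ in range(10000):
    r = [aa * x**2 + bb * x + cc - y for x, y in zip(xs, ys)]
    da = sum(2 * ri * x**2 for ri, x in zip(r, xs))
    db = sum(2 * ri * x for ri, x in zip(r, xs))
    dc = sum(2 * ri for ri in r)
    aa, bb, cc = aa - 0.01 * da, bb - 0.01 * db, cc - 0.01 * dc
print(aa, bb, cc)  # close to 1.0 0.7 0.1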
Example #3
import exemplos
from diff.revediff import var, const, df
xs = [i / 10 for i in range(11)]
ys = [2 * x + 0.5 for x in xs]  # data from a known line

# squared error of the line a*x + b at one data point
loss = lambda x, y, a, b: (a * x + b - y)**2
# total error over the whole data set
err = lambda a, b: sum(loss(xs[i], ys[i], a, b) for i in range(len(xs)))

a = var(1)
b = var(0)
lr = 0.01  # learning rate
for k in range(100):
    da, db = df(err, a, b)  # gradient of err at (a, b)
    a = var(a - lr * da)  # gradient step, re-wrapped as in Example #4
    b = var(b - lr * db)
print(a, b)
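
The ys above come from the line 2*x + 0.5, and fitting a line by least squares also has a closed-form answer, so the descent can be cross-checked directly; with only 100 iterations the printed values will still be partway toward a = 2, b = 0.5. A plain-Python check (mx, my, slope, intercept are names local to this sketch):

# Ordinary least squares for a line; on exact linear data this
# recovers the generating coefficients exactly.
xs = [i / 10 for i in range(11)]
ys = [2 * x + 0.5 for x in xs]
mx = sum(xs) / len(xs)
my = sum(ys) / len(ys)
slope = sum((x - mx) * (y - my) for x, y in zip(xs, ys)) / sum((x - mx)**2 for x in xs)
intercept = my - slope * mx
print(slope, intercept)  # 2.0 0.5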
Example #4
import exemplos
from diff.revediff import var, const, df
f = lambda x: x**2 - 3 * x + 1

## gradient descent to find the point where df(x) = 0

x = var(0)  # initial guess
lr = 0.01  # step size
err = 0.001  # tolerance: stop once |df(x)| <= err

n = 1000  # maximum number of iterations

for i in range(n):
    dy = df(f, x)  # derivative of f at the current x
    x = var(x - dy * lr)  # take a gradient step
    if abs(dy) <= err:  # check for convergence
        print("stopped at iteration {}: x = {}".format(i + 1, x))
        break
    if i == (n - 1):
        print("did not converge within {} iterations".format(n))
Example #5
# teste.py
import exemplos
from diff.revediff import df, var, const, new_df
f = lambda x, y: x / (x**2 + y**2)
x = var(1)  # differentiate with respect to x
y = const(1)  # held constant: no derivative is taken in y

fxy, dx = new_df(f)(x, y)  # value of f and its derivative in x
print(fxy, dx)
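
By hand, the partial in x of x / (x**2 + y**2) is (y**2 - x**2) / (x**2 + y**2)**2, so at (x, y) = (1, 1) the expected output is fxy = 0.5 and dx = 0.0. A plain-Python check (dfdx is a name local to this sketch):

dfdx = lambda x, y: (y**2 - x**2) / (x**2 + y**2)**2
print(1 / (1**2 + 1**2), dfdx(1.0, 1.0))  # 0.5 0.0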