Example #1
0
def newton_method():
    """Minimize f with Newton's method, using the analytic second derivative gx2."""
    x = Variable(np.array(2.0))
    num_steps = 10

    for step in range(num_steps):
        print(step, x)

        y = f(x)
        x.cleargrad()
        y.backward()

        # Newton update: x <- x - f'(x) / f''(x)
        x.data -= x.grad / gx2(x.data)
Example #2
0
def accuracy(y, t):
    """Fraction of rows whose argmax prediction matches label t (not differentiable)."""
    y, t = as_variable(y), as_variable(t)

    predictions = y.data.argmax(axis=1).reshape(t.shape)
    hits = (predictions == t.data)
    return Variable(as_array(hits.mean()))
Example #3
0
def gradient_descent():
    """Minimize the Rosenbrock function with plain gradient descent."""
    x0 = Variable(np.array(0.0))
    x1 = Variable(np.array(2.0))
    step_size = 0.001  # learning rate
    num_iters = 1000   # iterations

    for it in range(num_iters):
        print(it, x0, x1)

        y = rosenbrock(x0, x1)

        # Drop gradients accumulated in the previous iteration.
        x0.cleargrad()
        x1.cleargrad()
        y.backward()

        # Simultaneous update of both coordinates.
        x0.data -= step_size * x0.grad
        x1.data -= step_size * x1.grad
Example #4
0
def secondorder_method():
    """Newton's method where f'' is obtained by double backpropagation."""
    x = Variable(np.array(2.0))
    num_steps = 10

    for step in range(num_steps):
        print(step, x)

        y = f(x)
        x.cleargrad()
        y.backward(create_graph=True)

        first = x.grad            # f'(x), kept in the graph for a second pass
        x.cleargrad()
        first.backward(create_graph=True)
        second = x.grad           # f''(x)

        # Newton update: x <- x - f'(x) / f''(x)
        x.data -= first.data / second.data
Example #5
0
import numpy as np
from myke import Variable
from myke.utils import plot_dot_graph


def goldstein(x, y):
    """Goldstein-Price test function (global minimum value 3 at (0, -1))."""
    first = 1 + (x + y + 1)**2 * (19 - 14*x + 3*x**2 - 14*y + 6*x*y + 3*y**2)
    second = 30 + (2*x - 3*y)**2 * (18 - 32*x + 12*x**2 + 48*y - 36*x*y + 27*y**2)
    return first * second


# Build the computation graph of the Goldstein-Price function at (1, 1)
# and render it as a Graphviz diagram.
x = Variable(np.array(1.0), 'x')
y = Variable(np.array(1.0), 'y')
z = goldstein(x, y)
z.name = 'z'

plot_dot_graph(z, verbose=False)
Example #6
0
import numpy as np
from myke import Variable
import myke.functions as F

# Toy dataset: y = 5 + 2x plus uniform random noise.
np.random.seed(0)  # fix the seed for reproducibility
x = np.random.rand(100, 1)
y = 5 + 2 * x + np.random.rand(100, 1)  # add random noise to y
x, y = Variable(x), Variable(y)  # optional wrapping

W = Variable(np.zeros((1, 1)))  # weight, shape (1, 1)
b = Variable(np.zeros(1))       # bias, shape (1,)


def predict(x):
    """Linear model x @ W + b, reading the module-level parameters W and b."""
    return F.matmul(x, W) + b


lr = 0.1
iters = 100

# Train the linear model by gradient descent on mean squared error.
for i in range(iters):
    y_pred = predict(x)
    loss = F.mean_squared_error(y, y_pred)

    # Clear gradients accumulated in the previous iteration.
    W.cleargrad()
    b.cleargrad()
    loss.backward()

    # Gradient-descent step on BOTH parameters.
    W.data -= lr * W.grad.data
    b.data -= lr * b.grad.data  # fix: b's gradient was computed but b was never updated
Example #7
0
#%%

import numpy as np
import matplotlib.pyplot as plt
from myke import Variable
import myke.functions as F

x = Variable(np.linspace(-7, 7, 200))
y = F.sin(x)
y.backward(create_graph=True)

# Collect y and its first three derivatives by repeatedly backpropagating
# through the gradient graph (create_graph=True keeps it differentiable).
logs = [y.data]

for i in range(3):
    logs.append(x.grad.data)
    gx = x.grad
    x.cleargrad()
    gx.backward(create_graph=True)

# Plot all curves (fix: iterate pairs directly instead of
# enumerate with an unused value and re-indexing logs[i]).
labels = ["y=sin(x)", "y'", "y''", "y'''"]
for data, label in zip(logs, labels):
    plt.plot(x.data, data, label=label)
plt.legend(loc='lower right')
plt.show()
# %%
Example #8
0
import numpy as np
from myke import Variable
import myke.functions as F


def f(x):
    """Double-well test function: f(x) = x^4 - 2x^2."""
    return x ** 4 - 2 * x ** 2


# Demo 1: second derivative of f at x = 2 via double backprop.
x = Variable(np.array(2.0))
y = f(x)
y.backward(create_graph=True)
print(x.grad)  # first derivative

g = x.grad
x.cleargrad()
g.backward(create_graph=True)
print(x.grad)  # second derivative

# Demo 2: successive higher-order derivatives of sin at x = 1.
x = Variable(np.array(1.0))
y = F.sin(x)
y.backward(create_graph=True)

for _ in range(3):
    g = x.grad
    x.cleargrad()
    g.backward(create_graph=True)
    print(x.grad)
Example #9
0
import numpy as np
from myke import Variable
import myke.functions as F

# Addition with broadcasting: the length-1 operand is stretched to shape (3,).
lhs = Variable(np.array([1, 2, 3]))
rhs = Variable(np.array([10]))
total = lhs + rhs
print(total)

total.backward()
print(rhs.grad)
Example #10
0
import numpy as np
import math

from myke import Variable
from myke.utils import plot_dot_graph


def sphere(x, y):
    """Sphere test function: squared distance of (x, y) from the origin."""
    return x ** 2 + y ** 2


# Differentiate the sphere function at (1, 1).
x = Variable(np.array(1.0))
y = Variable(np.array(1.0))
z = sphere(x, y)
z.backward()

print("sphere", z)
print(x.grad, y.grad)  # dz/dx = 2x and dz/dy = 2y, so both are 2.0 here


def matyas(x, y):
    """Matyas test function (global minimum value 0 at the origin)."""
    return 0.26 * (x**2 + y**2) - 0.48 * x * y


# Differentiate the Matyas function at (1, 1).
x = Variable(np.array(1.0))
y = Variable(np.array(1.0))
z = matyas(x, y)
z.backward()