Example #1
    def numerical_gradient(self, x, t):
        # W is ignored; the lambda just re-evaluates the loss with the current parameters
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads
Example #2
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads["W1"] = numerical_gradient(loss_W, self.params["W1"])
        grads["b1"] = numerical_gradient(loss_W, self.params["b1"])
        grads["W2"] = numerical_gradient(loss_W, self.params["W2"])
        grads["b2"] = numerical_gradient(loss_W, self.params["b2"])

        return grads
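
Examples #1 and #2 both delegate to a free-standing numerical_gradient helper imported from a common module. A minimal sketch of such a helper, assuming the usual central-difference form with h = 1e-4 (the real implementation lives in each project's lib/common module), could look like this:

import numpy as np

def numerical_gradient(f, x):
    # Central difference: df/dx_i ≈ (f(x + h*e_i) - f(x - h*e_i)) / (2h)
    h = 1e-4
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        tmp = x[idx]

        x[idx] = tmp + h
        fxh1 = f(x)                      # f(x + h)
        x[idx] = tmp - h
        fxh2 = f(x)                      # f(x - h)

        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp                     # restore the original value
        it.iternext()

    return grad

Because the helper perturbs the parameter array in place, the loss_W callbacks above can safely ignore their W argument: re-evaluating self.loss(x, t) already sees the perturbed parameters.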
Example #3
    def batch_gradient(self, Gs, ts):
        loss_f = lambda f: self.batch_loss(Gs, ts)

        # Compute the numerical gradient of the loss for each parameter
        grad_W = numerical_gradient(loss_f, self.params["W"])
        grad_A = numerical_gradient(loss_f, self.params["A"])
        grad_b = numerical_gradient(loss_f, self.params["b"])

        grads = {"W": grad_W, "A": grad_A, "b": grad_b}

        return grads
Example #4
  def numerical_gradient(self, G, x, t):
    loss_f = lambda f: self.loss(G, x, t)
    # Compute the numerical gradient of the loss for each parameter
    grad_W = numerical_gradient(loss_f, self.params["W"])
    grad_A = numerical_gradient(loss_f, self.params["A"])
    grad_b = numerical_gradient(loss_f, self.params["b"])

    grads = {
      "W" : grad_W,
      "A" : grad_A,
      "b" : grad_b
    }
    return grads
Example #5
    def numerical_gradient(self, x, t):
        """勾配を求める(数値微分)

        Parameters
        ----------
        x : 入力データ
        t : 教師ラベル

        Returns
        -------
        各層の勾配を持ったディクショナリ変数
            grads['W1']、grads['W2']、...は各層の重み
            grads['b1']、grads['b2']、...は各層のバイアス
        """
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        for idx in range(1, self.hidden_layer_num+2):
            grads['W' + str(idx)] = numerical_gradient(loss_W, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(loss_W, self.params['b' + str(idx)])

        return grads
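
Once this method returns, grads mirrors the structure of self.params, so a caller can apply a plain SGD update key by key. A minimal sketch, assuming a hypothetical network instance, mini-batch (x_batch, t_batch) and learning_rate:

learning_rate = 0.1                                     # hypothetical value
grads = network.numerical_gradient(x_batch, t_batch)    # network, x_batch, t_batch assumed to exist
for key in grads.keys():
    network.params[key] -= learning_rate * grads[key]   # one SGD step per parameter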
Example #6
import sys
import os
import numpy as np
from pathlib import Path

try:
    print(
        os.path.join(Path(os.getcwd()).parent, 'lib')
    )  # prints: C:\Users\GX701GXR\PycharmProjects\linear-algebra-basic\lib -- but it cannot be appended as-is; the path has to use / or \\
    sys.path.append(
        'C:/Users/GX701GXR/PycharmProjects/linear-algebra-basic/lib')
    #sys.path.append('..\lib')
    #sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))
    import common as cm
except ImportError:
    print('lib module not found')


def f(v):
    return np.sum(v**2, axis=0)


print(cm.numerical_gradient(f, np.array([3., 4.])))
Example #7
# Gradient
import os
import sys
from pathlib import Path
import numpy as np
try:
    # sys.path.append('D:/deep-learning/PycharmProjects/linear-algebra-basics/lib')
    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))
    from common import numerical_gradient
except ImportError:
    print('Library module not found')


def f(x):
    return np.sum(x**2, axis=0)


# Function test
# print(f(np.array([3., 4.])))
gra1 = numerical_gradient(f, np.array([3., 4.]))
gra2 = numerical_gradient(f, np.array([-1., -1.5]))
gra3 = numerical_gradient(f, np.array([-0.25, -0.25]))

print(gra1, gra2, gra3)
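
Since f(x) = x0**2 + x1**2, the analytic gradient is 2*x, so the three results should come out close to [6. 8.], [-2. -3.] and [-0.5 -0.5]. A quick sanity check reusing the imported numerical_gradient:

for x in (np.array([3., 4.]), np.array([-1., -1.5]), np.array([-0.25, -0.25])):
    # the central-difference result should match the analytic gradient 2*x almost exactly
    assert np.allclose(numerical_gradient(f, x), 2 * x)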


Example #8
import os
import sys
from pathlib import Path
import numpy as np

try:
    #print(os.getcwd())
    #sys.path.append('F:\deep-learning\dowork\PycharmProjects\linear-algebra-basics\lib')
    #p1 = Path(os.getcwd()).parent + '\\lib'
    #p2 = os.path.join(Path(os.getcwd()).parent, 'lib')
    #print(p2)
    #sys.path.append(p2)

    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))

    #import common as cm
    from common import numerical_gradient

except ImportError:
    print('Library module not found')


def f(x):
    return np.sum(x**2, axis=0)


# Function test

gra1 = numerical_gradient(f, np.array([3., 4.]))
gra2 = numerical_gradient(f, np.array([0., 2.]))
gra3 = numerical_gradient(f, np.array([3., 0.]))

print(gra1, gra2, gra3)
Example #9
import numpy as np
# Assuming softmax, cross_entropy_error and numerical_gradient come from the same
# common module used in the earlier examples.
from common import softmax, cross_entropy_error, numerical_gradient


class simpleNet:
    def __init__(self):
        self.W = np.random.randn(2, 3)

    def predict(self, x):
        return np.dot(x, self.W)

    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)
        return loss

net = simpleNet()
print(net.W)

x = np.array([0.6, 0.9])
p = net.predict(x)

print(p)
print(np.argmax(p))

t = np.array([0, 0, 1])
print(net.loss(x, t))

def f(W):
    return net.loss(x, t)

print(numerical_gradient(f, net.W))
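
The W parameter of f is never used; f just re-evaluates net.loss with whatever net.W currently holds. An equivalent, slightly more compact form replaces the def with a lambda:

f = lambda W: net.loss(x, t)
dW = numerical_gradient(f, net.W)   # gradient of the loss with respect to net.W
print(dW)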
Example #10
import numpy as np
# Assuming softmax, cross_entropy_error and numerical_gradient come from the same
# common module used in the earlier examples.
from common import softmax, cross_entropy_error, numerical_gradient


class SimpleNet:
    def __init__(self):
        self.W = np.random.randn(2, 3)

    def predict(self, x):
        return np.dot(x, self.W)

    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)
        return loss


net = SimpleNet()

x = np.array([0.6, 0.9])
p = net.predict(x)
print(p)

print(np.argmax(p))

t = np.array([0, 0, 1])
print(net.loss(x, t))


def f(W):
    return net.loss(x, t)


dW = numerical_gradient(f, net.W)
print(dW)
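
dW has the same shape as net.W, so the simplest way to use it is a single gradient-descent step. A minimal sketch, assuming a hypothetical learning rate lr:

lr = 0.1                   # hypothetical learning rate
net.W -= lr * dW           # one gradient-descent update of the weights
print(net.loss(x, t))      # the loss should typically decrease after the step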