Code Example #1
    def numerical_gradient(self, x, t):
        # Gradients (numerical differentiation)
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads
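This and the following snippets all delegate to a standalone numerical_gradient helper whose body is not shown in any of the examples. The sketch below is an assumption about what such a helper might look like, using a central-difference approximation; because the parameter array is perturbed in place, the dummy W argument of loss_W is never actually used.

import numpy as np

def numerical_gradient(f, x, h=1e-4):
    # Central difference: df/dx_i ~= (f(x + h*e_i) - f(x - h*e_i)) / (2h)
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        tmp = x[idx]
        x[idx] = tmp + h
        fxh1 = f(x)            # loss with x[idx] increased by h
        x[idx] = tmp - h
        fxh2 = f(x)            # loss with x[idx] decreased by h
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp           # restore the original value
        it.iternext()
    return grad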
Code Example #2
    def numerical_gradient(self, x, t):
        loss_w = lambda w: self.loss(x, t)
        grads = {}
        grads['W1'] = functions.numerical_gradient(loss_w, self.params['W1'])
        grads['b1'] = functions.numerical_gradient(loss_w, self.params['b1'])
        grads['W2'] = functions.numerical_gradient(loss_w, self.params['W2'])
        grads['b2'] = functions.numerical_gradient(loss_w, self.params['b2'])

        return grads
Code Example #3
    def numerical_gradient(self, x, t):
        def loss_W(W):
            return self.loss(x, t)

        grads = {}
        grads['W1'] = f.numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = f.numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = f.numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = f.numerical_gradient(loss_W, self.params['b2'])

        return grads
Code Example #4
    def numerical_gradient(self, x, t):
        loss_w = lambda w: self.loss(x, t)

        grads = {}
        for idx in (1, 2, 3):
            grads['W' + str(idx)] = numerical_gradient(
                loss_w, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(
                loss_w, self.params['b' + str(idx)])

        return grads
Code Example #5
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        for idx in range(1, self.hidden_layer_num + 2):
            grads['W' + str(idx)] = numerical_gradient(
                loss_W, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(
                loss_W, self.params['b' + str(idx)])

        return grads
Code Example #6
    def numerical_gradient(self, x: np.ndarray, t: np.ndarray):
        loss_W = lambda W: self.loss(x, t)

        grads = {
            'W1': numerical_gradient(loss_W, self.params['W1']),
            'b1': numerical_gradient(loss_W, self.params['b1']),
            'W2': numerical_gradient(loss_W, self.params['W2']),
            'b2': numerical_gradient(loss_W, self.params['b2'])
        }

        return grads
Code Example #7
    def numerical_gradient(self, x: np.ndarray, t: np.ndarray) -> dict:
        loss = lambda W: self.calc_loss(x, t)

        grads = {}

        for key in self.weight.keys():
            grads[key] = numerical_gradient(loss, self.weight[key])

        return grads
Code Example #8
    def numerical_gradient(self, x, t):

        loss_W = lambda W: self.loss(x, t, train_flg=True)

        grads = {}
        for idx in range(1, self.hidden_layer_num + 2):
            grads['W' + str(idx)] = numerical_gradient(
                loss_W, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(
                loss_W, self.params['b' + str(idx)])

            if self.use_batchnorm and idx != self.hidden_layer_num + 1:
                grads['gamma' + str(idx)] = numerical_gradient(
                    loss_W, self.params['gamma' + str(idx)])
                grads['beta' + str(idx)] = numerical_gradient(
                    loss_W, self.params['beta' + str(idx)])

        return grads
Code Example #9
    def numerical_gradient(self, x, t):
        """Calculate gradient to weight params using numerical gradient.

        Args:
            x (numpy.ndarray): image data which mean input to NN
            t (numpy.ndarray): labels

        Return:
            dictionary: dictionary of gradient to each param.
        """
        def loss_W(W):
            return self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])

        return grads
Code Example #10
    def numerical_gradient(self, x, t):
        """勾配を求める(数値微分)
        Parameters
        ----------
        x : 入力データ
        t : 教師ラベル
        Returns
        -------
        各層の勾配を持ったディクショナリ変数
            grads['W1']、grads['W2']、...は各層の重み
            grads['b1']、grads['b2']、...は各層のバイアス
        """
        loss_w = lambda w: self.loss(x, t)

        grads = {}
        for idx in (1, 2, 3):
            grads['W' + str(idx)] = numerical_gradient(
                loss_w, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(
                loss_w, self.params['b' + str(idx)])

        return grads
Code Example #11
File: utils.py    Project: aqla114/NeuralNetSample
def check_numerical_grad():
    def func(x):
        return x**2

    ans = numerical_gradient(func, np.array([[5], [5]]))
    print(ans)
Code Example #12
import numpy as np
from functions import softmax, cross_entropy_error, numerical_gradient


class simpleNet:
    def __init__(self):
        self.W = np.random.randn(2, 3)

    def predict(self, x):
        return np.dot(x, self.W)

    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)
        return loss


if __name__ == '__main__':
    net = simpleNet()
    print(net.W)
    x = np.array([0.6, 0.9])
    p = net.predict(x)
    print(p)
    print(np.argmax(p))
    t = np.array([0, 0, 1])

    def f(W):
        return net.loss(x, t)

    print(numerical_gradient(f, net.W))
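As a sanity check that is not part of the original snippet: assuming cross_entropy_error is the standard cross-entropy, the analytic gradient of the loss with respect to W for a single sample is the outer product of the input and the error y - t, so the numerical result printed above can be compared against it (using net, x, t and softmax from the snippet):

    y = softmax(net.predict(x))        # predicted probabilities
    dW_analytic = np.outer(x, y - t)   # dL/dW = x^T (y - t) for one sample
    print(dW_analytic)                 # should closely match numerical_gradient(f, net.W)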
Code Example #13
import numpy as np
from functions import numerical_gradient


def function_2(x):
    return x[0]**2 + x[1]**2


print(numerical_gradient(function_2, np.array([3.0, 4.0])))
print(numerical_gradient(function_2, np.array([0.0, 2.0])))
print(numerical_gradient(function_2, np.array([3.0, 0.0])))
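For reference (not part of the original snippet), the analytic gradient of function_2 is (2*x0, 2*x1), so the three calls above should print values close to [6. 8.], [0. 4.] and [6. 0.]:

import numpy as np

def analytic_gradient_2(x):
    # d/dx_i (x0**2 + x1**2) = 2 * x_i
    return 2 * x

print(analytic_gradient_2(np.array([3.0, 4.0])))  # [6. 8.]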
Code Example #14
import numpy as np
from functions import softmax, cross_entropy_error, numerical_gradient  # assumed helper module, as in Code Example #12


class SimpleNet:
    def __init__(self):
        # 2x3 weight matrix initialised from a standard normal distribution
        self.W = np.random.randn(2, 3)

    def predict(self, x):
        return np.dot(x, self.W)

    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)
        return loss


def f(W):
    return net.loss(x, t)


print("net.W")
net = SimpleNet()
print(net.W)

print("net.predict(x)")
x = np.array([0.6, 0.9])
p = net.predict(x)
print(p)

np.argmax(p)
t = np.array([0, 0, 1])
net.loss(x, t)

print("numerical_gradient")
dW = numerical_gradient(f, net.W)
print(dW)