    def numerical_gradient(self, x, t):
        """Compute the gradients by numerical differentiation.
        Parameters
        ----------
        x : input data
        t : teacher labels
        Returns
        -------
        Dictionary holding the gradients of each layer
            grads['W1'], grads['W2'], ... are the weights of each layer
            grads['b1'], grads['b2'], ... are the biases of each layer
        """
        loss_W = lambda W: self.loss(x, t, train_flg=True)

        grads = {}
        for idx in range(1, self.hidden_layer_num + 2):
            grads['W' + str(idx)] = numerical_gradient(
                loss_W, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(
                loss_W, self.params['b' + str(idx)])

            if self.use_batchnorm and idx != self.hidden_layer_num + 1:
                grads['gamma' + str(idx)] = numerical_gradient(
                    loss_W, self.params['gamma' + str(idx)])
                grads['beta' + str(idx)] = numerical_gradient(
                    loss_W, self.params['beta' + str(idx)])

        return grads
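# All of the methods in this listing call a module-level numerical_gradient(f, x)
# helper that is not shown in the snippets. A minimal central-difference sketch
# (book-style implementation; the step size h is an assumption):
import numpy as np

def numerical_gradient(f, x):
    """Central-difference gradient of f with respect to the NumPy array x."""
    h = 1e-4
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp = x[idx]

        x[idx] = tmp + h
        fxh1 = f(x)            # f(x + h)
        x[idx] = tmp - h
        fxh2 = f(x)            # f(x - h)

        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp           # restore the original value
        it.iternext()

    return grad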
    def numerical_gradient(self, X, t):
        """Compute the gradients of a two-layer net by numerical differentiation."""
        loss_W = lambda W: self.loss(X, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])

        return grads
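# A hypothetical usage sketch for the two-layer version above. The TwoLayerNet
# class and its constructor arguments are assumptions, not part of the snippet.
import numpy as np

net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)  # assumed class
x_batch = np.random.rand(3, 784)   # three dummy input samples
t_batch = np.random.rand(3, 10)    # three dummy teacher vectors

grads = net.numerical_gradient(x_batch, t_batch)
print(grads['W1'].shape)  # expected (784, 50) for this layout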
    def numerical_gradient(self, x, t):
        """勾配を求める(数値微分)
        Parameters
        ----------
        x : 入力データ
        t : 教師ラベル
        Returns
        -------
        各層の勾配を持ったディクショナリ変数
            grads['W1']、grads['W2']、...は各層の重み
            grads['b1']、grads['b2']、...は各層のバイアス
        """
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        for idx in range(1, self.hidden_layer_num + 2):
            grads['W' + str(idx)] = numerical_gradient(
                loss_W, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(
                loss_W, self.params['b' + str(idx)])

        return grads
    def numerical_gradient(self, x, t):
        """勾配を求める(数値微分)
        Parameters
        ----------
        x : 入力データ
        t : 教師ラベル
        Returns
        -------
        各層の勾配を持ったディクショナリ変数
            grads['W1']、grads['W2']、...は各層の重み
            grads['b1']、grads['b2']、...は各層のバイアス
        """
        loss_w = lambda w: self.loss(x, t)

        grads = {}
        for idx in (1, 2, 3):
            grads['W' + str(idx)] = numerical_gradient(
                loss_w, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(
                loss_w, self.params['b' + str(idx)])

        return grads
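# These numerical gradients are typically used to sanity-check a backprop
# implementation. A minimal gradient-check sketch, assuming a network object
# that also exposes an analytic gradient(x, t) method with the same keys,
# and some small batch x_batch, t_batch:
import numpy as np

grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)

for key in grad_numerical.keys():
    diff = np.mean(np.abs(grad_backprop[key] - grad_numerical[key]))
    print(key + ": " + str(diff))  # should be tiny if backprop is correct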
Example #5
def tangent_line(f, x):
    # Slope d of f at x (numerical gradient); y is then the tangent line's intercept.
    d = numerical_gradient(f, x)
    print(d)
    y = f(x) - d * x
    return lambda t: d * t + y
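# A quick usage sketch, assuming the numerical_gradient helper sketched earlier;
# f below is just an example curve evaluated at a single point wrapped in an array.
import numpy as np

def f(x):
    return float(0.01 * x[0]**2 + 0.1 * x[0])   # scalar value of the curve at x[0]

tf = tangent_line(f, np.array([5.0]))  # tangent of f at x = 5
print(tf(5.0))   # equals f([5.0]) = 0.75 up to numerical error
print(tf(6.0))   # tangent-line prediction 0.95, close to f([6.0]) = 0.96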
Example #6
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from common_function import numerical_gradient, function_2


def tangent_line(f, x):
    d = numerical_gradient(f, x)
    print(d)
    y = f(x) - d * x
    return lambda t: d * t + y


if __name__ == '__main__':
    x0 = np.arange(-2, 2.5, 0.25)
    x1 = np.arange(-2, 2.5, 0.25)
    X, Y = np.meshgrid(x0, x1)

    X = X.flatten()
    Y = Y.flatten()

    # Gradient at every grid point; the quiver below plots the negative gradient,
    # i.e. the direction of steepest descent.
    grad = numerical_gradient(function_2, np.array([X, Y]).T).T

    plt.figure()
    plt.quiver(X, Y, -grad[0], -grad[1], angles="xy", color="#666666")
    plt.xlim([-2, 2])
    plt.ylim([-2, 2])
    plt.xlabel('x0')
    plt.ylabel('x1')
    plt.grid()
    plt.draw()
    plt.show()
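# function_2 comes from common_function above; it is the standard two-variable
# example f(x0, x1) = x0**2 + x1**2. A minimal stand-in for a single point:
import numpy as np

def function_2(x):
    return x[0]**2 + x[1]**2   # x = [x0, x1]

# Note: the script above passes a whole batch of points (shape (N, 2)), so the
# imported numerical_gradient is assumed to evaluate this function row by row.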
Example #7
import sys, os
sys.path.append(os.pardir)
import numpy as np
from common_function import softmax, cross_entropy, numerical_gradient, sigmoid


class simpleNet():
    def __init__(self):
        self.W = np.random.randn(2, 3)  # initialize with 2x3 Gaussian-distributed weights

    def predict(self, X):
        self.a = np.dot(X, self.W)
        self.z = sigmoid(self.a)
        self.y = softmax(self.z)
        return self.y

    def loss_cal(self, X, t):
        self.y = self.predict(X)
        self.loss = cross_entropy(self.y, t)

        return self.loss


X = np.array([0.6, 0.9])
t = np.array([0, 0, 1])

net = simpleNet()

f = lambda w: net.loss_cal(X, t)
dW = numerical_gradient(f, net.W)

print(dW)
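# A minimal gradient-descent sketch on the same net (learning rate and step
# count are assumptions); the loss should generally decrease from its start.
lr = 0.1
for _ in range(100):
    net.W -= lr * numerical_gradient(f, net.W)
print(net.loss_cal(X, t))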
Example #8
import sys, os
sys.path.append(os.pardir)
import numpy as np
# The module paths below follow the book-style repo layout; adjust them to your project.
from common.functions import softmax, cross_entropy_error
from common.gradient import numerical_gradient


class simpleNet:
    def __init__(self):
        self.W = np.random.randn(2, 3)  # initialize with 2x3 Gaussian-distributed weights

    def predict(self, x):
        return np.dot(x, self.W)

    # Method that computes the value of the loss function
    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)

        return loss


net = simpleNet()
print(net.W)  # 重みパラメータ

x = np.array([0.6, 0.9])
p = net.predict(x)
print(p)

print(np.argmax(p))  # index of the maximum value

t = np.array([0, 0, 1])  # correct label
print(net.loss(x, t))

#def f(W):
#    return net.loss(x, t)
f = lambda w: net.loss(x, t)

dw = numerical_gradient(f, net.W)  # 勾配
print(dw)
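# Cross-check with the analytic gradient: for softmax + cross-entropy on
# z = x @ W with a one-hot t, dL/dW = outer(x, y - t); it should match dw closely.
y = softmax(net.predict(x))
dw_analytic = np.outer(x, y - t)
print(np.max(np.abs(dw - dw_analytic)))  # should be very small (e.g. below ~1e-6)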