Example 1
    def predict(self, x):
        # Forward pass of a two-layer network: affine -> sigmoid -> affine -> softmax.
        w1, w2 = self.params['w1'], self.params['w2']
        b1, b2 = self.params['b1'], self.params['b2']

        z1 = np.dot(x, w1) + b1   # hidden-layer pre-activation
        a1 = sigmoid(z1)          # hidden-layer activation
        z2 = np.dot(a1, w2) + b2  # output-layer pre-activation
        y = softmax(z2)           # class probabilities
        return y
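
predict relies on sigmoid and softmax helpers that are not included in the snippet. A minimal sketch of what they might look like, assuming the usual batch-aware, numerically stable definitions (the actual helpers in this project may differ):

import numpy as np

def sigmoid(x):
    # Logistic function, applied element-wise.
    return 1.0 / (1.0 + np.exp(-x))

def softmax(x):
    # Row-wise softmax; subtracting the row maximum avoids overflow in exp.
    x = x - np.max(x, axis=-1, keepdims=True)
    e = np.exp(x)
    return e / np.sum(e, axis=-1, keepdims=True)
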
Example 2
    def gradient(self, x, t):
        # Analytic gradients for the two-layer network, computed by backpropagation.
        w1, w2 = self.params['w1'], self.params['w2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}

        batch_num = x.shape[0]

        # forward (note: here a1 is the hidden pre-activation and z1 its sigmoid
        # output, i.e. the naming is swapped relative to predict in Example 1)
        a1 = np.dot(x, w1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, w2) + b2
        y = softmax(a2)

        # backward
        dy = (y - t) / batch_num            # softmax + cross-entropy gradient, averaged over the batch
        grads['w2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)

        da1 = np.dot(dy, w2.T)
        dz1 = sigmoid_grad(a1) * da1        # chain rule through the sigmoid
        grads['w1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)

        return grads
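
gradient additionally calls a sigmoid_grad helper that is not shown. A minimal sketch, assuming it takes the pre-activation (as the call sigmoid_grad(a1) suggests) and returns the element-wise derivative:

import numpy as np

def sigmoid_grad(x):
    # Derivative of the logistic function at the pre-activation x:
    # sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), element-wise.
    s = 1.0 / (1.0 + np.exp(-x))
    return s * (1.0 - s)

The backward pass starts from the standard identity that, for one-hot targets t, the gradient of cross-entropy composed with softmax with respect to the scores is y - t; dividing by batch_num averages it over the batch.
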
Example 3
import numpy as np
from common.funcs import softmax, softmax2


# Compare two softmax implementations on a small batch of score rows.
a = [[1, 2, 3, 4, 5],
     [3, 5, 6, 8, 9],
     [0, 7, 2, 4, 6]]
a = np.array(a)
print(softmax(a) == softmax2(a))             # element-wise comparison; floating-point rounding may make some entries False
print(np.allclose(softmax(a), softmax2(a)))  # more robust check: True if the two implementations agree within tolerance
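
softmax2 is presumably an alternative implementation kept in common.funcs for comparison; its definition is not shown here. Purely as an illustration of what such a comparison exercises, here is a hypothetical per-row variant next to the vectorized softmax sketched under Example 1 (the name and body are assumptions, not the actual module contents):

import numpy as np

def softmax2_sketch(x):
    # Hypothetical alternative: compute the softmax row by row with an
    # explicit loop (assumes x is a 2-D batch of score rows).
    out = np.empty_like(x, dtype=float)
    for i, row in enumerate(x):
        shifted = row - np.max(row)   # shift for numerical stability
        e = np.exp(shifted)
        out[i] = e / e.sum()
    return out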

    def forward(self, x, t):
        # Softmax-with-loss style forward pass: cache the targets and the
        # softmax probabilities, then return the cross-entropy loss.
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)
        return self.loss
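
Both forward above and the loss method below call cross_entropy_error, which is not included in these snippets. A minimal sketch for one-hot targets, with a small constant inside the log to avoid -inf (the real helper in this project may differ):

import numpy as np

def cross_entropy_error(y, t):
    # Mean cross-entropy over the batch, assuming one-hot targets t.
    if y.ndim == 1:
        # Promote a single sample to a batch of size one.
        y = y.reshape(1, y.size)
        t = t.reshape(1, t.size)
    batch_size = y.shape[0]
    return -np.sum(t * np.log(y + 1e-7)) / batch_size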

    def loss(self, x, t):
        # z holds the raw scores from predict; softmax turns them into
        # probabilities before the cross-entropy against the targets t.
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)

        return loss
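
Unlike predict in Example 1, this loss assumes a predict that returns raw scores, since it applies softmax itself. A usage sketch with a hypothetical single-layer host class (the class name, sizes, and data are illustrative only, and the softmax and cross_entropy_error helpers sketched above are assumed to be in scope):

import numpy as np

class SimpleNetSketch:
    # Hypothetical single linear layer whose predict returns raw scores,
    # so that loss applies softmax exactly once.
    def __init__(self, input_size=4, output_size=3, seed=0):
        rng = np.random.default_rng(seed)
        self.W = 0.01 * rng.standard_normal((input_size, output_size))

    def predict(self, x):
        return np.dot(x, self.W)

    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)                    # assumed helper from the sketch above
        return cross_entropy_error(y, t)  # assumed helper from the sketch above

x = np.array([[0.6, 0.9, 0.1, 0.3]])
t = np.array([[0, 0, 1]])                 # one-hot target
net = SimpleNetSketch()
print(net.loss(x, t))                      # scalar cross-entropy loss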