Example no. 1
 def backward_propagation(self, y, h1):
     # Error signal for a softmax output with cross-entropy loss:
     # predicted probabilities minus the one-hot encoded labels.
     self.cost = self.outputs - utils.onehot(10, y)
     # Weight gradient: error transposed times the hidden activations.
     grad_W = np.dot(self.cost.T, h1)
     # Bias gradient: sum the error over the batch axis.
     grad_b = self.cost.sum(axis=0)
     return grad_W, grad_b
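For context, a minimal self-contained sketch of how this batched backward pass could be exercised; the onehot helper, the shapes, and the sample values below are assumptions, since utils and the enclosing class are not shown.

 import numpy as np

 def onehot(num_classes, y):
     # Hypothetical stand-in for utils.onehot: rows of the identity
     # matrix indexed by the integer labels -> shape (batch, num_classes).
     return np.eye(num_classes)[y]

 rng = np.random.default_rng(0)
 outputs = rng.random((4, 10))                 # stand-in softmax probabilities
 outputs /= outputs.sum(axis=1, keepdims=True)
 h1 = rng.random((4, 32))                      # hidden-layer activations
 y = np.array([3, 1, 7, 0])                    # integer class labels

 cost = outputs - onehot(10, y)                # error signal, shape (4, 10)
 grad_W = np.dot(cost.T, h1)                   # weight gradient, shape (10, 32)
 grad_b = cost.sum(axis=0)                     # bias gradient, shape (10,)
 print(grad_W.shape, grad_b.shape)             # (10, 32) (10,)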
Example no. 2
 def backward_propagation_norm(self, y, h1):
     # Error signal: predicted probabilities minus the one-hot labels.
     self.cost = self.outputs - utils.onehot(10, y)
     # Single-example variant: np.outer flattens its inputs, so the
     # original np.transpose(h1) was a no-op and is dropped here.
     grad_W = np.outer(self.cost, h1)
     # Bias gradient: sum over the (length-one) batch axis.
     grad_b = self.cost.sum(axis=0)
     return grad_W, grad_b
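As a hedged check (same assumed one-hot encoding as above), the outer-product form used here agrees with the batched np.dot form when the batch holds a single sample; np.outer flattens its arguments, which is why transposing h1 had no effect.

 import numpy as np

 outputs = np.full((1, 10), 0.1)               # one sample, uniform probabilities
 h1 = np.arange(5, dtype=float).reshape(1, 5)  # one sample, 5 hidden units
 cost = outputs - np.eye(10)[[3]]              # one-hot label 3, shape (1, 10)

 grad_outer = np.outer(cost, h1)               # flattens to (10,) x (5,) -> (10, 5)
 grad_dot = np.dot(cost.T, h1)                 # batched form with batch size 1
 print(np.allclose(grad_outer, grad_dot))      # True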