def backward_propagation(self, y, h1):
    # Output-layer error: predictions minus the one-hot encoded targets.
    self.cost = self.outputs - utils.onehot(10, y)
    # Weight gradient: accumulate the per-example outer products over the
    # batch with a single matrix product, (10, batch) @ (batch, hidden).
    grad_W = np.dot(self.cost.T, h1)
    # Bias gradient: sum the error over the batch dimension.
    grad_b = self.cost.sum(axis=0)
    return grad_W, grad_b
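# backward_propagation relies on utils.onehot, which is defined elsewhere.
# A minimal sketch of the presumed helper, assuming it maps integer labels
# to a matrix of one-hot rows (the signature is inferred from the call
# sites above, not taken from the source):
import numpy as np

def onehot(num_classes, y):
    # Row i is the one-hot encoding of label y[i].
    y = np.atleast_1d(y)
    encoded = np.zeros((len(y), num_classes))
    encoded[np.arange(len(y)), y] = 1.0
    return encoded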
def backward_propagation_norm(self, y, h1):
    # Single-example variant: the weight gradient is the outer product of
    # the output error with the hidden activations.
    self.cost = self.outputs - utils.onehot(10, y)
    # np.outer flattens its arguments, so the original np.transpose(h1)
    # was a no-op and is dropped here.
    grad_W = np.outer(self.cost, h1)
    # Note: this assumes self.cost keeps a leading batch axis of size 1;
    # on a flat (10,) vector, sum(axis=0) would collapse the class axis.
    grad_b = self.cost.sum(axis=0)
    return grad_W, grad_b
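# A minimal usage sketch, assuming the enclosing class stores the output
# weights and biases as self.W and self.b (attribute names are assumptions,
# not taken from the source) and that plain gradient descent is intended:
def gradient_step(self, y, h1, lr=0.01):
    # Take one descent step on the output-layer parameters.
    grad_W, grad_b = self.backward_propagation(y, h1)
    self.W -= lr * grad_W
    self.b -= lr * grad_b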