Example #1
def _derivative(self, feed, input, target):
    # Softmax output for the current input values.
    logits = _softmax(input.eval(feed))
    # Gradient flowing in from the downstream node.
    forward_gradient = self.back(target, feed)
    local_gradient = []
    for i in range(input.shape[0]):
        # Column vector of softmax outputs for sample i.
        local_logits = logits[i].reshape((-1, 1))
        # Build I - s * 1^T and apply it to the incoming gradient row.
        jacob = np.eye(local_logits.shape[0]) - np.matmul(
            local_logits, np.ones_like(local_logits).T)
        local_gradient.append(
            np.matmul(jacob, forward_gradient[i].reshape((-1, 1))).T)
    # Stack the per-sample rows back into a (batch, classes) array.
    local_gradient = np.concatenate(local_gradient, 0)
    return local_gradient
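The per-sample loop above applies the matrix I - s * 1^T to each incoming gradient row, which reduces to g - s * sum(g). For reference, a vectorized equivalent is sketched below; the function name softmax_backward_vectorized is hypothetical and not part of the original source.

import numpy as np

def softmax_backward_vectorized(logits, forward_gradient):
    # Hypothetical helper (assumption, not from the source): same result as
    # the per-sample loop, since (I - s @ 1.T) @ g == g - s * g.sum() per row,
    # so the whole batch reduces to one broadcasted subtraction.
    return forward_gradient - logits * forward_gradient.sum(axis=1, keepdims=True)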
Example #2
def _eval(self, feed):
    # Forward pass: evaluate the input node and apply softmax to it.
    result = _softmax(self.input_list[0].eval(feed))
    return result
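Both examples call a module-level _softmax helper that is not shown here. A minimal, numerically stable sketch of what such a helper could look like is given below; this is an assumption about its behavior, not code taken from the source.

import numpy as np

def _softmax(x):
    # Assumed helper (not shown in the source): row-wise softmax with the
    # usual max-subtraction trick for numerical stability.
    x = np.asarray(x, dtype=float)
    shifted = x - x.max(axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=-1, keepdims=True)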