Example #1
import numpy as np
# nu is assumed to be a project utility module providing a numerically
# stable log_sigmoid (log of the logistic function).

def crossentropy_loss(self, total_input, targets, grad=False):
    # Binary cross-entropy on raw logits; returns the gradient when grad=True.
    log_preds = nu.log_sigmoid(total_input)             # log sigma(x)
    log_one_minus_preds = nu.log_sigmoid(-total_input)  # log(1 - sigma(x))
    preds = np.exp(log_preds)                           # sigma(x)
    if not grad:
        # Mean over the batch of the per-sample summed cross-entropy.
        loss = -np.sum(targets * log_preds + (1 - targets) * log_one_minus_preds, axis=1).mean(axis=0)
        return loss
    else:
        # d(loss)/d(total_input) = (sigma(x) - targets) / batch_size
        dloss = (preds - targets) / targets.shape[0]
        return dloss
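
For reference, a minimal standalone sketch of the same computation, assuming nu.log_sigmoid(x) behaves like a numerically stable log of the logistic function; the log_sigmoid helper and the toy logits/targets arrays below are illustrative, not part of the original code.

import numpy as np

def log_sigmoid(x):
    # Stable log(sigma(x)) = -log(1 + exp(-x)), evaluated without overflow.
    return -np.logaddexp(0.0, -x)

# Toy batch: 2 samples, 3 output units.
logits = np.array([[0.5, -1.0, 2.0],
                   [1.5,  0.0, -0.5]])
targets = np.array([[1.0, 0.0, 1.0],
                    [0.0, 1.0, 0.0]])

log_preds = log_sigmoid(logits)
log_one_minus_preds = log_sigmoid(-logits)
loss = -np.sum(targets * log_preds + (1 - targets) * log_one_minus_preds, axis=1).mean(axis=0)
grad = (np.exp(log_preds) - targets) / targets.shape[0]
print(loss, grad.shape)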
Example #2
def nonlin(self, total_input):
    # -log sigma(-x) = log(1 + exp(x)): the softplus nonlinearity,
    # computed via the numerically stable log_sigmoid.
    return -nu.log_sigmoid(-total_input)
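
Example #2 is the softplus activation, since -log sigma(-x) = log(1 + e^x). A quick check of that identity, using the same assumed stable log_sigmoid as in the sketch above:

import numpy as np

def log_sigmoid(x):
    # Stable log(sigma(x)).
    return -np.logaddexp(0.0, -x)

x = np.linspace(-5.0, 5.0, 11)
softplus_via_nonlin = -log_sigmoid(-x)      # what nonlin() returns
softplus_direct = np.logaddexp(0.0, x)      # log(1 + e^x) computed directly
print(np.allclose(softplus_via_nonlin, softplus_direct))  # True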