def crossentropy_loss(self, total_input, targets, grad=False):
    # Sigmoid cross-entropy computed from logits (total_input).
    log_preds = nu.log_sigmoid(total_input)             # log p
    log_one_minus_preds = nu.log_sigmoid(-total_input)  # log (1 - p)
    preds = np.exp(log_preds)
    if not grad:
        # Per-example cross-entropy summed over units, then averaged over the batch.
        loss = -np.sum(targets * log_preds + (1 - targets) * log_one_minus_preds, 1).mean(0)
        return loss
    else:
        # Gradient w.r.t. the logits: (p - t), averaged over the batch.
        dloss = (preds - targets) / targets.shape[0]
        return dloss
def nonlin(self, total_input):
    # Softplus nonlinearity: -log_sigmoid(-x) = log(1 + exp(x)).
    return -nu.log_sigmoid(-total_input)
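# Note: nu.log_sigmoid is assumed here to be a numerically stable log(sigmoid(x)).
# A minimal sketch of such a helper (hypothetical, not the repo's actual utility)
# using np.logaddexp to avoid overflow for large |x|:
def _log_sigmoid_sketch(x):
    # log(sigmoid(x)) = -log(1 + exp(-x)) = -softplus(-x)
    return -np.logaddexp(0.0, -x)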