Example #1
def loss(self, predict, y):
    # Add L2 regularization for all the weights.
    reg_loss = 0.0
    for weight in self.params.values():
        reg_loss += np.sum(weight ** 2) * 0.5
    # weight_decay is assumed to be defined in the enclosing scope.
    return layers.softmax_cross_entropy(predict, y) + weight_decay * reg_loss
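For context, here is a self-contained NumPy sketch of what a regularized loss like this evaluates. The softmax_cross_entropy stand-in assumes a mean negative log-likelihood reduction, and the toy scores, labels, params, and weight_decay values are illustrative assumptions, not taken from the library:

import numpy as np

def softmax_cross_entropy(scores, labels):
    # Stand-in for layers.softmax_cross_entropy (assumed semantics):
    # mean negative log-likelihood of the true class under a softmax.
    shifted = scores - scores.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    return -log_probs[np.arange(len(labels)), labels].mean()

rng = np.random.default_rng(0)
scores = rng.normal(size=(4, 3))                 # 4 samples, 3 classes
labels = np.array([0, 2, 1, 1])
params = {'fc_weight': rng.normal(size=(5, 3))}  # hypothetical weights
weight_decay = 1e-4

reg_loss = sum(0.5 * np.sum(w ** 2) for w in params.values())
print(softmax_cross_entropy(scores, labels) + weight_decay * reg_loss)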
Example #2
def loss(self, predict, y):
    # Plain softmax cross-entropy, with no regularization term.
    return layers.softmax_cross_entropy(predict, y)
Example #3
def loss(self, predict, y):
    # Compute softmax loss between the output and the label.
    return layers.softmax_cross_entropy(predict, y)
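Examples #2 and #3 compute only the data term. For a batch of N score vectors s_i with integer labels y_i, softmax cross-entropy is conventionally

    L = -(1/N) * sum_i log( exp(s[i, y_i]) / sum_j exp(s[i, j]) )

though whether the library averages or sums over the batch is not visible from these snippets.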
Example #4
def check_fn(weights):
    # f, x, and fake_y are free variables captured from the enclosing
    # scope; the closure maps a weight array to a scalar loss, which is
    # the form a gradient checker expects.
    return layers.softmax_cross_entropy(
        f(x=x, softmax_label=fake_y, fc_weight=weights), fake_y)
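Because check_fn maps a single weight array to a scalar loss, it has exactly the shape a numerical gradient checker expects. Below is a minimal central-difference sketch; the numeric_grad helper and the quadratic stand-in for check_fn are hypothetical, not part of the library:

import numpy as np

def numeric_grad(fn, w, eps=1e-6):
    # Hypothetical helper: central-difference estimate of d fn(w) / d w.
    grad = np.zeros_like(w)
    for idx in np.ndindex(*w.shape):
        orig = w[idx]
        w[idx] = orig + eps
        plus = fn(w)
        w[idx] = orig - eps
        minus = fn(w)
        w[idx] = orig            # restore the entry before moving on
        grad[idx] = (plus - minus) / (2 * eps)
    return grad

# Toy stand-in for check_fn; its analytic gradient is simply w.
check_fn = lambda w: 0.5 * np.sum(w ** 2)
w = np.arange(6, dtype=np.float64).reshape(2, 3)
print(np.allclose(numeric_grad(check_fn, w), w))  # expect True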
Example #5
def loss(self, predict, y):
    # Add L2 regularization for all the weights.
    reg_loss = 0.0
    for weight in self.params.values():
        reg_loss += np.sum(weight ** 2)
    # weight_decay is assumed to be defined in the enclosing scope.
    return (layers.softmax_cross_entropy(predict, y)
            + 0.5 * weight_decay * reg_loss)
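This is algebraically the same regularizer as in Example #1: here the raw squared norms are summed and scaled once by 0.5 * weight_decay, whereas Example #1 folds the 0.5 into each per-weight term.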