Example #1
 def loss(self, predict, y):
     # Add L2 regularization for all the weights.
     reg_loss = 0.0
     for name, weight in self.params.items():
         reg_loss += np.sum(weight**2) * 0.5
     return layers.softmax_cross_entropy(predict, y) + weight_decay * reg_loss
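In Examples #1 and #6, `loss` is a method on a model class: `self.params` is the dict of parameter arrays registered with the model, and `weight_decay` is a constant defined elsewhere in the same file rather than an argument of `loss`. The sketch below is only illustrative, assuming minpy's `ModelBase`/`layers` API, made-up layer sizes, a placeholder `weight_decay` value, and that `softmax_cross_entropy` receives softmax probabilities (as Example #5 suggests, where `f` wraps a softmax output):

 import minpy.numpy as np
 from minpy.nn import layers
 from minpy.nn.model import ModelBase

 weight_decay = 0.001  # assumed module-level constant referenced by loss()

 class TwoLayerNet(ModelBase):
     # Illustrative two-layer classifier; all shapes are placeholders.
     def __init__(self, input_size=784, hidden_size=256, num_classes=10):
         super(TwoLayerNet, self).__init__()
         # Parameters registered here are what loss() iterates over via self.params.
         self.add_param(name='w1', shape=(input_size, hidden_size)) \
             .add_param(name='b1', shape=(hidden_size,)) \
             .add_param(name='w2', shape=(hidden_size, num_classes)) \
             .add_param(name='b2', shape=(num_classes,))

     def forward(self, X, mode):
         h = layers.relu(layers.affine(X, self.params['w1'], self.params['b1']))
         scores = layers.affine(h, self.params['w2'], self.params['b2'])
         # Convert scores to probabilities, since softmax_cross_entropy in these
         # examples is fed a softmax output.
         e = np.exp(scores - np.max(scores, axis=1, keepdims=True))
         return e / np.sum(e, axis=1, keepdims=True)

     def loss(self, predict, y):
         # Softmax cross-entropy plus an L2 penalty over all registered
         # parameters, exactly as in Example #1.
         reg_loss = 0.0
         for name, weight in self.params.items():
             reg_loss += np.sum(weight ** 2) * 0.5
         return layers.softmax_cross_entropy(predict, y) + weight_decay * reg_loss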
Example #2
File: cnn_sym.py  Project: zhxxhit/minpy
 def loss(self, predict, y):
     return layers.softmax_cross_entropy(predict, y)
Example #3
 def loss(self, predict, y):
     # Compute softmax loss between the output and the label.
     return layers.softmax_cross_entropy(predict, y)
Example #4
 def loss(self, predict, y):
     return layers.softmax_cross_entropy(predict, y)
Example #5
 def check_fn(weights):
     return layers.softmax_cross_entropy(
         f(x=x, softmax_label=fake_y, fc_weight=weights), fake_y)
Example #6
 def loss(self, predict, y):
     # Add L2 regularization for all the weights.
     reg_loss = 0.0
     for name, weight in self.params.items():
         reg_loss += np.sum(weight ** 2)
     return layers.softmax_cross_entropy(predict, y) + 0.5 * weight_decay * reg_loss
Example #7
 def check_fn(weights):
     return layers.softmax_cross_entropy(
         f(x=x, softmax_label=fake_y, fc_weight=weights), fake_y)
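Examples #5 and #7 close over a symbolic forward function `f` together with the input `x` and label `fake_y`, so the loss becomes a scalar function of the fully-connected weights alone, which is the shape minpy's autograd expects. A minimal sketch of a gradient check built on it, assuming `minpy.core.grad` and taking `f`, `x`, `fake_y`, and `weights` as already defined as in the examples:

 import minpy.numpy as np
 from minpy.core import grad

 # check_fn maps the fc weight array to a scalar loss, so autograd can
 # differentiate it directly (with respect to argument 0 by default).
 grad_fn = grad(check_fn)
 analytic = grad_fn(weights)   # same shape as `weights`

 # Cross-check the directional derivative along an all-ones direction
 # against a centered finite difference.
 eps = 1e-5
 d = np.ones(weights.shape)
 numeric = (check_fn(weights + eps * d) - check_fn(weights - eps * d)) / (2 * eps)
 print(np.sum(analytic * d), numeric)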
Example #8
 def loss(self, predict, y):
     # Compute softmax loss between the output and the label.
     return layers.softmax_cross_entropy(predict, y)