Example #1
from tensorflow.keras import backend as K

def sparse_categorical_crossentropy(y_true, y_pred):
    # re-specify y_true's shape and dtype explicitly
    y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
    y_true = K.cast(y_true, 'int32')
    # one-hot over the class axis (assumes a 3D y_pred: batch, steps, classes)
    y_true = K.one_hot(y_true, K.shape(y_pred)[2])
    # compute the cross-entropy
    return K.mean(K.categorical_crossentropy(y_true, y_pred))
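For context, a minimal sketch of how such a loss can be passed to compile(); the model architecture, shapes, and random data below are assumptions for illustration, not part of the original example:

import numpy as np
from tensorflow import keras

# hypothetical per-timestep classifier: y_pred is (batch, 16, 10),
# so y_true can be plain integer class ids of shape (batch, 16)
inputs = keras.Input(shape=(16, 32))
outputs = keras.layers.Dense(10, activation='softmax')(inputs)
model = keras.Model(inputs, outputs)
model.compile(optimizer='adam', loss=sparse_categorical_crossentropy)

x = np.random.rand(8, 16, 32)
y = np.random.randint(0, 10, size=(8, 16))
model.fit(x, y, epochs=1, verbose=0)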
Example #2
def compute_loss_of_similarity(self, inputs):
    y_true = self.get_labels_of_similarity(inputs)  # build the labels
    y_pred = K.l2_normalize(inputs, axis=1)  # normalize the sentence vectors
    similarities = K.dot(y_pred, K.transpose(y_pred))  # similarity matrix
    similarities = similarities - K.eye(K.shape(y_pred)[0]) * 1e12  # mask out the diagonal
    similarities = similarities * self.scale  # temperature scaling
    loss = K.categorical_crossentropy(
        y_true, similarities, from_logits=True
    )
    return loss
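The helper get_labels_of_similarity is not shown in this listing. A plausible sketch, assuming the common in-batch layout where consecutive rows (0,1), (2,3), ... are positive pairs; the whole pairing scheme here is an assumption:

from tensorflow.keras import backend as K

def get_labels_of_similarity(y_pred):
    # assumed pairing: row i's positive example is its neighbour i ^ 1
    idxs = K.arange(0, K.shape(y_pred)[0])
    idxs_1 = idxs[None, :]                       # (1, batch)
    idxs_2 = (idxs + 1 - idxs % 2 * 2)[:, None]  # neighbour ids, (batch, 1)
    labels = K.equal(idxs_1, idxs_2)             # one-hot rows over the batch
    return K.cast(labels, K.floatx())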
def compute_loss_of_similarity(self, inputs, mask=None):
    # _, _, _, y_pred, _ = inputs
    _, _, _, _, y_pred = inputs  # use the last layer's logits
    y_true = self.get_labels_of_similarity(y_pred)  # build the labels
    y_pred = K.l2_normalize(y_pred, axis=1)  # normalize the sentence vectors
    similarities = K.dot(y_pred, K.transpose(y_pred))  # similarity matrix
    similarities = similarities - K.eye(K.shape(y_pred)[0]) * 1e12  # mask out the diagonal
    similarities = similarities * 20  # fixed scale factor
    loss = K.categorical_crossentropy(
        y_true, similarities, from_logits=True
    )
    self.add_metric(loss, name='sim_loss')
    return loss
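Both variants read like methods of a custom layer that registers the loss itself rather than receiving labels from fit(). A minimal sketch of that wiring, reusing the helper sketched above; the layer name, shapes, and data are all assumptions:

import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K

class InBatchSimilarityLoss(keras.layers.Layer):
    # hypothetical wrapper: computes the loss from its own inputs and
    # registers it via add_loss/add_metric, so fit() needs no targets
    def call(self, inputs):
        y_true = get_labels_of_similarity(inputs)  # helper sketched above
        y_pred = K.l2_normalize(inputs, axis=1)
        similarities = K.dot(y_pred, K.transpose(y_pred))
        similarities = similarities - K.eye(K.shape(y_pred)[0]) * 1e12
        loss = K.categorical_crossentropy(y_true, similarities * 20, from_logits=True)
        self.add_loss(K.mean(loss))
        self.add_metric(K.mean(loss), name='sim_loss')
        return inputs

embeddings = keras.Input(shape=(128,))
model = keras.Model(embeddings, InBatchSimilarityLoss()(embeddings))
model.compile(optimizer='adam')
model.fit(np.random.rand(32, 128), epochs=1, verbose=0)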
def cal_ce(self, inputs):
    # temperature-softened cross-entropy (the usual knowledge-distillation form)
    source, target = inputs
    source_t = K.softmax(source / self.temperature)  # soften the source distribution
    target_t = K.softmax(target / self.temperature)  # soften the target distribution
    # cross-entropy of target_t against source_t (source acts as the label side)
    ce = K.categorical_crossentropy(source_t, target_t)
    return ce
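A quick standalone check of cal_ce's core computation; the temperature value and random logits are assumptions, and reading source as the teacher side simply follows the argument order of K.categorical_crossentropy (first argument is the target distribution):

import numpy as np
from tensorflow.keras import backend as K

temperature = 2.0
source = K.constant(np.random.randn(4, 10))  # e.g. teacher logits (assumed)
target = K.constant(np.random.randn(4, 10))  # e.g. student logits (assumed)

# soften both distributions with the same temperature, then take the
# cross-entropy of target_t against source_t
source_t = K.softmax(source / temperature)
target_t = K.softmax(target / temperature)
ce = K.categorical_crossentropy(source_t, target_t)  # shape (4,)
print(K.eval(K.mean(ce)))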