def log_norm_step(self, inputs, states):
    """Recursively compute the normalization factor (log Z)."""
    inputs, mask = inputs[:, :-1], inputs[:, -1:]
    states = K.expand_dims(states[0], 2)  # (batch_size, output_dim, 1)
    trans = K.expand_dims(self.trans, 0)  # (1, output_dim, output_dim)
    outputs = K.logsumexp(states + trans, 1)  # (batch_size, output_dim)
    outputs += inputs
    # At padded positions (mask == 0), carry the previous states forward unchanged
    outputs = mask * outputs + (1 - mask) * states[:, :, 0]
    return outputs, [outputs]
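# The step above is the CRF forward algorithm in log space:
#     alpha_t(j) = logsumexp_i( alpha_{t-1}(i) + trans[i, j] ) + h_t(j)
# where h_t(j) is the emission score of tag j at step t; the mask keeps
# alpha frozen at padded positions, and log Z = logsumexp_j( alpha_T(j) ).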
def dense_loss(self, y_true, y_pred):
    """y_true must be in one-hot form."""
    # Derive the mask and cast it to the float dtype
    mask = K.all(K.greater(y_pred, -1e6), axis=2, keepdims=True)
    mask = K.cast(mask, K.floatx())
    # Compute the score of the target path
    y_true, y_pred = y_true * mask, y_pred * mask
    target_score = self.path_score(y_pred, y_true)
    # Recursively compute log Z
    init_states = [y_pred[:, 0]]
    y_pred = K.concatenate([y_pred, mask], axis=2)
    input_length = K.int_shape(y_pred[:, 1:])[1]
    log_norm, _, _ = K.rnn(
        self.log_norm_step,
        y_pred[:, 1:],
        init_states,
        input_length=input_length
    )  # log Z vector at the last step
    log_norm = K.logsumexp(log_norm, 1)  # logsumexp gives one scalar per sample
    # The loss is -log p = log Z - target_score
    return log_norm - target_score
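# A minimal standalone sanity check (not part of the original class): a NumPy
# reference for the same forward recursion, verified against brute-force
# enumeration of all tag paths. The toy `emissions`/`trans` values and the
# helper names here are assumptions for illustration only.
import numpy as np
from itertools import product
from scipy.special import logsumexp

def log_z_forward(emissions, trans):
    """Forward algorithm: alpha_t = logsumexp(alpha_{t-1} + trans) + emissions[t]."""
    alpha = emissions[0].copy()
    for t in range(1, len(emissions)):
        alpha = logsumexp(alpha[:, None] + trans, axis=0) + emissions[t]
    return logsumexp(alpha)

def log_z_brute_force(emissions, trans):
    """Sum the scores of every possible tag path; feasible only at toy sizes."""
    T, num_tags = emissions.shape
    scores = []
    for path in product(range(num_tags), repeat=T):
        score = emissions[0, path[0]]
        for t in range(1, T):
            score += trans[path[t - 1], path[t]] + emissions[t, path[t]]
        scores.append(score)
    return logsumexp(scores)

rng = np.random.default_rng(0)
emissions = rng.normal(size=(4, 3))  # 4 time steps, 3 tags
trans = rng.normal(size=(3, 3))
assert np.allclose(log_z_forward(emissions, trans),
                   log_z_brute_force(emissions, trans))

# In training, `dense_loss` would typically be passed as the Keras loss, e.g.
# model.compile(loss=crf.dense_loss, ...), assuming `crf` is the layer
# instance that exposes these methods.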