Example #1
 def metric_fn(predicate_head_select_loss, token_label_per_example_loss, token_label_ids, token_label_logits,
               is_real_example):
     token_label_predictions = tf.argmax(token_label_logits, axis=-1, output_type=tf.int32)
     # label list is ["[Padding]", "[##WordPiece]", "[CLS]", "[SEP]"] + seq_out_set; skip the 4 special labels
     token_label_pos_indices_list = list(range(num_token_labels))[4:]
     pos_indices_list = token_label_pos_indices_list[:-1]  # exclude the trailing "O" label
     token_label_precision_macro = tf_metrics.precision(token_label_ids, token_label_predictions,
                                                        num_token_labels,
                                                        pos_indices_list, average="macro")
     token_label_recall_macro = tf_metrics.recall(token_label_ids, token_label_predictions, num_token_labels,
                                                  pos_indices_list, average="macro")
     token_label_f_macro = tf_metrics.f1(token_label_ids, token_label_predictions, num_token_labels,
                                         pos_indices_list,
                                         average="macro")
     token_label_precision_micro = tf_metrics.precision(token_label_ids, token_label_predictions,
                                                        num_token_labels,
                                                        pos_indices_list, average="micro")
     token_label_recall_micro = tf_metrics.recall(token_label_ids, token_label_predictions, num_token_labels,
                                                  pos_indices_list, average="micro")
     token_label_f_micro = tf_metrics.f1(token_label_ids, token_label_predictions, num_token_labels,
                                         pos_indices_list,
                                         average="micro")
     token_label_loss = tf.metrics.mean(values=token_label_per_example_loss, weights=is_real_example)
     predicate_head_select_loss = tf.metrics.mean(values=predicate_head_select_loss)
     return {
         "predicate_head_select_loss": predicate_head_select_loss,
         "eval_token_label_precision(macro)": token_label_precision_macro,
         "eval_token_label_recall(macro)": token_label_recall_macro,
         "eval_token_label_f(macro)": token_label_f_macro,
         "eval_token_label_precision(micro)": token_label_precision_micro,
         "eval_token_label_recall(micro)": token_label_recall_micro,
         "eval_token_label_f(micro)": token_label_f_micro,
         "eval_token_label_loss": token_label_loss,
     }
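A metric_fn like this is normally consumed inside model_fn's evaluation branch. Below is a minimal sketch of that wiring, assuming the TF 1.x TPUEstimator API and that mode, total_loss and the tensors passed to metric_fn already exist in the enclosing model_fn (the names are illustrative):

    # Pack metric_fn together with its argument tensors; the estimator calls it during EVAL.
    eval_metrics = (metric_fn,
                    [predicate_head_select_loss, token_label_per_example_loss,
                     token_label_ids, token_label_logits, is_real_example])
    output_spec = tf.contrib.tpu.TPUEstimatorSpec(
        mode=mode,              # tf.estimator.ModeKeys.EVAL
        loss=total_loss,
        eval_metrics=eval_metrics)

With a plain tf.estimator.Estimator, the dictionary returned by metric_fn can instead be passed directly as eval_metric_ops to tf.estimator.EstimatorSpec.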
Example #2
            def metric_fn(per_example_loss, label_ids, logits):
                # def metric_fn(label_ids, logits):
                predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                # Evaluation function: computes precision, recall and F1.
                # If the label set changes, update the numbers below: 41 is the total
                # number of classes and 1-40 are the classes that matter (the B/I/E tags).
                # See the functions in tf.metrics for details.
                precision = tf_metrics.precision(label_ids,
                                                 predictions,
                                                 41,
                                                 list(range(1, 41)),
                                                 average="macro")
                recall = tf_metrics.recall(label_ids,
                                           predictions,
                                           41,
                                           list(range(1, 41)),
                                           average="macro")
                f = tf_metrics.f1(label_ids,
                                  predictions,
                                  41,
                                  list(range(1, 41)),
                                  average="macro")

                return {
                    "eval_precision": precision,
                    "eval_recall": recall,
                    "eval_f": f,
                    # "eval_loss": loss,
                }
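Both examples rely on the tf_metrics package (multi-class precision/recall/F1 for TF 1.x), whose functions return (value, update_op) pairs just like the built-in tf.metrics ops. A small standalone sketch of that streaming behaviour, using made-up toy labels:

    import tensorflow as tf
    import tf_metrics

    labels = tf.constant([1, 2, 3, 0, 1])
    preds = tf.constant([1, 2, 2, 0, 1])
    # 4 classes in total; class 0 (e.g. "O") is excluded from the macro average.
    value, update_op = tf_metrics.precision(labels, preds, 4, [1, 2, 3], average="macro")

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())  # metric accumulators live in local variables
        sess.run(update_op)      # accumulate confusion counts for this batch
        print(sess.run(value))   # read the streaming macro precision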