Example #1
            def metric_fn(bmeo_label_ids, bmeo_pred_ids, attr_label_ids,
                          attr_pred_ids):
                bmeo_indices = []
                bmeo_calulate_label = []
                bmeo_id2label = {}

                attr_indices = []
                attr_calulate_label = []
                attr_id2label = {}

                for k, v in bmeo_label2id.items():
                    bmeo_id2label[v] = k
                    if k not in ('O', '[CLS]', '[SEP]'):
                        bmeo_calulate_label.append(k)
                        bmeo_indices.append(v)
                print('bmeo_calulate_label: {}'.format(bmeo_calulate_label))
                print('bmeo_indices: {}'.format(bmeo_indices))
                # weights for the streaming metrics; note that tf.sequence_mask
                # on a scalar length yields a single all-True vector of that length
                weight = tf.sequence_mask(FLAGS.max_seq_length)
                bmeo_precision = tf_metrics.precision(bmeo_label_ids,
                                                      bmeo_pred_ids,
                                                      num_bmeo_labels,
                                                      bmeo_indices, weight)
                bmeo_recall = tf_metrics.recall(bmeo_label_ids, bmeo_pred_ids,
                                                num_bmeo_labels, bmeo_indices,
                                                weight)
                bmeo_f = tf_metrics.f1(bmeo_label_ids, bmeo_pred_ids,
                                       num_bmeo_labels, bmeo_indices, weight)

                for k, v in attr_label2id.items():
                    attr_id2label[v] = k
                    if k not in ('O', '[CLS]', '[SEP]'):
                        attr_calulate_label.append(k)
                        attr_indices.append(v)
                print('attr_calulate_label: {}'.format(attr_calulate_label))
                print('attr_indices: {}'.format(attr_indices))
                weight = tf.sequence_mask(FLAGS.max_seq_length)
                attr_precision = tf_metrics.precision(attr_label_ids,
                                                      attr_pred_ids,
                                                      num_attr_labels,
                                                      attr_indices, weight)
                attr_recall = tf_metrics.recall(attr_label_ids, attr_pred_ids,
                                                num_attr_labels, attr_indices,
                                                weight)
                attr_f = tf_metrics.f1(attr_label_ids, attr_pred_ids,
                                       num_attr_labels, attr_indices, weight)

                return {
                    "bmeo_eval_precision": bmeo_precision,
                    "bmeo_eval_recall": bmeo_recall,
                    "bmeo_eval_f": bmeo_f,
                    "attr_eval_precision": attr_precision,
                    "attr_eval_recall": attr_recall,
                    "attr_eval_f": attr_f,
                    # "eval_loss": loss,
                }
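The core of the metric_fn above is the index-building step: invert the label2id map and collect the ids of every label that should count toward precision/recall, skipping 'O', '[CLS]' and '[SEP]'. A self-contained sketch of just that step, using a made-up BMEO label map (the values are hypothetical; real maps come from the training data):

    # Toy BMEO label map (hypothetical values for illustration only).
    bmeo_label2id = {'O': 0, 'B-LOC': 1, 'M-LOC': 2, 'E-LOC': 3, '[CLS]': 4, '[SEP]': 5}

    # Invert the map and keep only the ids of labels that should be scored.
    bmeo_id2label = {v: k for k, v in bmeo_label2id.items()}
    bmeo_indices = [v for k, v in bmeo_label2id.items()
                    if k not in ('O', '[CLS]', '[SEP]')]

    print(bmeo_id2label)  # {0: 'O', 1: 'B-LOC', 2: 'M-LOC', 3: 'E-LOC', 4: '[CLS]', 5: '[SEP]'}
    print(bmeo_indices)   # [1, 2, 3]

These indices are what the snippet passes to tf_metrics as the positive-class indices, so 'O' and the special tokens never inflate the scores.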
Example #2
            def metric_fn(label_ids, pred_ids):
                indices = []
                calulate_label = []
                # load the id2tag dictionary (inverse of label2id)
                id2tag = {}
                with open(os.path.join(FLAGS.output_dir, 'label2id.pkl'),
                          'rb') as f:
                    label2id = pickle.load(f)
                print('loaded label2id successfully')
                for k, v in label2id.items():
                    id2tag[v] = k
                    if k not in ('O', '[CLS]', '[SEP]'):
                        calulate_label.append(k)
                        indices.append(v)
                print('calulate_label: {}'.format(calulate_label))
                print('indices: {}'.format(indices))
                # weights for the streaming metrics; note that tf.sequence_mask
                # on a scalar length yields a single all-True vector of that length
                weight = tf.sequence_mask(FLAGS.max_seq_length)
                precision = tf_metrics.precision(label_ids, pred_ids,
                                                 num_labels, indices, weight)
                recall = tf_metrics.recall(label_ids, pred_ids, num_labels,
                                           indices, weight)
                f = tf_metrics.f1(label_ids, pred_ids, num_labels, indices,
                                  weight)

                # print('Evaluation of predicted entities against gold entities......')
                # entity_precision, entity_recall, entity_f1 = entity_metrics(input_ids, label_ids, pred_ids, id2char, id2tag)
                # print("Entity Test P/R/F1: {} / {} / {}".format(round(entity_precision, 2), round(entity_recall, 2), round(entity_f1, 2)))

                return {
                    "eval_precision": precision,
                    "eval_recall": recall,
                    "eval_f": f,
                    # "eval_loss": loss,
                }
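Each tf_metrics call above returns a streaming (value, update_op) pair, just like tf.metrics, so the returned dict is in the form the Estimator eval hooks expect. Below is a minimal, self-contained sketch of how those metrics behave, assuming TF 1.x and that tf_metrics is the package by Guillaume Genthial (guillaumegenthial/tf_metrics); the tensors are toy data. Note that tf.sequence_mask(FLAGS.max_seq_length) with a scalar argument, as used above, produces one all-True vector of that length rather than a per-example padding mask, so the sketch passes explicit lengths instead:

    import tensorflow as tf
    import tf_metrics

    label_ids = tf.constant([[1, 2, 0, 0]])   # one sequence, padded to length 4
    pred_ids = tf.constant([[1, 2, 2, 0]])
    weight = tf.sequence_mask([3], maxlen=4)  # mask the padding position per example

    # Score only label ids 1 and 2, mirroring the indices built from label2id above.
    precision, precision_op = tf_metrics.precision(label_ids, pred_ids, 3, [1, 2], weight)

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())  # streaming metrics use local variables
        sess.run(precision_op)                      # accumulate counts for this batch
        print(sess.run(precision))                  # 2 of 3 positive predictions correct -> ~0.667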