Example #1
        def get_result(checkpoint):
            """Evaluate the checkpoint on SQuAD v2.0."""
            # If running eval on the TPU, you will need to specify the number of
            # steps.
            reader = tf.train.NewCheckpointReader(checkpoint)
            # tf.GraphKeys.GLOBAL_STEP is the string "global_step", the name
            # of the step variable stored in the checkpoint.
            global_step = reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
            all_results = []
            for result in estimator.predict(predict_input_fn,
                                            yield_single_examples=True,
                                            checkpoint_path=checkpoint):
                if len(all_results) % 1000 == 0:
                    tf.logging.info("Processing example: %d" %
                                    (len(all_results)))
                unique_id = int(result["unique_ids"])
                start_top_log_probs = [
                    float(x) for x in result["start_top_log_probs"].flat
                ]
                start_top_index = [
                    int(x) for x in result["start_top_index"].flat
                ]
                end_top_log_probs = [
                    float(x) for x in result["end_top_log_probs"].flat
                ]
                end_top_index = [int(x) for x in result["end_top_index"].flat]

                cls_logits = float(result["cls_logits"].flat[0])
                all_results.append(
                    squad_utils.RawResultV2(
                        unique_id=unique_id,
                        start_top_log_probs=start_top_log_probs,
                        start_top_index=start_top_index,
                        end_top_log_probs=end_top_log_probs,
                        end_top_index=end_top_index,
                        cls_logits=cls_logits))

            output_prediction_file = os.path.join(FLAGS.output_dir,
                                                  "predictions.json")
            output_nbest_file = os.path.join(FLAGS.output_dir,
                                             "nbest_predictions.json")
            output_null_log_odds_file = os.path.join(FLAGS.output_dir,
                                                     "null_odds.json")

            result_dict = {}
            cls_dict = {}
            squad_utils.accumulate_predictions_v2(
                result_dict, cls_dict, eval_examples, eval_features,
                all_results, FLAGS.n_best_size, FLAGS.max_answer_length,
                FLAGS.start_n_top, FLAGS.end_n_top)

            return squad_utils.evaluate_v2(
                result_dict, cls_dict, prediction_json, eval_examples,
                eval_features, all_results, FLAGS.n_best_size,
                FLAGS.max_answer_length, output_prediction_file,
                output_nbest_file, output_null_log_odds_file), int(global_step)
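
This function closes over estimator, predict_input_fn, eval_examples, eval_features, prediction_json, and FLAGS from the enclosing SQuAD run script, so it is not runnable on its own. A minimal driver sketch (an assumption, not part of the scraped code) that evaluates each checkpoint as training produces them:

    # Sketch: poll FLAGS.output_dir and evaluate every new checkpoint.
    # Assumes TF 1.x and get_result defined in the enclosing scope, as above.
    for checkpoint in tf.train.checkpoints_iterator(
            FLAGS.output_dir, min_interval_secs=60, timeout=3600):
        metrics, global_step = get_result(checkpoint)
        tf.logging.info("metrics at step %d: %s", global_step, metrics)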
Example #2
        def get_result(checkpoint):
            """Evaluate the checkpoint on SQuAD v2.0."""
            # If running eval on the TPU, you will need to specify the number of
            # steps.
            reader = tf.train.NewCheckpointReader(checkpoint)
            global_step = reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
            all_results = []
            for result in estimator.predict(predict_input_fn,
                                            yield_single_examples=True,
                                            checkpoint_path=checkpoint):
                if len(all_results) % 1000 == 0:
                    tf.logging.info("Processing example: %d" %
                                    (len(all_results)))
                unique_id = int(result["unique_ids"])

                cls_logits = float(result["cls_logits"].flat[0])
                all_results.append(
                    squad_utils.RawResultV2(unique_id=unique_id,
                                            cls_logits=cls_logits))

            output_prediction_file = os.path.join(FLAGS.output_dir,
                                                  "predictions.json")
            output_nbest_file = os.path.join(FLAGS.output_dir,
                                             "nbest_predictions.json")
            output_null_log_odds_file = os.path.join(FLAGS.output_dir,
                                                     "null_odds.json")

            result_dict = {}
            cls_dict = {}
            squad_utils.accumulate_predictions_v2(
                result_dict, cls_dict, eval_examples, eval_features,
                all_results, FLAGS.n_best_size, FLAGS.max_answer_length,
                FLAGS.start_n_top, FLAGS.end_n_top)

            # Local imports, as in the original snippet.
            import numpy as np
            from squad_utils import make_qid_to_has_ans

            # Map each question id to whether it has a gold answer.
            qid_to_has_ans = make_qid_to_has_ans(prediction_json)
            has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
            no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
            print("has_ans", len(has_ans_qids))
            print("no_ans", len(no_ans_qids))

            def compute_metrics_with_threshold(threshold):
                nonlocal result_dict
                result_dict = {}
                tp = 0
                tn = 0
                fp = 0
                fn = 0
                for example_index, example in enumerate(eval_examples):
                    # Minimum cls logit over the example's features, squashed
                    # through a sigmoid; above the threshold the example is
                    # predicted unanswerable.
                    m = np.min(cls_dict[example_index])
                    predict_is_impossible = 1 / (1 + np.exp(-m)) > threshold
                    # Alternative: threshold the raw logit directly.
                    # predict_is_impossible = m > threshold
                    result_dict[example.qas_id] = m
                    if example.is_impossible:
                        if predict_is_impossible:
                            tp += 1
                        else:
                            fn += 1
                    else:
                        if predict_is_impossible:
                            fp += 1
                        else:
                            tn += 1
                # Guard against zero denominators when nothing is predicted
                # or labeled unanswerable.
                precision = tp / (tp + fp) if tp + fp else 0.0
                recall = tp / (tp + fn) if tp + fn else 0.0
                f1 = 2 * tp / (2 * tp + fp + fn) if tp + fp + fn else 0.0
                tf.logging.info(f"precision: {precision}, "
                                f"recall: {recall}, "
                                f"f1: {f1}")
                return precision, recall, f1

            # precision, recall, f1 = compute_metrics_with_threshold(0.4)
            precision, recall, f1 = compute_metrics_with_threshold(0.5)
            # precision, recall, f1 = compute_metrics_with_threshold(0.6)
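            # Sketch (an addition, not in the original): sweep candidate
            # thresholds and keep the one with the best F1. Since
            # compute_metrics_with_threshold rebuilds result_dict on each
            # call, rerun the winning threshold last so the predictions
            # written below match it.
            # best_t = max((0.3, 0.4, 0.5, 0.6, 0.7),
            #              key=lambda t: compute_metrics_with_threshold(t)[2])
            # precision, recall, f1 = compute_metrics_with_threshold(best_t)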

            with tf.gfile.GFile(output_prediction_file, "w") as writer:
                writer.write(json.dumps(result_dict, indent=4) + "\n")

            return {
                "precision": precision,
                "recall": recall,
                "f1": f1,
                "total": len(eval_examples)
            }, int(global_step)
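
For reference, the sigmoid-and-threshold bookkeeping inside compute_metrics_with_threshold reduces to the following self-contained sketch (toy logits and labels, purely hypothetical):

    import numpy as np

    def threshold_metrics(cls_logits, is_impossible, threshold=0.5):
        """Precision/recall/F1 for predicting 'unanswerable' from cls logits."""
        probs = 1.0 / (1.0 + np.exp(-np.asarray(cls_logits, dtype=np.float64)))
        pred = probs > threshold           # True -> predicted unanswerable
        gold = np.asarray(is_impossible, dtype=bool)
        tp = int(np.sum(pred & gold))      # unanswerable, correctly flagged
        fp = int(np.sum(pred & ~gold))     # answerable, wrongly flagged
        fn = int(np.sum(~pred & gold))     # unanswerable, missed
        precision = tp / (tp + fp) if tp + fp else 0.0
        recall = tp / (tp + fn) if tp + fn else 0.0
        f1 = 2 * tp / (2 * tp + fp + fn) if tp else 0.0
        return precision, recall, f1

    # Two unanswerable and two answerable toy questions.
    print(threshold_metrics([2.0, 0.3, -1.5, -0.2],
                            [True, False, False, True]))  # (0.5, 0.5, 0.5)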