def main(unused_argv):
  """Entry function of the script."""
  if not FLAGS.submission_file:
    raise ValueError("You must provide a submission file.")
  eval_labels = read_labels(FLAGS.eval_data_pattern,
                            cache_path=FLAGS.label_cache)
  tf.logging.info("Total rated segments: %d." % len(eval_labels.labels))
  positive_counter = {}
  for k, v in eval_labels.labels.items():
    _, label_id = k
    if v > 0:
      positive_counter[label_id] = positive_counter.get(label_id, 0) + 1

  seg_preds = read_segment_predictions(FLAGS.submission_file,
                                       eval_labels,
                                       top_n=FLAGS.top_n)
  map_cal = map_calculator.MeanAveragePrecisionCalculator(len(seg_preds))
  seg_labels = []
  seg_scored_preds = []
  num_positives = []
  for label_id in sorted(seg_preds):
    class_preds = seg_preds[label_id]
    seg_label = [eval_labels.labels[(pred, label_id)] for pred in class_preds]
    seg_labels.append(seg_label)
    seg_scored_pred = []
    if class_preds:
      seg_scored_pred = [
          float(x) / len(class_preds) for x in range(len(class_preds), 0, -1)
      ]
    seg_scored_preds.append(seg_scored_pred)
    num_positives.append(positive_counter[label_id])
  map_cal.accumulate(seg_scored_preds, seg_labels, num_positives)
  map_at_n = np.mean(map_cal.peek_map_at_n())
  tf.logging.info("Num classes: %d | mAP@%d: %.6f" %
                  (len(seg_preds), FLAGS.top_n, map_at_n))
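Below is a minimal sketch (not part of the script above) of how those per-class pseudo-scores, labels, and positive counts feed the calculator. It assumes the YouTube-8M starter code's mean_average_precision_calculator module is importable under that name, as in the snippet; the segment ids and labels are made up for illustration.

import numpy as np
import mean_average_precision_calculator as map_calculator

# One class with four retained predictions, ordered best first; the pseudo-score
# is simply the descending rank normalized by the list length.
class_preds = ["vid_a:12", "vid_b:3", "vid_c:7", "vid_d:1"]  # hypothetical segment ids
scores = [float(x) / len(class_preds) for x in range(len(class_preds), 0, -1)]
# scores == [1.0, 0.75, 0.5, 0.25]
labels = [1, 0, 1, 0]  # hypothetical 0/1 ground truth for the same segments
num_positives = [2]    # total positives for this class in the eval set

map_cal = map_calculator.MeanAveragePrecisionCalculator(1)  # a single class
map_cal.accumulate([scores], [labels], num_positives)
print(np.mean(map_cal.peek_map_at_n()))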
Example #2
 def __init__(self, num_class, top_k):
   """Constructs the per-class and global average-precision accumulators."""
   self.sum_hit_at_one = 0.0
   self.sum_perr = 0.0
   self.sum_loss = 0.0
   self.map_calculator = map_calculator.MeanAveragePrecisionCalculator(num_class)
   self.global_ap_calculator = ap_calculator.AveragePrecisionCalculator()
   self.top_k = top_k
   self.num_examples = 0
Example #3
 def __init__(self, num_class, top_k):
   """Construct an EvaluationMetrics object to store the evaluation metrics.
   Args:
     num_class: A positive integer specifying the number of classes.
     top_k: A positive integer specifying how many predictions are considered per video.
   Raises:
     ValueError: An error occurred when MeanAveragePrecisionCalculator cannot
       be constructed.
   """
   self.sum_hit_at_one = 0.0
   self.sum_perr = 0.0
   self.sum_loss = 0.0
   self.map_calculator = map_calculator.MeanAveragePrecisionCalculator(num_class)
   self.global_ap_calculator = ap_calculator.AveragePrecisionCalculator()
   self.top_k = top_k
   self.num_examples = 0
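A hypothetical usage sketch for the constructor documented above (the enclosing class name EvaluationMetrics is taken from its docstring; the argument values are illustrative only):

# Hypothetical: a 1000-class vocabulary, keeping the top 20 predictions per video.
metrics = EvaluationMetrics(num_class=1000, top_k=20)
assert metrics.num_examples == 0 and metrics.sum_loss == 0.0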
def main(unused_argv):
    """Entry function of the script."""

    eval_labels = read_labels(
        '../../inputs/data/frame/3/validate/validate*.tfrecord')
    positive_counter = {}
    for k, v in eval_labels.labels.items():
        _, label_id = k
        if v > 0:
            positive_counter[label_id] = positive_counter.get(label_id, 0) + 1

    top_n = 100000
    seg_preds = read_segment_predictions(FLAGS.submission_file,
                                         eval_labels,
                                         top_n=top_n)
    map_cal = map_calculator.MeanAveragePrecisionCalculator(len(seg_preds))
    seg_labels = []
    seg_scored_preds = []
    num_positives = []
    for label_id in sorted(seg_preds):
        class_preds = seg_preds[label_id]
        seg_label = [
            eval_labels.labels[(pred, label_id)] for pred in class_preds
        ]
        seg_labels.append(seg_label)
        seg_scored_pred = []
        if class_preds:
            seg_scored_pred = [
                float(x) / len(class_preds)
                for x in range(len(class_preds), 0, -1)
            ]
        seg_scored_preds.append(seg_scored_pred)
        num_positives.append(positive_counter.get(label_id, 0))
    map_cal.accumulate(seg_scored_preds, seg_labels, num_positives)
    map_at_n = np.mean(map_cal.peek_map_at_n())
    tf.logging.info("Num classes: %d | mAP@%d: %.6f" %
                    (len(seg_preds), top_n, map_at_n))
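For reference, a small standalone illustration (not part of the script) of the per-class quantity the calculator accumulates: average precision over a ranked list, with the per-class positive count, as passed via num_positives above, used as the denominator.

def average_precision(labels, num_positives):
    # labels are 0/1 ground truth sorted by descending prediction score.
    hits, ap = 0, 0.0
    for rank, label in enumerate(labels, start=1):
        if label > 0:
            hits += 1
            ap += float(hits) / rank
    return ap / num_positives

print(average_precision([1, 0, 1, 0], num_positives=2))  # (1/1 + 2/3) / 2 ≈ 0.8333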