Example #1
def main(_):
    pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)

    if FLAGS.model_dir:
        pipeline_proto.model_dir = FLAGS.model_dir
        tf.logging.info("Override model checkpoint dir: %s", FLAGS.model_dir)

    checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
    tf.logging.info('Start to evaluate checkpoint %s.', checkpoint_path)

    y_true, y_pred = [], []
    for batch_id, examples in enumerate(
            trainer.predict(pipeline_proto, checkpoint_path)):
        vocab, logits, object_names = (examples['vocab'], examples['logits'],
                                       examples['object_texts'])
        vocab = vocab.tolist()
        # Build a multi-hot label vector from the ground-truth object names.
        labels = np.zeros_like(logits)
        assert labels.shape[0] == 1, 'Evaluation expects batch size 1.'

        for name in object_names[0]:
            labels[0, vocab.index(name)] = 1.0

        y_true.append(labels)
        y_pred.append(logits)

    y_true = np.concatenate(y_true, axis=0)
    y_pred = np.concatenate(y_pred, axis=0)

    # Micro-averaged average precision over all example/class pairs.
    mAP = average_precision_score(y_true, y_pred, average='micro')
    tf.logging.info('Evaluated %i examples.', batch_id + 1)
    tf.logging.info('Final mAP is %.3lf', mAP)
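All of the main(_) functions in these examples rely on module-level FLAGS (pipeline_proto, model_dir, and so on) and a trainer module defined elsewhere in the project. A minimal sketch of the flag setup and entry point they appear to assume, using TF1's tf.app.flags; the default values and help strings below are placeholders, not taken from the original code:

# Hypothetical flag/entry-point setup for the main(_) examples; flag names come
# from the snippets, defaults and help strings are placeholders.
import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string('pipeline_proto', '', 'Path to the pipeline proto file.')
flags.DEFINE_string('model_dir', '',
                    'If set, overrides the checkpoint directory in the proto.')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()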
Example #2
def main(_):
    pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)

    if FLAGS.model_dir:
        pipeline_proto.model_dir = FLAGS.model_dir
        tf.logging.info("Override model checkpoint dir: %s", FLAGS.model_dir)
    tf.logging.info("Pipeline configure: %s", '=' * 128)
    tf.logging.info(pipeline_proto)

    # Start to predict.

    checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
    assert checkpoint_path is not None

    global_step = int(checkpoint_path.split('-')[-1])
    for examples in trainer.predict(pipeline_proto, checkpoint_path):
        if 'word2vec' in examples:
            np.save(FLAGS.output_path, examples['word2vec'])
            tf.logging.info('Results are written to %s', FLAGS.output_path)
        break

    tf.logging.info('Done')
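Example #2 exports the learned word embedding with np.save. A quick way to sanity-check the exported array afterwards (the file name below is a placeholder for whatever FLAGS.output_path was set to; note that np.save appends .npy when the path has no extension):

# Hypothetical check of the exported array; 'word2vec.npy' is a placeholder.
import numpy as np

word2vec = np.load('word2vec.npy')
print('word2vec shape:', word2vec.shape)  # e.g. (vocab_size, embedding_dims)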
Example #3
def main(_):
    pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)

    if FLAGS.model_dir:
        pipeline_proto.model_dir = FLAGS.model_dir
        tf.logging.info("Override model checkpoint dir: %s", FLAGS.model_dir)
    tf.logging.info("Pipeline configure: %s", '=' * 128)
    tf.logging.info(pipeline_proto)

    # Load the vocabulary file.

    categories = []
    category_to_id = {}
    with open(FLAGS.vocabulary_file, 'r') as fp:
        for line_id, line in enumerate(fp.readlines()):
            categories.append({'id': 1 + line_id, 'name': line.strip('\n')})
            category_to_id[line.strip('\n')] = 1 + line_id
    tf.logging.info("\n%s", json.dumps(categories, indent=2))

    # Start to predict.

    checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
    assert checkpoint_path is not None

    global_step = int(checkpoint_path.split('-')[-1])
    for examples in trainer.predict(pipeline_proto, checkpoint_path):
        if 'midn_proba_h_given_c' in examples:
            heatmap = _analyze_data(examples['midn_proba_h_given_c'],
                                    category_to_id, categories)
            filename = FLAGS.pipeline_proto.split('/')[1].split(
                '.')[0] + '_{}.jpg'.format(global_step)
            filename = os.path.join(FLAGS.result_dir, filename)
            cv2.imwrite(filename, heatmap)
            tf.logging.info('Results are written to %s', filename)
        break

    tf.logging.info('Done')
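In Example #3 the vocabulary file is read as one category name per line, and ids are assigned in file order starting from 1. A tiny self-contained illustration of the resulting structures, using made-up class names:

# Illustration only: the id mapping produced by the vocabulary-loading loop,
# reproduced with placeholder class names.
names = ['dog', 'cat', 'car']
categories = [{'id': 1 + i, 'name': name} for i, name in enumerate(names)]
category_to_id = {name: 1 + i for i, name in enumerate(names)}
assert category_to_id['cat'] == 2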
Example #4
def _run_evaluation(pipeline_proto,
                    checkpoint_path,
                    evaluators,
                    category_to_id,
                    categories,
                    save_report_to_file=False):
    """Runs the prediction.

  Args:
    pipeline_proto: An instance of pipeline_pb2.Pipeline.
    checkpoint_path: Path to the checkpoint file.
    evaluators: A list of object_detection_evaluation.DetectionEvaluator.
    category_to_id: A python dict mapping category names to integer ids.
    categories: A list of dicts, each with an integer 'id' and a string 'name'.
    save_report_to_file: If True, also write a per-iteration CSV report file.
  """
    eval_count = 0
    visl_examples = []

    for examples in trainer.predict(pipeline_proto, checkpoint_path):
        batch_size = len(examples[InputDataFields.image_id])
        summary_bytes = examples['summary']

        if eval_count == 0:
            summary = tf.Summary.FromString(summary_bytes)
            class_labels = [
                x.decode('ascii')
                for x in examples[DetectionResultFields.class_labels]
            ]

        for i in range(batch_size):
            (image_id, image_height, image_width, num_groundtruths,
             groundtruth_boxes,
             groundtruth_classes) = (examples[InputDataFields.image_id][i],
                                     examples[InputDataFields.image_height][i],
                                     examples[InputDataFields.image_width][i],
                                     examples[InputDataFields.num_objects][i],
                                     examples[InputDataFields.object_boxes][i],
                                     examples[InputDataFields.object_texts][i])

            # Evaluate each OICR iteration.

            for oicr_iter, evaluator in enumerate(evaluators):
                num_detections, detection_boxes, detection_scores, detection_classes = (
                    examples[DetectionResultFields.num_detections +
                             '_at_{}'.format(oicr_iter)][i],
                    examples[DetectionResultFields.detection_boxes +
                             '_at_{}'.format(oicr_iter)][i],
                    examples[DetectionResultFields.detection_scores +
                             '_at_{}'.format(oicr_iter)][i],
                    examples[DetectionResultFields.detection_classes +
                             '_at_{}'.format(oicr_iter)][i])
                evaluator.add_single_ground_truth_image_info(
                    image_id, {
                        'groundtruth_boxes':
                        box_utils.py_coord_norm_to_abs(
                            groundtruth_boxes[:num_groundtruths], image_height,
                            image_width),
                        'groundtruth_classes':
                        np.array([
                            category_to_id[x.decode('ascii')]
                            for x in groundtruth_classes[:num_groundtruths]
                        ]),
                        'groundtruth_difficult':
                        np.zeros([num_groundtruths], dtype=bool)
                    })
                if not FLAGS.eval_coco_on_voc:
                    evaluator.add_single_detected_image_info(
                        image_id, {
                            'detection_boxes':
                            box_utils.py_coord_norm_to_abs(
                                detection_boxes[:num_detections], image_height,
                                image_width),
                            'detection_scores':
                            detection_scores[:num_detections],
                            'detection_classes':
                            detection_classes[:num_detections]
                        })
                else:
                    det_boxes, det_scores, det_classes = _convert_coco_result_to_voc(
                        box_utils.py_coord_norm_to_abs(
                            detection_boxes[:num_detections], image_height,
                            image_width), detection_scores[:num_detections],
                        detection_classes[:num_detections])

                    evaluator.add_single_detected_image_info(
                        image_id, {
                            'detection_boxes': det_boxes,
                            'detection_scores': det_scores,
                            'detection_classes': det_classes
                        })

            eval_count += 1
            if eval_count % 50 == 0:
                tf.logging.info('On image %i.', eval_count)

            # Add to visualization list.

            if len(visl_examples) < FLAGS.max_visl_examples:
                visl_example = {
                    InputDataFields.image_id:
                    examples[InputDataFields.image_id][i],
                    InputDataFields.image:
                    examples[InputDataFields.image][i],
                    InputDataFields.image_height:
                    examples[InputDataFields.image_height][i],
                    InputDataFields.image_width:
                    examples[InputDataFields.image_width][i],
                    InputDataFields.num_objects:
                    examples[InputDataFields.num_objects][i],
                    InputDataFields.object_boxes:
                    examples[InputDataFields.object_boxes][i],
                    InputDataFields.object_texts:
                    examples[InputDataFields.object_texts][i],
                    DetectionResultFields.num_detections:
                    num_detections,
                    DetectionResultFields.detection_boxes:
                    detection_boxes,
                    DetectionResultFields.detection_scores:
                    detection_scores,
                    DetectionResultFields.detection_classes:
                    detection_classes
                }
                for name in [
                        InputDataFields.num_captions,
                        InputDataFields.caption_strings,
                        InputDataFields.caption_lengths,
                        InputDataFields.pseudo_groundtruth_prediction,
                        'debug_groundtruth_labels', 'debug_pseudo_labels'
                ]:
                    if name in examples:
                        visl_example[name] = examples[name][i]
                visl_examples.append(visl_example)

            # Write to detection result file.

            if FLAGS.detection_result_dir:
                results = []
                detection_boxes = box_utils.py_coord_norm_to_abs(
                    detection_boxes[:num_detections], image_height,
                    image_width)

                image_id = int(image_id.decode('ascii'))
                # Use a separate index so the batch index `i` is not shadowed.
                for det_i in range(num_detections):
                    ymin, xmin, ymax, xmax = [
                        int(v) for v in detection_boxes[det_i]
                    ]
                    category_id = class_labels[int(detection_classes[det_i] - 1)]
                    results.append({
                        'image_id': image_id,
                        'category_id': category_id,
                        'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],
                        'score': round(float(detection_scores[det_i]), 5),
                    })

                filename = os.path.join(FLAGS.detection_result_dir,
                                        '{}.json'.format(image_id))
                with open(filename, 'w') as fid:
                    fid.write(json.dumps(results, indent=2))
                tf.logging.info('image_id=%s, file=%s', image_id, filename)

        if eval_count > FLAGS.max_eval_examples:
            break

    # Visualize the results.

    if FLAGS.visl_file_path:
        _visualize(visl_examples, class_labels, FLAGS.visl_file_path)

    for oicr_iter, evaluator in enumerate(evaluators):
        metrics = evaluator.evaluate()
        evaluator.clear()
        for k, v in metrics.items():
            summary.value.add(tag='{}_iter{}'.format(k, oicr_iter),
                              simple_value=v)
        tf.logging.info('\n%s', json.dumps(metrics, indent=2))

        # Write the result file.
        if save_report_to_file:
            if FLAGS.evaluator == 'pascal':
                corloc = [('/'.join(k.split('/')[1:]), v)
                          for k, v in metrics.items() if 'CorLoc' in k]
                mAP = [('/'.join(k.split('/')[1:]), v)
                       for k, v in metrics.items() if 'AP' in k]

                filename = os.path.join(FLAGS.results_dir,
                                        FLAGS.pipeline_proto.split('/')[-1])
                filename = filename.replace(
                    'pbtxt', 'csv') + '.iter_{}'.format(oicr_iter)
                with open(filename, 'w') as fid:
                    fid.write('{}\n'.format(eval_count))
                    fid.write('\n')
                    for lst in [mAP, corloc]:
                        line1 = ','.join([k for k, _ in lst]).replace(
                            '@0.5IOU', '').replace('AP/',
                                                   '').replace('CorLoc/', '')
                        line2 = ' , '.join(
                            ['%.1lf' % (v * 100) for _, v in lst])

                        fid.write(line1 + '\n')
                        fid.write(line2 + '\n')
                        fid.write('\n')
                        fid.write(line1.replace(',', '&') + '\n')
                        fid.write(line2.replace(',', '&') + '\n')
                        fid.write('\n')

    if 'PascalBoxes_Precision/mAP@0.5IOU' in metrics:
        return summary, metrics['PascalBoxes_Precision/mAP@0.5IOU']
    return summary, metrics['DetectionBoxes_Precision/mAP']
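The evaluation above converts boxes with box_utils.py_coord_norm_to_abs, which is not shown in these snippets. Judging from its name and call sites (normalized [ymin, xmin, ymax, xmax] boxes plus image height and width), a plausible stand-in is sketched below; the project's actual implementation may differ:

# Hypothetical stand-in for box_utils.py_coord_norm_to_abs: scale normalized
# [ymin, xmin, ymax, xmax] boxes to absolute pixel coordinates.
import numpy as np


def coord_norm_to_abs(boxes, height, width):
    boxes = np.asarray(boxes, dtype=np.float32)
    ymin, xmin, ymax, xmax = np.split(boxes, 4, axis=-1)
    return np.concatenate(
        [ymin * height, xmin * width, ymax * height, xmax * width], axis=-1)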
Example #5
def _run_evaluation(pipeline_proto,
                    checkpoint_path,
                    oicr_iterations,
                    category_to_id,
                    categories,
                    save_report_to_file=False):
  """Runs the prediction.

  Args:
    pipeline_proto: An instance of pipeline_pb2.Pipeline.
    checkpoint_path: Path to the checkpoint file.
    oicr_iterations: Number of OICR refinement iterations; detections at
      iterations 0..oicr_iterations are read from the prediction results.
    category_to_id: A python dict mapping category names to integer ids.
    categories: A list of dicts, each with an integer 'id' and a string 'name'.
    save_report_to_file: Whether to also save the evaluation report to a file.
  """
  eval_count = 0

  for examples in trainer.predict(pipeline_proto, checkpoint_path):
    batch_size = len(examples[InputDataFields.image_id])

    if eval_count == 0:
      class_labels = [
          x.decode('utf8') for x in examples[DetectionResultFields.class_labels]
      ]

    for i in range(batch_size):
      (image_id, image_height, image_width, num_groundtruths, groundtruth_boxes,
       groundtruth_classes) = (examples[InputDataFields.image_id][i],
                               examples[InputDataFields.image_height][i],
                               examples[InputDataFields.image_width][i],
                               examples[InputDataFields.num_objects][i],
                               examples[InputDataFields.object_boxes][i],
                               examples[InputDataFields.object_texts][i])

      # Evaluate each OICR iteration.

      for oicr_iter in range(1 + oicr_iterations):
        num_detections, detection_boxes, detection_scores, detection_classes = (
            examples[DetectionResultFields.num_detections +
                     '_at_{}'.format(oicr_iter)][i],
            examples[DetectionResultFields.detection_boxes +
                     '_at_{}'.format(oicr_iter)][i],
            examples[DetectionResultFields.detection_scores +
                     '_at_{}'.format(oicr_iter)][i],
            examples[DetectionResultFields.detection_classes +
                     '_at_{}'.format(oicr_iter)][i])
        if FLAGS.eval_coco_on_voc:
          det_boxes, det_scores, det_classes = _convert_coco_result_to_voc(
              box_utils.py_coord_norm_to_abs(detection_boxes[:num_detections],
                                             image_height, image_width),
              detection_scores[:num_detections],
              detection_classes[:num_detections])

      eval_count += 1
      if eval_count % 50 == 0:
        tf.logging.info('On image %i.', eval_count)

      # Write to detection result file.

      if FLAGS.detection_results_dir:
        results = []
        detection_boxes = box_utils.py_coord_norm_to_abs(
            detection_boxes[:num_detections], image_height, image_width)

        image_id = image_id.decode('utf8').split('.')[0]
        # Use a separate index so the batch index `i` is not shadowed.
        for det_i in range(num_detections):
          ymin, xmin, ymax, xmax = [int(v) for v in detection_boxes[det_i]]
          category_id = class_labels[int(detection_classes[det_i] - 1)]
          results.append({
              'image_id': image_id,
              'category_id': category_id,
              'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],
              'score': round(float(detection_scores[det_i]), 5),
          })

        filename = os.path.join(FLAGS.detection_results_dir,
                                '{}.json'.format(image_id))
        with open(filename, 'w') as fid:
          fid.write(json.dumps(results, indent=2))
        tf.logging.info('image_id=%s, file=%s', image_id, filename)
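Example #5 writes one JSON file per image into FLAGS.detection_results_dir, each containing COCO-style entries with [x, y, width, height] boxes. If a single results file is needed downstream, the per-image files can be merged afterwards; a small sketch with placeholder paths:

# Hypothetical post-processing: merge the per-image detection files into one
# COCO-style results list. 'detection_results' is a placeholder directory.
import glob
import json

merged = []
for path in sorted(glob.glob('detection_results/*.json')):
    with open(path) as fid:
        merged.extend(json.load(fid))

with open('detections_merged.json', 'w') as fid:
    json.dump(merged, fid, indent=2)
print('merged %d detections' % len(merged))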
Example #6
def _run_prediction(pipeline_proto,
                    topic_list,
                    topic_data,
                    dbpedia_data,
                    checkpoint_path=None):
    """Runs the prediction.

  Args:
    pipeline_proto: An instance of pipeline_pb2.Pipeline.
    topic_list: A list of topic names to report per-topic metrics for.
    topic_data: A dict mapping image_id to its topic.
    dbpedia_data: A dict mapping image_id (as a string) to the list of DBpedia
      entries used to evaluate knowledge recall.
    checkpoint_path: Optional path to the checkpoint file.
  """
    results = {}
    side_results = {}
    metrics = Metrics()

    dbpedia_total = dbpedia_bingo = 0

    for example_index, example in enumerate(
            trainer.predict(pipeline_proto, checkpoint_path=checkpoint_path)):

        # Compute the metrics.
        image_id = example['image_id'][0]

        annotation = _load_json(
            os.path.join(FLAGS.qa_json_path, '{}.json'.format(image_id)))
        (groundtruth_list, question_list) = (annotation['groundtruth_list'],
                                             annotation['question_list'])
        prediction_list = [
            question_list[i] for i in example['similarity'][0].argsort()[::-1]
        ]

        topic = topic_data[image_id]
        _update_metrics(groundtruth_list, prediction_list, topic, metrics)

        # Create the result entry to write into the .json file.

        results[_revise_image_id(image_id)] = [
            question_list[index]
            for index in np.argsort(example['similarity'][0])[::-1]
        ]

        if example_index % 100 == 0:
            tf.logging.info('On image %i', example_index)

        if example_index + 1 >= FLAGS.number_of_eval_examples:
            break

        # Create json visualization.

        (
            adjacency,
            adjacency_logits,
            similarity,
            proposal_num,
            proposal_box,
            proposal_strings,
            proposal_lengths,
            label_num,
            label_text,
            slogan_num,
            slogan_box,
            slogan_strings,
            slogan_lengths,
            slogan_kb_num,
            slogan_kb_strings,
            slogan_kb_lengths,
        ) = (example[_FIELD_ADJACENCY][0], example[_FIELD_ADJACENCY_LOGITS][0],
             example[_FIELD_SIMILARITY][0],
             example[InputDataFields.proposal_num][0],
             example[InputDataFields.proposal_box][0],
             example[InputDataFields.proposal_text_string][0],
             example[InputDataFields.proposal_text_length][0],
             example[InputDataFields.proposal_label_num][0],
             example[InputDataFields.proposal_label_text][0],
             example[InputDataFields.slogan_num][0],
             example[InputDataFields.slogan_box][0],
             example[InputDataFields.slogan_text_string][0],
             example[InputDataFields.slogan_text_length][0],
             example[InputDataFields.slogan_kb_num][0],
             example[InputDataFields.slogan_kb_text_string][0],
             example[InputDataFields.slogan_kb_text_length][0])

        # Evaluate knowledge recall.

        dbpedia_id = b'none'
        if slogan_num > 0 and slogan_kb_num > 0:
            kb_to_slogan = adjacency[
                1 + proposal_num:1 + proposal_num + slogan_num,
                1 + proposal_num + slogan_num +
                label_num:]  # kb_to_slogan shape = [slogan_num, dbpedia_num]
            dbpedia_id = example[_FIELD_DBPEDIA_IDS][0][kb_to_slogan.max(
                0).argmax()]

        side_results[str(image_id)] = dbpedia_id.decode('utf8')

        if str(image_id) in dbpedia_data:
            kblist = dbpedia_data[str(image_id)]
            assert len(kblist) > 0, 'Empty kblist!'

            dbpedia_total += 1
            if dbpedia_id.decode('utf8') in kblist:
                dbpedia_bingo += 1

        # Results for visualization.

        if FLAGS.json_output_path:
            with open(
                    os.path.join(FLAGS.json_output_path,
                                 '{}.json'.format(image_id)), 'w') as fid:
                json_data = {
                    'image_id': int(image_id),
                    'proposal_num': int(proposal_num),
                    'proposal_boxes': _boxes_to_json_array(proposal_box),
                    'proposal_labels': _varlen_strings_to_json_array(
                        proposal_strings, proposal_lengths),
                    'label_num': int(label_num),
                    'label_text': [x.decode('ascii') for x in label_text],
                    'slogan_num': int(slogan_num),
                    'slogan_boxes': _boxes_to_json_array(slogan_box),
                    'slogan_labels': _varlen_strings_to_json_array(
                        slogan_strings, slogan_lengths),
                    'slogan_kb_num': int(slogan_kb_num),
                    'slogan_kb_labels': _varlen_strings_to_json_array(
                        slogan_kb_strings, slogan_kb_lengths),
                    'adjacency': [[round(float(x), 2) for x in row]
                                  for row in adjacency],
                    'adjacency_logits': [[round(float(x), 2) for x in row]
                                         for row in adjacency_logits],
                    'predictions': results[_revise_image_id(image_id)],
                    'annotations': groundtruth_list,
                }

                fid.write(json.dumps(json_data, indent=2))

    # Metrics.

    accuracy_list, minrank_list = [], []
    for topic in topic_list:
        accuracy_list.append(metrics.report(tag=topic, column=0))
        minrank_list.append(metrics.report(tag=topic, column=1))
    accuracy_list = [round(x, 3) for x in accuracy_list]
    minrank_list = [round(x, 3) for x in minrank_list]

    accuracy_product = np.mean(accuracy_list[:-9])
    accuracy_psa = np.mean(accuracy_list[-9:])
    minrank_product = np.mean(minrank_list[:-9])
    minrank_psa = np.mean(minrank_list[-9:])

    tf.logging.info('-' * 128)
    tf.logging.info('accuracy: product=%.3lf, psa=%.3lf', accuracy_product,
                    accuracy_psa)
    tf.logging.info('minrank: product=%.3lf, psa=%.3lf', minrank_product,
                    minrank_psa)

    dbpedia_recall_at_1 = 1.0 * dbpedia_bingo / dbpedia_total
    tf.logging.info('dbpedia accuracy: %.4lf', dbpedia_recall_at_1)

    # Results to be submitted.

    with open(FLAGS.prediction_output_path, 'w') as fid:
        fid.write(json.dumps(results, indent=2))

    with open(FLAGS.side_prediction_output_path, 'w') as fid:
        fid.write(json.dumps(side_results, indent=2))

    # Test ids to be exported.

    image_ids = [int(x.split('/')[1].split('.')[0]) for x in results.keys()]
    with open('image_id.txt', 'w') as fid:
        fid.write(json.dumps(image_ids, indent=2))

    results = {
        'accuracy/macro': np.mean(accuracy_list),
        'accuracy/micro': metrics.report(tag='general_micro', column=0),
        'accuracy/product_macro': accuracy_product,
        'accuracy/product_micro': metrics.report(tag='product_micro',
                                                 column=0),
        'accuracy/psa_macro': accuracy_psa,
        'accuracy/psa_micro': metrics.report(tag='psa_micro', column=0),
        'minrank/macro': np.mean(minrank_list),
        'minrank/micro': metrics.report(tag='general_micro', column=1),
        'minrank/product_macro': minrank_product,
        'minrank/product_micro': metrics.report(tag='product_micro', column=1),
        'minrank/psa_macro': minrank_psa,
        'minrank/psa_micro': metrics.report(tag='psa_micro', column=1),
    }
    for topic, accuracy in zip(topic_list, accuracy_list):
        results['accuracy_per_topic/{}'.format(topic)] = accuracy
    return results
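The Metrics helper and _update_metrics used in Example #6 are not part of this snippet. Based on how the reported values are consumed (column 0 is logged as accuracy, column 1 as minrank, and prediction_list is the question list sorted by predicted similarity), one plausible per-example computation is sketched below; the project's actual definitions may differ:

# Hypothetical per-example metrics inferred from the surrounding code: top-1
# accuracy and the best (1-based) rank of any ground-truth statement in the
# similarity-sorted prediction list.
def rank_metrics_sketch(groundtruth_list, prediction_list):
    ranks = [
        1 + rank for rank, prediction in enumerate(prediction_list)
        if prediction in groundtruth_list
    ]
    accuracy = 1.0 if ranks and ranks[0] == 1 else 0.0
    minrank = min(ranks) if ranks else 1 + len(prediction_list)
    return accuracy, minrank


print(rank_metrics_sketch(['statement a'], ['statement a', 'statement b']))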
Example #7
def _run_inference(pipeline_proto, checkpoint_path, oicr_iterations):
  """Runs the prediction.

  Args:
    pipeline_proto: An instance of pipeline_pb2.Pipeline.
    checkpoint_path: Path to the checkpoint file.
    oicr_iterations: Index of the OICR iteration whose detections are exported.
  """
  eval_count = 0

  for examples in trainer.predict(pipeline_proto, checkpoint_path):
    batch_size = len(examples[InputDataFields.image_id])

    if eval_count == 0:
      class_labels = [
          x.decode('ascii')
          for x in examples[DetectionResultFields.class_labels]
      ]

    for i in range(batch_size):
      (image_id, image_height, image_width, num_groundtruths, groundtruth_boxes,
       groundtruth_classes) = (examples[InputDataFields.image_id][i],
                               examples[InputDataFields.image_height][i],
                               examples[InputDataFields.image_width][i],
                               examples[InputDataFields.num_objects][i],
                               examples[InputDataFields.object_boxes][i],
                               examples[InputDataFields.object_texts][i])

      oicr_iter = oicr_iterations
      num_detections, detection_boxes, detection_scores, detection_classes = (
          examples[DetectionResultFields.num_detections +
                   '_at_{}'.format(oicr_iter)][i],
          examples[DetectionResultFields.detection_boxes +
                   '_at_{}'.format(oicr_iter)][i],
          examples[DetectionResultFields.detection_scores +
                   '_at_{}'.format(oicr_iter)][i],
          examples[DetectionResultFields.detection_classes +
                   '_at_{}'.format(oicr_iter)][i])

      eval_count += 1
      if eval_count % 50 == 0:
        tf.logging.info('On image %i.', eval_count)

      # Write to detection result file.
      image_id = int(image_id.decode('ascii').split('.')[0])
      results = {
          'image_id': image_id,
          'bounding_boxes': []
      }

      # Use a separate index so the batch index `i` is not shadowed.
      for det_i in range(num_detections):
        ymin, xmin, ymax, xmax = [
            round(float(x), 3) for x in detection_boxes[det_i]
        ]
        class_label = class_labels[int(detection_classes[det_i] - 1)]
        class_score = round(float(detection_scores[det_i]), 3)
        results['bounding_boxes'].append({
            'class_score': class_score,
            'class_label': class_label,
            'bounding_box': {
                'ymin': ymin,
                'xmin': xmin,
                'ymax': ymax,
                'xmax': xmax
            },
        })

      filename = os.path.join(FLAGS.detection_results_dir,
                              '{}.json'.format(image_id))
      with open(filename, 'w') as fid:
        fid.write(json.dumps(results, indent=2))
      tf.logging.info('image_id=%s, file=%s', image_id, filename)
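Unlike the earlier examples, Example #7 does not convert the boxes to absolute coordinates before writing them, so the ymin/xmin/ymax/xmax values in the JSON are presumably still normalized to [0, 1]. Under that assumption, a small sketch of reading a result file back and drawing the boxes with OpenCV (file names are placeholders):

# Hypothetical consumer of the per-image JSON written above; assumes normalized
# box coordinates and uses placeholder file names.
import json

import cv2

with open('123456.json') as fid:
    detections = json.load(fid)

image = cv2.imread('123456.jpg')
height, width = image.shape[:2]
for entry in detections['bounding_boxes']:
    box = entry['bounding_box']
    pt1 = (int(box['xmin'] * width), int(box['ymin'] * height))
    pt2 = (int(box['xmax'] * width), int(box['ymax'] * height))
    cv2.rectangle(image, pt1, pt2, (0, 255, 0), 2)
cv2.imwrite('123456_visl.jpg', image)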