def compute_map(labels_and_predictions, coco_gt, use_cpp_extension=True):
    """Use model predictions to compute mAP.

    The evaluation code is largely copied from the MLPerf reference
    implementation. While it is possible to write the evaluation as a tensor
    metric and use Estimator.evaluate(), this approach was selected for
    simplicity and ease of debugging.

    Args:
      labels_and_predictions: An iterable of prediction dicts from the TPU
        predict method.
      coco_gt: ground truth COCO object.
      use_cpp_extension: whether to use the cocoeval C++ extension.

    Returns:
      Evaluation result as a dict of COCO metric names to values.
    """

    predictions = []
    tic = time.time()

    for example in labels_and_predictions:
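        # Padded examples exist only to fill out the final batch on TPU; they
        # carry no real detections and are skipped.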
        if constants.IS_PADDED in example and example[constants.IS_PADDED]:
            continue

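        # RAW_SHAPE holds the original image height and width, used below to
        # rescale the normalized box coordinates to pixels.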
        htot, wtot, _ = example[constants.RAW_SHAPE]
        pred_box = example['pred_box']
        pred_scores = example['pred_scores']
        indices = example['indices']
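        # decode_single selects the final detections (non-max suppression with
        # OVERLAP_CRITERIA, keeping at most MAX_NUM_EVAL_BOXES boxes) and
        # returns normalized [ymin, xmin, ymax, xmax] boxes, labels and scores.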
        loc, label, prob = decode_single(pred_box, pred_scores, indices,
                                         constants.OVERLAP_CRITERIA,
                                         constants.MAX_NUM_EVAL_BOXES,
                                         constants.MAX_NUM_EVAL_BOXES)

        for loc_, label_, prob_ in zip(loc, label, prob):
            # loc_ is [ymin, xmin, ymax, xmax] in normalized coordinates, while
            # COCO expects [x, y, width, height] in pixels, hence the [1], [0]
            # ordering rather than [0], [1]. Each row is
            # [image_id, x, y, width, height, score, category_id].
            predictions.append([
                int(example[constants.SOURCE_ID]),
                loc_[1] * wtot, loc_[0] * htot, (loc_[3] - loc_[1]) * wtot,
                (loc_[2] - loc_[0]) * htot, prob_,
                constants.CLASS_INV_MAP[label_]
            ])

    toc = time.time()
    tf.logging.info('Prepare predictions DONE (t={:0.2f}s).'.format(toc - tic))

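    # Run COCO evaluation either through the cocoeval C++ extension (which
    # exposes CamelCase methods such as Evaluate() and GetStats()) or through
    # the standard pycocotools Python API; both produce the same 12-element
    # stats vector that is mapped to metric names below.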
    if use_cpp_extension:
        coco_dt = coco_gt.LoadRes(np.array(predictions, dtype=np.float32))
        coco_eval = COCOeval(coco_gt, coco_dt, iou_type='bbox')
        coco_eval.Evaluate()
        coco_eval.Accumulate()
        coco_eval.Summarize()
        stats = coco_eval.GetStats()

    else:
        coco_dt = coco_gt.loadRes(np.array(predictions))

        coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        stats = coco_eval.stats

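    # stats[0] is AP averaged over IoU thresholds 0.50:0.95, the headline COCO
    # metric.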
    print('Current AP: {:.5f}'.format(stats[0]))
    metric_names = [
        'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1', 'ARmax10',
        'ARmax100', 'ARs', 'ARm', 'ARl'
    ]
    coco_time = time.time()
    tf.logging.info('COCO eval DONE (t={:0.2f}s).'.format(coco_time - toc))

    # Prefix with "COCO" to group in TensorBoard.
    return {'COCO/' + key: value for key, value in zip(metric_names, stats)}
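

# Example usage (a minimal sketch; `create_coco`, `eval_input_fn` and the
# `estimator` object are assumptions about the surrounding pipeline, not part
# of this function):
#
#   coco_gt = create_coco(val_json_file, use_cpp_extension=True)
#   predictions = estimator.predict(input_fn=eval_input_fn)
#   eval_metrics = compute_map(predictions, coco_gt, use_cpp_extension=True)
#   tf.logging.info('COCO/AP = %f', eval_metrics['COCO/AP'])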