def evaluation(
    ground_truth_file: str,
    predictions_file: str,
    string_to_match: Optional[str] = None,
) -> None:
    """Evaluate the predictions of a dataset with COCO bbox metrics.

    Loads the ground truth and predictions, runs ``COCOeval`` over the
    (optionally filtered) image set, and prints the standard COCO summary.

    Args:
        ground_truth_file: Path to COCO ground truth file.
        predictions_file: Path to COCO predictions file.
        string_to_match: If not None, only images whose ``file_name`` matches
            this regular expression (via ``re.match``) will be evaluated.
    """
    with open(ground_truth_file) as gt:
        coco_ground_truth = load_coco_ground_truth_from_StringIO(gt)

    with open(predictions_file) as pred:
        coco_predictions = coco_ground_truth.loadRes(json.load(pred))

    coco_eval = COCOeval(coco_ground_truth, coco_predictions, "bbox")

    if string_to_match is not None:
        filtered_ids = [
            k
            for k, v in coco_ground_truth.imgs.items()
            if re.match(string_to_match, v["file_name"])
        ]
        logger.info("Number of filtered_ids: {}".format(len(filtered_ids)))
    else:
        filtered_ids = list(coco_ground_truth.imgs)

    # BUGFIX: COCOeval reads the image list from `params.imgIds`, not from an
    # `image_ids` attribute. Assigning `coco_eval.image_ids` was a silent no-op,
    # so the `string_to_match` filter was never actually applied.
    coco_eval.params.imgIds = filtered_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()