def evaluate_coco_PRC(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
    """Runs official COCO evaluation and saves a precision-recall curve plot
    plus COCO / PASCAL-VOC summary tables as CSV files.

    model: the detection model; must expose detect([image], verbose=0)
    dataset: A Dataset object with validation data
    coco: a pycocotools COCO object for the ground truth
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    image_ids: optional explicit list of dataset image ids to evaluate

    Side effects: writes 'PRC<limit>.jpg', 'output_coco_*.csv' and
    'output_voc_*.csv' to the working directory, and prints timing/metrics.
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids

    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]

    # Get corresponding COCO image IDs. (renamed loop var: `id` shadowed the builtin)
    coco_image_ids = [dataset.image_info[img_id]["id"] for img_id in image_ids]

    # --- Pass 1: detections with class ids shifted by -1, used for the PR curve.
    t_start = time.time()
    results, t_prediction = _collect_detections(
        model, dataset, image_ids, coco_image_ids, shift_class_ids=True)

    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)

    # Evaluate at a single IoU threshold over a single area range and a single
    # maxDets value so the precision array collapses to one PR curve.
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.params.iouThrs = [0.1]
    cocoEval.params.areaRng = [[0, 10000000000.0]]
    cocoEval.params.maxDets = [100]
    cocoEval.evaluate()
    cocoEval.accumulate()

    # precision is indexed [iou, recall, class, area, maxDets]; with the
    # single-valued params above every axis but recall has one entry.
    precision = cocoEval.eval['precision'][0, :, 0, 0, 0]
    recall = cocoEval.params.recThrs
    plt.plot(recall, precision, 'ro')
    plt.xlabel('recall')
    plt.ylabel('precision')
    # Fixed: title previously said "0,5" but iouThrs above is 0.1.
    plt.title('PRC IoU 0.1')
    plt.savefig(fname='PRC' + str(limit) + '.jpg')

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)

    # --- Pass 2: detections with the original class ids, used for the
    # COCO / PASCAL-VOC summary metrics over the default COCOeval params.
    results, t_prediction = _collect_detections(
        model, dataset, image_ids, coco_image_ids, shift_class_ids=False)

    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)

    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()

    print('Original COCO metrics')
    sumcoco = pd.DataFrame(cocoEval.summarize_coco())
    print('Original PASCAL VOC metrics')
    sumvoc = pd.DataFrame(cocoEval.summarize_voc())

    # NOTE(review): relies on a module-level `args` (argparse namespace with a
    # `.model` path) — confirm it is defined before this function is called.
    sumcoco.to_csv('output_coco_%s.csv' % args.model[-6:])
    sumvoc.to_csv('output_voc_%s.csv' % args.model[-6:])


def _collect_detections(model, dataset, image_ids, coco_image_ids, shift_class_ids):
    """Run the model on each image; return (COCO-format results, total predict seconds).

    shift_class_ids: when True, subtract 1 from every detected class id before
    converting to COCO format (the PRC pass needs the shifted ids).
    """
    t_prediction = 0
    results = []
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)

        # Run detection
        t = time.time()
        r = model.detect([image], verbose=0)[0]
        t_prediction += (time.time() - t)

        if shift_class_ids:
            r["class_ids"] = [x - 1 for x in r["class_ids"]]

        # Convert results to COCO format.
        # Cast masks to uint8 because COCO tools errors out on bool.
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"],
                                           r["masks"].astype(np.uint8))
        results.extend(image_results)
    return results, t_prediction