def test_unknown_category(self):
    dataset = "coco_2017_val_100"
    evaluator = COCOEvaluator(dataset)
    evaluator.reset()
    inputs = DatasetCatalog.get(dataset)[:2]
    pred = Instances((100, 100))
    pred.pred_boxes = Boxes(torch.rand(2, 4))
    pred.scores = torch.rand(2)
    # Class id 80 is one past COCO's contiguous range [0, 79], so it is unknown
    # to the dataset metadata and evaluate() is expected to fail.
    pred.pred_classes = torch.tensor([10, 80])
    output = {"instances": pred}
    evaluator.process(inputs, [output, output])
    with self.assertRaises(AssertionError):
        evaluator.evaluate()
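# For contrast, a minimal sketch of the passing case, assuming the same test
# fixtures as above: both class ids now fall inside COCO's valid contiguous
# range [0, 79], so evaluate() runs to completion and returns a dict of AP
# metrics (the values are meaningless for random boxes, but no assertion fires).
import torch
from detectron2.data import DatasetCatalog
from detectron2.evaluation import COCOEvaluator
from detectron2.structures import Boxes, Instances

dataset = "coco_2017_val_100"
evaluator = COCOEvaluator(dataset)
evaluator.reset()
inputs = DatasetCatalog.get(dataset)[:2]
pred = Instances((100, 100))
pred.pred_boxes = Boxes(torch.rand(2, 4))
pred.scores = torch.rand(2)
pred.pred_classes = torch.tensor([10, 79])  # both ids are valid COCO classes
evaluator.process(inputs, [{"instances": pred}, {"instances": pred}])
results = evaluator.evaluate()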
def coco_evaluation(cfg, dataset_name):
    predictor = DefaultPredictor(cfg)
    dataset_name_with_prefix = get_name_with_prefix(dataset_name, cfg.DATASETS.USE_DIRECTION_CLASSES)
    evaluator = COCOEvaluator(dataset_name_with_prefix, cfg, distributed=False, output_dir=cfg.OUTPUT_DIR)
    predictions = [predictor(img) for img in load_all_image_in_dataset(dataset_name, cfg)]
    dataset = get_dataset(dataset_name, cfg)
    evaluator.reset()
    evaluator.process(dataset, predictions)
    return evaluator.evaluate()
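# Hypothetical usage sketch for coco_evaluation above. setup_cfg and
# "my_val_split" are made-up stand-ins for however the project builds its
# config and names its datasets; get_name_with_prefix, load_all_image_in_dataset
# and get_dataset are project-specific helpers taken from the snippet itself.
cfg = setup_cfg()  # hypothetical helper
results = coco_evaluation(cfg, "my_val_split")
print(results["bbox"]["AP"])  # evaluate() returns a dict keyed by task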
def KITTIMOTS_evaluation_task(model_name, model_file):
    path = os.path.join(SAVE_PATH, 'eval_inf_task', model_name)
    if not os.path.exists(path):
        os.makedirs(path)

    # Load data
    print('Loading Data.')
    dataloader = KITTIMOTS_Dataloader()

    def kitti_test():
        return dataloader.get_dicts(train_flag=False)

    DatasetCatalog.register("KITTIMOTS_test", kitti_test)
    MetadataCatalog.get("KITTIMOTS_test").set(thing_classes=list(KITTI_CATEGORIES.keys()))

    # Load model and configuration
    print('Loading Model.')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    predictor = DefaultPredictor(cfg)

    # Reuse cached predictions if they exist; otherwise run inference and cache them
    predictions_path = os.path.join(SAVE_PATH, "predictions.pkl")
    if os.path.exists(predictions_path):
        with open(predictions_path, "rb") as f:
            predictions = pickle.load(f)
    else:
        print('Using Model to predict on input')
        predictions = []
        for input_test in kitti_test():
            img = cv2.imread(input_test['file_name'])
            predictions.append(predictor(img))
        with open(predictions_path, "wb") as f:
            pickle.dump(predictions, f)
    print('Predictions length ' + str(len(predictions)))
    print('Inputs length ' + str(len(kitti_test())))

    # Evaluation
    print('Evaluating......')
    evaluator = COCOEvaluator('KITTIMOTS_test', cfg, False, output_dir="./output/")
    evaluator.reset()
    evaluator.process(kitti_test(), predictions)
    evaluator.evaluate()
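# Equivalent sketch using detectron2's built-in helpers in place of the manual
# predict-and-pickle loop above; assumes the same cfg, predictor and
# "KITTIMOTS_test" registration. inference_on_dataset wraps the
# reset()/process()/evaluate() cycle in a single call.
from detectron2.data import build_detection_test_loader
from detectron2.evaluation import inference_on_dataset

val_loader = build_detection_test_loader(cfg, "KITTIMOTS_test")
evaluator = COCOEvaluator('KITTIMOTS_test', cfg, False, output_dir="./output/")
results = inference_on_dataset(predictor.model, val_loader, evaluator)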
image["image_id"] = os.path.splitext(os.path.split(image["image_id"])[-1])[0] # Evaluating the prediction: # Setting up the COCO evaluation: coco_evaluator = COCOEvaluator(dataset_name, cfg, distributed=True, output_dir=output_folder_path) # Setting up the PASCAL VOC evaluation: my_dataset.dirname = os.path.join(output_folder_path, "PascalVOCAnnotations") my_dataset.split = 'test' my_dataset.year = 2012 dataset.to_pascal(my_dataset.dirname) # Converting the dataset to PASCAL VOC pascal_evaluator = PascalVOCDetectionEvaluator(dataset_name) # COCO evaluating: coco_evaluator.reset() coco_evaluator.process(images, outputs_list) coco_results = coco_evaluator.evaluate() # Dumping the COCO evaluation results: coco_results_file_path = os.path.join(output_folder_path, "coco_eval_results.json") with open(coco_results_file_path, "w") as coco_results_file: json.dump(coco_results, coco_results_file, indent=2) print("COCO EVALUATION FINISHED") # PASCAL VOC evaluating: pascal_evaluator.reset() pascal_evaluator.process(images, outputs_list) pascal_results = pascal_evaluator.evaluate()
data_loader = build_detection_test_loader(cfg, "my_dataset")

# Define evaluator
evaluator = COCOEvaluator("my_dataset", ("bbox",), False, output_dir=args.output)

# Evaluation
with torch.no_grad():
    def get_all_inputs_outputs():
        # Assumes `model` behaves like a DefaultPredictor: it takes a raw BGR
        # array and returns a {"instances": ...} dict for a single image.
        for data in data_loader:
            img = read_image(data[0]["file_name"], format="BGR")
            pred = model(img)
            yield data, [pred]

    evaluator.reset()
    for inputs, outputs in get_all_inputs_outputs():
        evaluator.process(inputs, outputs)
    eval_results = evaluator.evaluate()  # Compare predictions to annotations
    print(eval_results)

results_file = args.output + 'eval_results.txt'
with open(results_file, 'a', encoding='utf-8') as f:
    f.write(str(eval_results))
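# Sketch: pulling a single metric out of eval_results before writing it; the
# key layout follows COCOEvaluator's standard output, which is a dict keyed by
# task, e.g. {"bbox": {"AP": ..., "AP50": ..., "AP75": ...}}.
bbox_ap = eval_results["bbox"]["AP"]
print(f"bbox AP: {bbox_ap:.3f}")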