import argparse
import pathlib

import torch
from tqdm import tqdm

# Project-local dependencies assumed to be provided by the surrounding repo
# (module paths follow the common pytorch-ssd layout; adjust to your project):
# from vision.datasets.voc_dataset import VOCDataset
# from vision.ssd.mobilenet_v2_ssd_lite import (create_mobilenetv2_ssd_lite,
#                                               create_mobilenetv2_ssd_lite_predictor)
# from vision.utils.misc import Timer, str2bool
# group_annotation_by_class / compute_average_precision_per_class are the
# VOC evaluation helpers defined alongside these functions.


def main():
    parser = argparse.ArgumentParser(
        description="SSD Evaluation on VOC Dataset.")
    parser.add_argument("--trained_model", type=str)
    parser.add_argument(
        "--dataset_type", default="voc", type=str,
        help='Specify dataset type. Currently supports voc and open_images.')
    parser.add_argument(
        "--dataset", type=str,
        help="The root directory of the VOC dataset or Open Images dataset.")
    parser.add_argument("--label_file", type=str, help="The label file path.")
    parser.add_argument("--use_cuda", type=str2bool, default=True)
    parser.add_argument("--use_2007_metric", type=str2bool, default=True)
    parser.add_argument("--nms_method", type=str, default="hard")
    parser.add_argument("--iou_threshold", type=float, default=0.5,
                        help="The threshold of Intersection over Union.")
    parser.add_argument("--eval_dir", default="eval_results", type=str,
                        help="The directory to store evaluation results.")
    parser.add_argument('--mb2_width_mult', default=1.0, type=float,
                        help='Width multiplier for MobileNetV2.')
    args = parser.parse_args()

    DEVICE = torch.device(
        "cuda:0" if torch.cuda.is_available() and args.use_cuda else "cpu")

    eval_path = pathlib.Path(args.eval_dir)
    eval_path.mkdir(exist_ok=True)
    timer = Timer()
    class_names = [name.strip() for name in open(args.label_file).readlines()]

    # Only the VOC branch is handled here; other dataset types would need
    # their own dataset loader.
    if args.dataset_type == "voc":
        dataset = VOCDataset(args.dataset, is_test=True)
    else:
        raise ValueError(f"Unsupported dataset type: {args.dataset_type}")

    true_case_stat, all_gb_boxes, all_difficult_cases = group_annotation_by_class(
        dataset)

    net = create_mobilenetv2_ssd_lite(len(class_names),
                                      width_mult=args.mb2_width_mult,
                                      is_test=True)

    timer.start("Load Model")
    net.load(args.trained_model)
    net = net.to(DEVICE)
    print(f'It took {timer.end("Load Model")} seconds to load the model.')
    predictor = create_mobilenetv2_ssd_lite_predictor(
        net, nms_method=args.nms_method, device=DEVICE)

    # Run the detector over every test image and collect all detections as
    # rows of [image_index, label, score, x1, y1, x2, y2].
    results = []
    for i in range(len(dataset)):
        print("process image", i)
        timer.start("Load Image")
        image = dataset.get_image(i)
        print("Load Image: {:.4f} seconds.".format(timer.end("Load Image")))
        timer.start("Predict")
        boxes, labels, probs = predictor.predict(image)
        print("Prediction: {:.4f} seconds.".format(timer.end("Predict")))
        indexes = torch.ones(labels.size(0), 1, dtype=torch.float32) * i
        results.append(
            torch.cat([
                indexes.reshape(-1, 1),
                labels.reshape(-1, 1).float(),
                probs.reshape(-1, 1),
                boxes + 1.0  # matlab's indexes start from 1
            ], dim=1))
    results = torch.cat(results)

    # Write one detection file per class in the format expected by the VOC
    # evaluation code: "<image_id> <score> <x1> <y1> <x2> <y2>".
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue  # ignore background
        prediction_path = eval_path / f"det_test_{class_name}.txt"
        with open(prediction_path, "w") as f:
            sub = results[results[:, 1] == class_index, :]
            for i in range(sub.size(0)):
                prob_box = sub[i, 2:].numpy()
                image_id = dataset.ids[int(sub[i, 0])]
                print(image_id + " " + " ".join([str(v) for v in prob_box]),
                      file=f)

    # Compute per-class average precision and the mean over all classes.
    aps = []
    print("\n\nAverage Precision Per-class:")
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue
        prediction_path = eval_path / f"det_test_{class_name}.txt"
        ap = compute_average_precision_per_class(
            true_case_stat[class_index],
            all_gb_boxes[class_index],
            all_difficult_cases[class_index],
            prediction_path,
            args.iou_threshold,
            args.use_2007_metric)
        aps.append(ap)
        print(f"{class_name}: {ap}")

    print(f"\nAverage Precision Across All Classes: {sum(aps) / len(aps)}")
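# Example command line (hypothetical script name and paths) for running the
# evaluation above; --trained_model points at a checkpoint readable by
# net.load(), and --label_file lists the class names with background first:
#
#   python eval_ssd.py --trained_model models/mb2-ssd-lite.pth \
#       --dataset ~/data/VOCdevkit/VOC2007 \
#       --label_file models/voc-model-labels.txt \
#       --nms_method hard --iou_threshold 0.5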
def get_map(net_para, dataset, label_file):
    """Evaluate a set of network parameters on a VOC test set and return mAP."""
    DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    eval_path = pathlib.Path("eval_results")
    eval_path.mkdir(exist_ok=True)
    timer = Timer()
    class_names = [name.strip() for name in open(label_file).readlines()]

    dataset = VOCDataset(dataset, is_test=True)
    true_case_stat, all_gb_boxes, all_difficult_cases = group_annotation_by_class(
        dataset)

    net = create_mobilenetv2_ssd_lite(len(class_names), width_mult=1.0,
                                      is_test=True)
    timer.start("Load Model")
    net.load_weight(net_para)  # load the parameters passed in directly
    net = net.to(DEVICE)
    predictor = create_mobilenetv2_ssd_lite_predictor(net, nms_method="hard",
                                                      device=DEVICE)

    # Collect all detections as rows of
    # [image_index, label, score, x1, y1, x2, y2].
    results = []
    for i in tqdm(range(len(dataset))):
        timer.start("Load Image")
        image = dataset.get_image(i)
        timer.start("Predict")
        boxes, labels, probs = predictor.predict(image)
        indexes = torch.ones(labels.size(0), 1, dtype=torch.float32) * i
        results.append(
            torch.cat([
                indexes.reshape(-1, 1),
                labels.reshape(-1, 1).float(),
                probs.reshape(-1, 1),
                boxes + 1.0  # matlab's indexes start from 1
            ], dim=1))
    results = torch.cat(results)

    # Write one detection file per class in the VOC evaluation format.
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue  # ignore background
        prediction_path = eval_path / f"det_test_{class_name}.txt"
        with open(prediction_path, "w") as f:
            sub = results[results[:, 1] == class_index, :]
            for i in range(sub.size(0)):
                prob_box = sub[i, 2:].numpy()
                image_id = dataset.ids[int(sub[i, 0])]
                print(image_id + " " + " ".join([str(v) for v in prob_box]),
                      file=f)

    # Per-class average precision with an IoU threshold of 0.5 and the
    # VOC 2007 11-point metric.
    aps = []
    print("\n\nAverage Precision Per-class:")
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue
        prediction_path = eval_path / f"det_test_{class_name}.txt"
        ap = compute_average_precision_per_class(
            true_case_stat[class_index],
            all_gb_boxes[class_index],
            all_difficult_cases[class_index],
            prediction_path,
            0.5,
            True)
        aps.append(ap)
        print(f"{class_name}: {ap}")

    print(f"\nAverage Precision Across All Classes: {sum(aps) / len(aps)}")
    return sum(aps) / len(aps)
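# Minimal usage sketch for get_map() (hypothetical paths and checkpoint name):
# it takes the parameters to hand to net.load_weight() (here assumed to be a
# state_dict; adapt to whatever load_weight() expects), the VOC root directory,
# and the label file, and returns the mean AP over all foreground classes.
#
#   state_dict = torch.load("checkpoints/mb2-ssd-lite-epoch10.pth", map_location="cpu")
#   mean_ap = get_map(state_dict, "data/VOCdevkit/VOC2007", "models/voc-model-labels.txt")
#   print(f"mAP: {mean_ap:.4f}")


if __name__ == "__main__":
    # Assumption: the surrounding file does not already define a CLI entry
    # point; this lets the module double as a standalone evaluation script.
    main()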