def main(args):
    """Evaluate a trained SSD detector and print per-class average precision.

    Builds the dataset and network selected in ``args``, runs the predictor
    over every image, writes one ``det_test_<class>.txt`` file per class into
    the eval dir, then computes and prints AP / precision / recall per class.

    Args:
        args: nested configuration mapping with ``flow_control`` and
            ``Datasets`` sections (dict-style access throughout).

    Raises:
        NotImplementedError: for the ``voc`` / ``open_images`` dataset types,
            which are declared but not wired up in this flow.
        ValueError: for an unrecognised ``dataset_type``.
        SystemExit: for an unrecognised ``net`` type.

    Side effects: creates ``args['flow_control']['eval_dir']`` and files
    inside it; prints timing and metric summaries to stdout.
    """
    DEVICE = torch.device("cuda:0" if torch.cuda.is_available()
                          and args['flow_control']['use_cuda'] else "cpu")

    if not os.path.exists(args['flow_control']['eval_dir']):
        os.mkdir(args['flow_control']['eval_dir'])
    timer = Timer()
    class_names = [
        name.strip()
        for name in open(args['flow_control']['label_file']).readlines()
    ]
    _net = args['flow_control']['net']
    _dataset_type = args['flow_control']['dataset_type']

    # --- dataset selection -------------------------------------------------
    if _dataset_type == "voc":
        # The VOCDataset construction that used to follow this raise was
        # unreachable dead code and has been removed.
        raise NotImplementedError("Not implement error")
    elif _dataset_type == 'open_images':
        raise NotImplementedError("Not implement error")
    elif _dataset_type == "coco":
        dataset = CocoDetection(args['Datasets']['coco']['val_image_path'],
                                args['Datasets']['coco']['val_anno_path'])
    elif _dataset_type == "ecp":
        dataset = EuroCity_Dataset(args['Datasets']['ecp']['val_image_path'],
                                   args['Datasets']['ecp']['val_anno_path'])
    else:
        # Fail fast instead of hitting a NameError on `dataset` below.
        raise ValueError("Unsupported dataset_type: {}".format(_dataset_type))

    true_case_stat, all_gb_boxes, all_difficult_cases = \
        group_annotation_by_class(dataset)

    # --- network construction ----------------------------------------------
    if _net == 'vgg16-ssd':
        net = create_vgg_ssd(len(class_names), is_test=True)
    elif _net == 'mb1-ssd':
        net = create_mobilenetv1_ssd(len(class_names), is_test=True)
    elif _net == 'mb1-ssd-lite':
        net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True)
    elif _net == 'sq-ssd-lite':
        net = create_squeezenet_ssd_lite(len(class_names), is_test=True)
    elif _net == 'mb2-ssd-lite':
        net = create_mobilenetv2_ssd_lite(
            len(class_names),
            width_mult=args['flow_control']['mb2_width_mult'],
            is_test=True)
    else:
        logging.fatal(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        parser.print_help(sys.stderr)
        sys.exit(1)

    # --- load trained weights ----------------------------------------------
    # (A leftover `import pdb; pdb.set_trace()` that halted every run here
    # has been removed.)
    timer.start("Load Model")
    net.load(args['flow_control']['trained_model'])
    net = net.to(DEVICE)
    print('It took {} seconds to load the model.'.format(
        timer.end("Load Model")))

    # --- predictor construction --------------------------------------------
    _nms_method = args['flow_control']['nms_method']
    if _net == 'vgg16-ssd':
        predictor = create_vgg_ssd_predictor(net,
                                             nms_method=_nms_method,
                                             device=DEVICE)
    elif _net == 'mb1-ssd':
        predictor = create_mobilenetv1_ssd_predictor(net,
                                                     nms_method=_nms_method,
                                                     device=DEVICE)
    elif _net == 'mb1-ssd-lite':
        predictor = create_mobilenetv1_ssd_lite_predictor(
            net, nms_method=_nms_method, device=DEVICE)
    elif _net == 'sq-ssd-lite':
        predictor = create_squeezenet_ssd_lite_predictor(
            net, nms_method=_nms_method, device=DEVICE)
    elif _net == 'mb2-ssd-lite':
        predictor = create_mobilenetv2_ssd_lite_predictor(
            net, nms_method=_nms_method, device=DEVICE)
    else:
        logging.fatal(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        parser.print_help(sys.stderr)
        sys.exit(1)

    # --- run inference over the whole dataset ------------------------------
    results = []
    for i in range(len(dataset)):
        # BUG FIX: was print("process image {}", i), which printed the
        # literal braces instead of the image index.
        print("process image {}".format(i))
        timer.start("Load Image")
        image = dataset.get_image(i)
        print("Load Image: {:4f} seconds.".format(timer.end("Load Image")))
        timer.start("Predict")
        boxes, labels, probs = predictor.predict(image)
        print("Prediction: {:4f} seconds.".format(timer.end("Predict")))
        # Tag every detection row with its image index so rows can be mapped
        # back to images after the concat below.
        indexes = torch.ones(labels.size(0), 1, dtype=torch.float32) * i
        results.append(
            torch.cat(
                [
                    indexes.reshape(-1, 1),
                    labels.reshape(-1, 1).float(),
                    probs.reshape(-1, 1),
                    boxes + 1.0  # matlab's indexes start from 1
                ],
                dim=1))
    results = torch.cat(results)

    # --- write per-class detection files ------------------------------------
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue  # ignore background
        file_name = "det_test_{}.txt".format(class_name)
        prediction_path = os.path.join(args['flow_control']['eval_dir'],
                                       file_name)
        with open(prediction_path, "w") as f:
            sub = results[results[:, 1] == class_index, :]
            for i in range(sub.size(0)):
                prob_box = sub[i, 2:].numpy()
                image_id, _ = dataset.get_annotation(int(sub[i, 0]))
                f.write(
                    str(image_id) + " " +
                    " ".join([str(v) for v in prob_box]) + "\n")

    # --- score --------------------------------------------------------------
    aps = []
    prcs = []
    recalls = []
    print("\n\nAverage Precision Per-class:")
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue
        file_name = "det_test_{}.txt".format(class_name)
        prediction_path = os.path.join(args['flow_control']['eval_dir'],
                                       file_name)
        # VOC-style, IoU-thresholded AP evaluation.
        ap, precision, recall = compute_average_precision_per_class(
            args, true_case_stat[class_index], all_gb_boxes[class_index],
            all_difficult_cases[class_index], prediction_path,
            args['flow_control']['iou_threshold'],
            args['flow_control']['use_2007_metric'])
        aps.append(ap)
        prcs.append(precision)
        recalls.append(recall)
        print("{}: {}".format(class_name, ap))

    # NOTE(review): despite the "All Classes" wording, the summaries below
    # average only the first 5 classes (the [0:5] slice) — confirm intent.
    print("\nAverage Precision Across All Classes:{}".format(
        sum(aps[0:5]) / len(aps[0:5])))
    print("\nAverage Precision :{}".format(sum(prcs[0:5]) / len(prcs[0:5])))
    print("\nAverage Recall :{}".format(sum(recalls[0:5]) /
                                        len(recalls[0:5])))
# image = imutils.resize(image,image.shape[1], image.shape[0] * 2) image = cv2.resize(src_image, (src_image.shape[1], src_image.shape[0] * 2)) max_edge = max(image.shape[0], image.shape[1]) image = cv2.copyMakeBorder(image, 0, max_edge - image.shape[0], 0, max_edge - image.shape[1], cv2.BORDER_CONSTANT) print("Load Image: {:4f} seconds.".format(timer.end("Load Image"))) timer.start("Predict") boxes, labels, probs, masks, angle, Matrix, factor = predictor.predict( image, gt_mask) print("Prediction: {:4f} seconds.".format(timer.end("Predict"))) orig_image = image.copy() image_id, annotation = dataset.get_annotation(i) gt_boxes, classes, is_difficult = annotation seg_mask = masks[0] seg_mask = torch.squeeze(seg_mask) seg_mask = seg_mask.cpu().detach().numpy().astype(np.float32) ran_str = ''.join( random.sample(string.ascii_letters + string.digits, 20)) ocr_cropped_bbox = 'eval_results/bbox_v2/' + ran_str + ".png" ocr_cropped_heatmap = 'eval_results/heatmap22222_v2/' + ran_str + ".png" print(factor) final_pts = [] for i in range(boxes.size(0)): if probs[i] > 0.4: box = boxes[i, :]
probs.reshape(-1, 1), boxes + 1.0 # matlab's indexes start from 1 ], dim=1)) results = torch.cat(results) # Write the result to file for class_index, class_name in enumerate(class_names): if class_index == 0: continue # ignore background file_name = "det_test_{}.txt".format(class_name) prediction_path = os.path.join(args.eval_dir, file_name) with open(prediction_path, "w") as f: sub = results[results[:, 1] == class_index, :] for i in range(sub.size(0)): prob_box = sub[i, 2:].numpy() image_id, _ = dataset.get_annotation(int(sub[i, 0])) f.write( str(image_id) + " " + " ".join([str(v) for v in prob_box]) + "\n") # image_id = dataset.ids[int(sub[i, 0])] # print(str(image_id) + " " + " ".join([str(v) for v in prob_box]), file=f) aps = [] prcs = [] recalls = [] print("\n\nAverage Precision Per-class:") for class_index, class_name in enumerate(class_names): if class_index == 0: continue file_name = "det_test_{}.txt".format(class_name) prediction_path = os.path.join(args.eval_dir, file_name)
indexes = torch.ones(labels.size(0), 1, dtype=torch.float32) * i results.append( torch.cat( [ indexes.reshape(-1, 1), labels.reshape(-1, 1).float(), probs.reshape(-1, 1), boxes + 1.0 # matlab's indexes start from 1 ], dim=1)) # draw eval results save_folder = './eval_imgs' if not os.path.exists(save_folder): os.mkdir(save_folder) frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) img_name, _ = dataset.get_annotation(i) for ind in range(len(probs)): if probs[ind] > args.iou_threshold: pt = boxes[ind].cpu().numpy() #paras are image, top left coordinate, bottom right, color, line thickness. cv2.rectangle(frame, (int(pt[0]), int(pt[1])), (int(pt[2]), int(pt[3])), COLORS[labels[ind] % 4], 2) text = labelmap[labels[ind]] + ': ' + '{:.2f}'.format( probs[ind].cpu().numpy()) cv2.putText(frame, text, (int(pt[0]), int(pt[1])), FONT, 1, (255, 255, 255), 2, cv2.LINE_AA) cv2.imwrite(save_folder + '/' + img_name + '.jpg', frame) results = torch.cat(results) for class_index, class_name in enumerate(class_names):