def eval(net, dataset):
    # NOTE: this fragment assumes the caller has already built `dataloader`,
    # `box_coder`, the `pred_*` / `gt_*` accumulator lists and `gt_difficults`
    # (see the fuller variant below for that setup).
    for i, (inputs, box_targets, label_targets) in enumerate(dataloader):
        print('%d/%d' % (i, len(dataloader)))
        gt_boxes.append(box_targets.squeeze(0))
        gt_labels.append(label_targets.squeeze(0))

        loc_preds, cls_preds = net(Variable(inputs.cuda(), volatile=True))
        box_preds, label_preds, score_preds = box_coder.decode(
            loc_preds.cuda().data.squeeze(),
            F.softmax(cls_preds.squeeze(), dim=1).cuda().data,
            score_thresh=0.01)

        pred_boxes.append(box_preds)
        pred_labels.append(label_preds)
        pred_scores.append(score_preds)

    print(voc_eval(pred_boxes, pred_labels, pred_scores,
                   gt_boxes, gt_labels, gt_difficults,
                   iou_thresh=0.5, use_07_metric=True))
def eval(net):
    net.eval()

    def transform(img, boxes, labels):
        img, boxes = resize(img, boxes, size=(img_size, img_size))
        img = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])(img)
        return img, boxes, labels

    dataset = ListDataset(root=args.data_root,
                          list_file=args.voc07_test, transform=transform)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=False, num_workers=8)
    box_coder = SSDBoxCoder(net)

    pred_boxes = []
    pred_labels = []
    pred_scores = []
    gt_boxes = []
    gt_labels = []

    with open('torchcv/datasets/voc/voc07_test_difficult.txt') as f:
        gt_difficults = []
        for line in f.readlines():
            line = line.strip().split()
            d = np.array([int(x) for x in line[1:]])
            gt_difficults.append(d)

    for i, (inputs, box_targets, label_targets) in enumerate(dataloader):
        print('%d/%d' % (i, len(dataloader)))
        gt_boxes.append(box_targets.squeeze(0))
        gt_labels.append(label_targets.squeeze(0))

        loc_preds, cls_preds = net(Variable(inputs.cuda(), volatile=True))
        box_preds, label_preds, score_preds = box_coder.decode(
            loc_preds.cpu().data.squeeze(),
            F.softmax(cls_preds.squeeze(), dim=1).cpu().data,
            score_thresh=0.01)

        pred_boxes.append(box_preds)
        pred_labels.append(label_preds)
        pred_scores.append(score_preds)

    aps = voc_eval(pred_boxes, pred_labels, pred_scores,
                   gt_boxes, gt_labels, gt_difficults,
                   iou_thresh=0.5, use_07_metric=True)
    net.train()
    return aps
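The snippets in this section all rely on the same PyTorch, torchvision, NumPy, tqdm, and torchcv helpers but omit their imports. The sketch below lists what they assume; the torchcv module paths in particular are an assumption and may need to be adjusted to the installed version of the library.

# Minimal import sketch for the eval/evaluate snippets in this section.
# NOTE: the torchcv paths below are assumptions and may differ between versions.
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.autograd import Variable  # legacy API, kept because the snippets use it
from tqdm import tqdm

from torchcv.datasets import ListDataset            # assumed path
from torchcv.models.ssd import SSDBoxCoder          # assumed path
from torchcv.evaluations.voc_eval import voc_eval   # assumed path
from torchcv.transforms import resize               # assumed path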
def eval(net, test_num=10000):
    net.eval()

    def transform(img, boxes, labels):
        img, boxes = resize(img, boxes, size=(opt.img_size, opt.img_size))
        img = transforms.Compose([transforms.ToTensor(), caffe_normalize])(img)
        return img, boxes, labels

    dataset = ListDataset(root=opt.eval_img_root,
                          list_file=opt.eval_img_list, transform=transform)
    box_coder = SSDBoxCoder(net.module)

    pred_boxes = []
    pred_labels = []
    pred_scores = []
    gt_boxes = []
    gt_labels = []

    # with open('torchcv/datasets/voc/voc07_test_difficult.txt') as f:
    #     gt_difficults = []
    #     for line in f.readlines():
    #         line = line.strip().split()
    #         d = np.array([int(x) for x in line[1:]])
    #         gt_difficults.append(d)

    num_imgs = len(dataset)
    for i in tqdm(range(num_imgs)):
        inputs, box_targets, label_targets = dataset[i]
        gt_boxes.append(box_targets)
        gt_labels.append(label_targets)

        inputs = inputs.unsqueeze(0)
        with torch.no_grad():
            loc_preds, cls_preds = net(Variable(inputs.cuda()))
            box_preds, label_preds, score_preds = box_coder.decode(
                loc_preds.cpu().data.squeeze(),
                F.softmax(cls_preds.squeeze(), dim=1).cpu().data,
                score_thresh=0.1)

        pred_boxes.append(box_preds)
        pred_labels.append(label_preds)
        pred_scores.append(score_preds)

    aps = voc_eval(pred_boxes, pred_labels, pred_scores,
                   gt_boxes, gt_labels, gt_difficults=None,
                   iou_thresh=0.5, use_07_metric=False)
    net.train()
    return aps
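The variant above builds its box coder from net.module, which only exists when the network was wrapped in nn.DataParallel before being passed in. Below is a minimal sketch of that assumption; the model class, class count, and checkpoint path are hypothetical placeholders, not part of the original code.

# Sketch of the DataParallel wrapping implied by the net.module access above.
# SSD300, num_classes=21, and the checkpoint path are hypothetical placeholders.
import torch
import torch.nn as nn
from torchcv.models.ssd import SSD300   # assumed path / hypothetical model choice

net = SSD300(num_classes=21)
net.load_state_dict(torch.load('ssd300_weights.pth'))  # hypothetical weights file
net = nn.DataParallel(net).cuda()  # the wrapped model is then reachable as net.module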
def evaluate(net, img_dir, list_file, img_size, test_code):
    net.cuda()
    net.eval()

    def transform(img, boxes, labels):
        img, boxes = resize(img, boxes, size=(img_size, img_size))
        img = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])(img)
        return img, boxes, labels

    print('Loading dataset..')
    dataset = ListDataset(root=img_dir, list_file=list_file, transform=transform)
    if test_code:
        dataset.num_imgs = 1
    dl = torch.utils.data.DataLoader(dataset, batch_size=1,
                                     shuffle=False, num_workers=2)
    box_coder = SSDBoxCoder(net)

    pred_boxes = []
    pred_labels = []
    pred_scores = []
    gt_boxes = []
    gt_labels = []

    tqdm_dl = tqdm(dl, desc="Evaluate", ncols=0)
    for i, (inputs, box_targets, label_targets) in enumerate(tqdm_dl):
        gt_boxes.append(box_targets.squeeze(0))
        gt_labels.append(label_targets.squeeze(0))

        loc_preds, cls_preds = net(Variable(inputs.cuda(), volatile=True))
        box_preds, label_preds, score_preds = box_coder.decode(
            loc_preds.cpu().data.squeeze(),
            F.softmax(cls_preds.squeeze(), dim=1).cpu().data,
            score_thresh=0.01)

        pred_boxes.append(box_preds)
        pred_labels.append(label_preds)
        pred_scores.append(score_preds)

    ap_map_dict = voc_eval(pred_boxes, pred_labels, pred_scores,
                           gt_boxes, gt_labels,
                           iou_thresh=0.5, use_07_metric=False)
    return ap_map_dict
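The evaluate() variant above takes all of its configuration through parameters, so it can be called directly. A hedged usage sketch follows; the model class, weights file, and data paths are hypothetical placeholders.

# Hypothetical call to evaluate(); model class, weights file, and paths are placeholders.
import torch
from torchcv.models.ssd import SSD512   # assumed path / hypothetical model choice

net = SSD512(num_classes=21)
net.load_state_dict(torch.load('ssd512_weights.pth'))  # hypothetical weights file

ap_map_dict = evaluate(net,
                       img_dir='path/to/VOC2007/JPEGImages',     # hypothetical layout
                       list_file='path/to/voc07_test_list.txt',  # hypothetical list file
                       img_size=512,
                       test_code=False)
print(ap_map_dict['map'])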
num_imgs = len(dataset)
for i in tqdm(range(num_imgs)):
    inputs, box_targets, label_targets = dataset[i]
    gt_boxes.append(box_targets)
    gt_labels.append(label_targets)

    inputs = inputs.unsqueeze(0)
    with torch.no_grad():
        loc_preds, cls_preds = net(Variable(inputs.cuda()))
        box_preds, label_preds, score_preds = box_coder.decode(
            loc_preds.cpu().data.squeeze(),
            F.softmax(cls_preds.squeeze(), dim=1).cpu().data,
            score_thresh=0.1)

    pred_boxes.append(box_preds)
    pred_labels.append(label_preds)
    pred_scores.append(score_preds)

print('Calculating AP..')
aps = voc_eval(pred_boxes, pred_labels, pred_scores,
               gt_boxes, gt_labels, gt_difficults=None,
               iou_thresh=0.5, use_07_metric=False)
print('ap = ', aps['ap'])
print('map = ', aps['map'])
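Note that the first, second, and fourth snippets still use the pre-0.4 Variable(inputs.cuda(), volatile=True) idiom; on PyTorch 0.4 and later the volatile flag is ignored with a warning, and the torch.no_grad() context already used in the last two snippets is the equivalent replacement:

# Equivalent inference call on PyTorch >= 0.4: no Variable wrapper, no volatile flag.
with torch.no_grad():
    loc_preds, cls_preds = net(inputs.cuda())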