Example #1
def calc_APs(iou_thresh=0.5, use_07_metric=False):
    """
    计算每个类别的ap值
    :param iou_thresh:
    :param use_07_metric:
    :return:dict{cls:ap}
    """
    filename = os.path.join(result_path, 'comp4_det_test_{:s}.txt')
    cachedir = os.path.join(result_path, 'cache')
    annopath = os.path.join(DATA_PATH, 'val', '{:s}.xml')
    imagesetfile = os.path.join(os.getcwd(), 'valid.txt')
    APs = {}
    # for i, cls in enumerate(self.classes):
    for i, cls in enumerate(classes_pred):  # clw modify: evaluate only the classes the detector predicted
        R, P, AP = voc_eval(filename, annopath, imagesetfile, cls, cachedir,
                            iou_thresh, use_07_metric)
        APs[cls] = AP
    if os.path.exists(cachedir):
        shutil.rmtree(cachedir)

    mAP = 0
    for i in APs:
        print("{} --> mAP : {}".format(i, APs[i]))
        mAP += APs[i]
    mAP = mAP / len(classes_pred)
    print('mAP: %g' % mAP)

    return APs
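calc_APs depends on module-level names that the snippet never defines, and it expects a valid.txt image list in the working directory. A minimal usage sketch, with result_path, DATA_PATH, and classes_pred as hypothetical placeholders:

import os
import shutil

# Hypothetical setup -- calc_APs reads these globals, but the snippet above does not define them.
result_path = './results'                 # holds comp4_det_test_<cls>.txt detection files
DATA_PATH = './VOCdevkit/VOC2007'         # holds val/<image_id>.xml annotations
classes_pred = ['person', 'car', 'dog']   # classes the detector actually emitted

APs = calc_APs(iou_thresh=0.5, use_07_metric=True)
best_cls = max(APs, key=APs.get)
print('best class: {} (AP = {:.4f})'.format(best_cls, APs[best_cls]))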
Example #2
 def _do_python_eval(self, output_dir = 'output'):
     print('--------------------------------------------------------------')
     print('Computing results with **unofficial** Python eval code.')
     print('Results should be very close to the official MATLAB eval code.')
     print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
     print('--------------------------------------------------------------')
     annopath = os.path.join(
         self._devkit_path,
         'VOC' + self._year,
         'Annotations',
         '{:s}.xml')
     imagesetfile = os.path.join(
         self._devkit_path,
         'VOC' + self._year,
         'ImageSets',
         'Main',
         self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = int(self._year) < 2010
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(
             filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
             use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
             pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)  # stdlib pickle (cPickle is Python 2 only); binary mode
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
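The per-class filename consumed by voc_eval follows the standard PASCAL VOC detection submission format: one detection per line as <image_id> <confidence> <xmin> <ymin> <xmax> <ymax>. A minimal sketch of writing such a file from hypothetical detections:

# Hypothetical detections: (image_id, score, xmin, ymin, xmax, ymax)
detections = [('000001', 0.92, 48.0, 30.5, 210.2, 180.9),
              ('000003', 0.41, 12.3, 60.0, 98.7, 144.1)]

with open('comp4_det_test_car.txt', 'w') as f:
    for image_id, score, x1, y1, x2, y2 in detections:
        # PASCAL VOC results format: <image_id> <confidence> <xmin> <ymin> <xmax> <ymax>
        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
            image_id, score, x1, y1, x2, y2))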
Example #3
def eval(net, dataset):
    # accumulators consumed by voc_eval after the loop
    gt_boxes, gt_labels = [], []
    pred_boxes, pred_labels, pred_scores = [], [], []
    for i, (inputs, box_targets, label_targets) in enumerate(dataloader):  # dataloader is assumed to wrap `dataset`
        print('%d/%d' % (i, len(dataloader)))
        gt_boxes.append(box_targets.squeeze(0))
        gt_labels.append(label_targets.squeeze(0))

        with torch.no_grad():  # modern replacement for the deprecated Variable(..., volatile=True)
            loc_preds, cls_preds = net(inputs.cuda())
        box_preds, label_preds, score_preds = box_coder.decode(
            loc_preds.cpu().data.squeeze(),
            F.softmax(cls_preds.squeeze(), dim=1).cpu().data,
            score_thresh=0.01)

        # for baidu race: take the top-scoring detection as the reference class/score
        _, idx = score_preds.sort(descending=True)
        gt_label = label_preds[idx[0]].item()
        gt_score = score_preds[idx[0]].item()

        box_preds = box_preds[idx[:100]]
        label_preds = label_preds[idx[:100]]
        score_preds = score_preds[idx[:100]]
        ss = label_preds.eq(gt_label)
        score_preds = score_preds / gt_score
        xx = score_preds.gt(0.01)
        box_preds = box_preds[ss | xx]
        label_preds = label_preds[ss | xx]
        score_preds = score_preds[ss | xx]
        # score_preds = score_preds[ss | xx] / gt_score

        # origin solver
        pred_boxes.append(box_preds)
        pred_labels.append(label_preds)
        pred_scores.append(score_preds)

    print(
        voc_eval(pred_boxes,
                 pred_labels,
                 pred_scores,
                 gt_boxes,
                 gt_labels,
                 None,
                 iou_thresh=0.5,
                 use_07_metric=True))
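The call passes use_07_metric=True, so each class is scored with the VOC2007 11-point interpolated AP. A minimal sketch of that metric, assuming rec and prec are the per-class recall and precision arrays:

import numpy as np

def voc07_ap(rec, prec):
    # 11-point interpolation: average the best precision achieved at
    # recall >= t for t in {0.0, 0.1, ..., 1.0}
    ap = 0.0
    for t in np.arange(0.0, 1.1, 0.1):
        p = np.max(prec[rec >= t]) if np.sum(rec >= t) > 0 else 0.0
        ap += p / 11.0
    return ap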
Example #4
def val():
    from utils.evaluate import write_results_voc
    import utils.postprocess as post
    from utils.voc_eval import voc_eval

    save_name = os.path.join(exp_dir, exp_dir.split('/')[-1] + '.pth')
    save_dict = torch.load(save_name)
    epoch = save_dict['epoch']
    state_dict = save_dict['net']
    net.load_state_dict(state_dict)
    net.cuda(device)

    # reset input_size for net in validation
    input_size = (416, 416)
    val_dataset.reset_resize_shape(input_size)
    net.reset_input_size(input_size)

    bs = val_loader.batch_size
    print("Val Epoch: {}".format(epoch))
    net.eval()
    val_loss = 0
    progressbar = tqdm(range(len(val_loader)))

    det_results = {}
    with torch.no_grad():
        for batch_idx, sample in enumerate(val_loader):
            img = sample['img'].float().to(device)
            boxes = sample['boxes']
            labels = sample['labels']
            yolo_outputs, loss = net(img, boxes, labels)
            val_loss += loss.item()

            for b in range(len(labels)):
                origin_size = sample['origin_size'][b]
                img_name = sample['img_name'][b]
                img_id = os.path.basename(img_name).split(".")[0]

                outs = []
                for yolo_out, anchor in zip(yolo_outputs, anchors):
                    y = yolo_out[b].detach().cpu().numpy()
                    if batch_idx == 0:
                        np.save(os.path.join(exp_dir, "debug.npy"), y)
                    out = post.yolo_postprocess(y, anchor, origin_size,
                                                (416, 416), OBJ_THRESHOLD)
                    if out is not None:
                        outs.append(out)
                if len(outs) == 0:
                    continue
                outs = np.vstack(outs).astype(np.float32)
                predict = post.yolo_nms(outs, NMS_THRESHOLD, post_nms=100)
                det_results[img_id] = predict

                if batch_idx < 50:
                    batch_img_tensorboard = []
                    img_name = sample['img_name'][b]
                    origin_img = cv2.imread(img_name)
                    h, w = origin_img.shape[:2]

                    # draw predict
                    draw = origin_img.copy()
                    for i, (x1, y1, x2, y2, cls_index) in enumerate(
                            predict[:, (*range(4), -1)].astype('int')):
                        if i > 10:  # just draw top 10 predictions
                            break
                        cv2.rectangle(draw, (x1, y1), (x2, y2), (0, 255, 0), 2)
                        class_name = val_dataset.label_to_cls_name[cls_index]
                        cv2.putText(draw, class_name, (x1, y1 + 25),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1.1,
                                    (0, 200, 55), 2)
                    draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
                    batch_img_tensorboard.append(draw)

                    # draw ground truth
                    draw = origin_img.copy()
                    box = boxes[b]
                    box = box / 416
                    box = box * [[w, h, w, h]]
                    for i, (x1, y1, x2, y2) in enumerate(box.astype('int')):
                        cv2.rectangle(draw, (x1, y1), (x2, y2), (0, 0, 255), 2)
                        class_name = val_dataset.label_to_cls_name[labels[b][i]]
                        cv2.putText(draw, class_name, (x1, y1 + 25),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1.1,
                                    (0, 55, 200), 2)
                    draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
                    batch_img_tensorboard.append(draw)

                    tensorboard.image_summary("img_{}".format(batch_idx),
                                              batch_img_tensorboard, epoch)
            progressbar.set_description("batch loss: {:.3f}".format(
                loss.detach().cpu().data.numpy()))
            progressbar.update(1)

    progressbar.close()
    tensorboard.scalar_summary("val_loss", val_loss, epoch)
    tensorboard.writer.flush()

    if len(det_results) == 0:
        print("------------------------\n")
        return

    write_results_voc(det_results, val_dataset.label_to_cls_name,
                      os.path.join(exp_dir, "det_results"))

    det_path = os.path.join(exp_dir, "det_results", "{}.txt")
    annopath = os.path.join(VOC_DIR_PATH, "VOCdevkit",
                            "VOC{}".format(val_year), "Annotations", "{}.xml")
    imagesetfile = os.path.join(VOC_DIR_PATH, "VOCdevkit",
                                "VOC{}".format(val_year), "ImageSets", "Main",
                                "{}.txt".format(val_set))
    cache_dir = os.path.join(VOC_DIR_PATH, "cache")

    aps = {}  # renamed from `map` to avoid shadowing the builtin
    for class_name in val_dataset.class_names:
        ap = voc_eval(det_path,
                      annopath,
                      imagesetfile,
                      class_name,
                      cache_dir,
                      use_07_metric=False)[2]  # voc_eval returns (rec, prec, ap)
        aps[class_name] = ap
    mean_ap = np.mean(list(aps.values()))
    with open(os.path.join(exp_dir, "map.txt"), "w") as f:
        f.writelines([
            "{}: {}\n".format(class_name, ap)
            for class_name, ap in aps.items()
        ])
        f.write("map: {}".format(mean_ap))

    print("val_map", mean_ap)
    tensorboard.scalar_summary("val_map", mean_ap, epoch)

    print("------------------------\n")