# Example #1
    det_file = os.path.join(save_folder, 'detections.pkl')
    # load data
    if args.dataset == 'VOC':
        testset = VOCDetection(VOCroot, [('2007', 'test')], None,
                               AnnotationTransform())
    elif args.dataset == 'COCO':
        testset = COCODetection(COCOroot, [('2014', 'minival')], None)
        #COCOroot, [('2015', 'test-dev')], None)
    else:
        print('Only VOC and COCO dataset are supported now!')

    if args.retest:
        f = open(det_file, 'rb')
        all_boxes = pickle.load(f)
        print('Evaluating detections')
        testset.evaluate_detections(all_boxes, save_folder)
    else:
        # load net
        img_dim = (300, 512)[args.size == '512']
        num_classes = (21, 81)[args.dataset == 'COCO']
        net = build_net('test', img_dim, num_classes)  # initialize detector
        state_dict = torch.load(args.trained_model,
                                map_location='cuda:{}'.format(gpu_id))
        # create new OrderedDict that does not contain `module.`

        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            head = k[:7]
            if head == 'module.':
                name = k[7:]  # remove `module.`
# Example #2
def do_test(args, model, detector, max_per_image=200, thresh=0.01):
    """Run detection over the test split of ``args.dataset`` and evaluate.

    Args:
        args: parsed CLI namespace; fields read here: ``dataset``, ``setting``,
            ``split``, ``save_folder``, ``retest``, ``cpu``.
        model: detection network; called as ``model(x)`` and providing
            ``model.size`` and ``model.device``.
        detector: post-processor whose ``forward(pred, priors)`` returns a
            ``(boxes, scores)`` pair.
        max_per_image: keep at most this many detections per image
            (``<= 0`` disables the cap).
        thresh: minimum per-class score for a box to be kept.

    Side effects: pickles all detections to ``detections.pkl`` under
    ``args.save_folder`` and calls ``dataset.evaluate_detections``.

    NOTE(review): relies on module-level names (``num_classes``, ``rgb_means``,
    ``priors``, ``logger``, dataset/transform/nms helpers) — confirm they are
    defined in this module at import time.
    """
    # Build the evaluation dataset for the requested benchmark.
    if args.dataset == 'VOC':
        dataset = VOCDetection(
            args, VOCroot, [('2007', 'test')], None,
            AnnotationTransform(0 if args.setting ==
                                'transfer' else args.split), True)
    elif args.dataset == 'COCO':
        dataset = COCODetection(COCOroot, [('2014', 'split_nonvoc_minival')],
                                None)
    else:
        raise ValueError(f"Unknown dataset: {args.dataset}")

    num_images = len(dataset)
    # all_boxes[class][image] -> (k, 5) array of [x1, y1, x2, y2, score].
    all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    transform = BaseTransform(model.size, rgb_means, (2, 0, 1))

    _t = {'im_detect': Timer(), 'misc': Timer()}
    det_file = os.path.join(args.save_folder, 'detections.pkl')

    if args.retest:
        # Re-evaluate previously saved detections. Use a context manager so
        # the file handle is closed promptly (the original leaked it).
        with open(det_file, 'rb') as f:
            all_boxes = pickle.load(f)
        logger.info('Evaluating detections')
        dataset.evaluate_detections(all_boxes, args.save_folder)
        return

    for i in range(num_images):
        img = dataset.pull_image(i)
        # Scale factor to map percent-form boxes back to pixel coordinates.
        scale = torch.Tensor(
            [img.shape[1], img.shape[0], img.shape[1],
             img.shape[0]]).to(model.device)
        with torch.no_grad():
            x = transform(img).unsqueeze(0)

        _t['im_detect'].tic()

        pred = model(x)  # forward pass
        boxes, scores = detector.forward(pred, priors)
        detect_time = _t['im_detect'].toc()
        boxes = boxes[0]  # percent and point form detection boxes
        scores = scores[0]  # [1, num_priors, num_classes]

        boxes *= scale  # scale each detection back up to the image
        boxes = boxes.cpu().numpy()
        scores = scores.cpu().numpy()

        _t['misc'].tic()

        # Per-class thresholding + NMS; class 0 is background and skipped.
        for j in range(1, num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            if len(inds) == 0:
                all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)
                continue
            c_bboxes = boxes[inds]
            c_scores = scores[inds, j]
            c_dets = np.hstack(
                (c_bboxes, c_scores[:, np.newaxis])).astype(np.float32,
                                                            copy=False)

            keep = nms(c_dets, 0.45, force_cpu=args.cpu)
            c_dets = c_dets[keep, :]
            all_boxes[j][i] = c_dets
        if max_per_image > 0:
            # Cap total detections per image by keeping the highest scores
            # across all classes.
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in range(1, num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]

        nms_time = _t['misc'].toc()

        if i % 20 == 0:
            logger.info('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'.format(
                i + 1, num_images, detect_time, nms_time))
            _t['im_detect'].clear()
            _t['misc'].clear()

    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    logger.info('Evaluating detections')
    dataset.evaluate_detections(all_boxes, args.save_folder)