Example #1
def test_net(net, imdb, max_per_image=100, thresh=0.05, boxes_num_per_batch=0, vis=False, startIdx=0, endIdx=-1, saveMat=False, svm=False, use_wzctx=True):
    """Test a Fast R-CNN network on an image database."""
    if use_wzctx:
       print "use use_wzctx!!!" 
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    #print "4"
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, net)

    # timers
    _t = {'im_detect' : Timer(), 'misc' : Timer()}
    #print "5"
    if not cfg.TEST.HAS_RPN:
        roidb = imdb.roidb
    
    if endIdx==-1:
        endIdx=num_images
    #print "6"
    for i in xrange(num_images):
        if i < startIdx or i >= endIdx:
            continue
        # filter out any ground truth boxes
        if cfg.TEST.HAS_RPN:
            box_proposals = None
        else:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
            #print "y"
        im_name = imdb.image_path_at(i)
        im_name = im_name.split('/')[-1]
        im_name = im_name.split('.')[0]
        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()
        #print "boxes_num %d"%boxes_num_per_batch
        if boxes_num_per_batch > 0:
            num_boxes = box_proposals.shape[0]
            # ceiling division: number of proposal minibatches (Python 2 integer division)
            num_batch = (num_boxes + boxes_num_per_batch - 1) / boxes_num_per_batch
            scores_batch = np.zeros((num_batch*boxes_num_per_batch, imdb.num_classes), dtype=np.float32)
            boxes_batch = np.zeros((num_batch*boxes_num_per_batch, 4*imdb.num_classes), dtype=np.float32)
            # replicate the first box num_batch*boxes_num_per_batch times for preallocation
            rois = np.tile(box_proposals[0, :], (num_batch*boxes_num_per_batch, 1))
            # assign real boxes to rois
            rois[:num_boxes, :] = box_proposals
            #print "num_batch: %d"%num_batch
            for j in xrange(int(num_batch)):
                roi = rois[j*boxes_num_per_batch:(j+1)*boxes_num_per_batch, :]
                score, box = im_detect(net, im, roi, svm, use_wzctx)
                scores_batch[j*boxes_num_per_batch:(j+1)*boxes_num_per_batch, :] = score
                boxes_batch[j*boxes_num_per_batch:(j+1)*boxes_num_per_batch, :] = box
            # discard duplicated results
            scores = scores_batch[:num_boxes, :]
            #print "kx"
            boxes = boxes_batch[:num_boxes, :]
        else:
            scores, boxes = im_detect(net, im, box_proposals, svm, use_wzctx)
        if saveMat:
            mat_dir = os.path.join(output_dir, 'stage%s' % startIdx)
            if not os.path.exists(mat_dir):
                os.mkdir(mat_dir)
            sio.savemat('%s/%s.mat' % (mat_dir, im_name + '_' + str(i)),
                        {'scores': scores, 'boxes': boxes})
        
        _t['im_detect'].toc()

        _t['misc'].tic()
        # skip j = 0, because it's the background class
        #print "7"        
        for j in xrange(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j*4:(j+1)*4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            if cfg.TEST.BBOX_VOTE:
                cls_dets_after_nms = cls_dets[keep, :]
                cls_dets = bbox_voting(cls_dets_after_nms, cls_dets, threshold=cfg.TEST.BBOX_VOTE_THRESH)
            else:
                cls_dets = cls_dets[keep, :]
            if vis:
                vis_detections(im, imdb.classes[j], cls_dets)
            all_boxes[j][i] = cls_dets

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack([all_boxes[j][i][:, -1]
                                      for j in xrange(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
              .format(i + 1, num_images, _t['im_detect'].average_time,
                      _t['misc'].average_time)

    #det_file = os.path.join(output_dir, 'detection_%sto%s.pkl' % (startIdx,endIdx))
    #with open(det_file, 'wb') as f:
    #    cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    print 'Evaluating detections'
    imdb.evaluate_detections(all_boxes, output_dir, startIdx, endIdx)
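For context, here is a minimal driver sketch showing how a function with this signature could be invoked. It is modelled on the usual py-faster-rcnn tools/test_net.py layout; the prototxt/caffemodel paths, the imdb name, and the batch/range values are placeholders, not part of the example above.

# Hypothetical driver, modelled on py-faster-rcnn's tools/test_net.py (Python 2).
# Paths, the imdb name and the numeric arguments below are placeholders.
import caffe
from datasets.factory import get_imdb
from fast_rcnn.config import cfg

if __name__ == '__main__':
    caffe.set_mode_gpu()
    caffe.set_device(0)

    net = caffe.Net('models/test.prototxt', 'output/final.caffemodel', caffe.TEST)
    net.name = 'final'

    imdb = get_imdb('voc_2007_test')
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)

    # score images [0, 500) in chunks of 128 proposals per forward pass,
    # writing per-image .mat files before evaluation
    test_net(net, imdb, max_per_image=100, thresh=0.05,
             boxes_num_per_batch=128, startIdx=0, endIdx=500, saveMat=True)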
Example #2
    with open(args.aboxes) as f:
        all_boxes = cPickle.load(f)

    num_images = len(imdb.image_index)

    for i in xrange(num_images):
        print 'Image', i + 1, 'of', num_images
        sys.stdout.flush()
        # skip j = 0, because it's the background class
        for j in xrange(1, imdb.num_classes):
            cls_dets = all_boxes[j][i]
            keep = nms(cls_dets, cfg.TEST.NMS)
            if cfg.TEST.BBOX_VOTE:
                cls_dets_after_nms = cls_dets[keep, :]
                cls_dets = bbox_voting(cls_dets_after_nms,
                                       cls_dets,
                                       threshold=cfg.TEST.BBOX_VOTE_THRESH)
            else:
                cls_dets = cls_dets[keep]
            all_boxes[j][i] = cls_dets

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in xrange(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
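Both examples call a `bbox_voting` helper that is not shown here. Below is a minimal NumPy sketch of the usual box-voting idea: each NMS survivor is replaced by the score-weighted average of every raw detection that overlaps it above the IoU threshold. The actual implementation behind `cfg.TEST.BBOX_VOTE` may differ in details.

import numpy as np

def bbox_voting(dets_after_nms, all_dets, threshold=0.5):
    """Box-voting sketch: refine each NMS survivor with the score-weighted
    average of every raw detection that overlaps it by IoU >= threshold.
    Both arguments are N x 5 arrays of (x1, y1, x2, y2, score)."""
    voted = dets_after_nms.copy()
    areas = (all_dets[:, 2] - all_dets[:, 0] + 1) * (all_dets[:, 3] - all_dets[:, 1] + 1)
    for k in xrange(dets_after_nms.shape[0]):
        box = dets_after_nms[k, :4]
        # IoU between this survivor and every raw detection
        xx1 = np.maximum(box[0], all_dets[:, 0])
        yy1 = np.maximum(box[1], all_dets[:, 1])
        xx2 = np.minimum(box[2], all_dets[:, 2])
        yy2 = np.minimum(box[3], all_dets[:, 3])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        area = (box[2] - box[0] + 1) * (box[3] - box[1] + 1)
        iou = inter / (area + areas - inter)
        # score-weighted average of the sufficiently overlapping boxes
        mask = iou >= threshold
        weights = all_dets[mask, 4]
        voted[k, :4] = np.dot(weights, all_dets[mask, :4]) / weights.sum()
    return voted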
Example #3
def test_net(net, imdb, max_per_image=100, thresh=0.05, boxes_num_per_batch=0, vis=False):
    """Test a Fast R-CNN network on an image database."""
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]

    raw_all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, net)

    # timers
    _t = {'im_detect' : Timer(), 'misc' : Timer()}

    if not cfg.TEST.HAS_RPN:
        roidb = imdb.roidb

    for i in xrange(num_images):
        # filter out any ground truth boxes
        if cfg.TEST.HAS_RPN:
            box_proposals = None
        else:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]

            num_boxes = box_proposals.shape[0]
            if num_boxes < 1:
                print 'Oops, {} does not have any bbox!'.format(imdb.image_path_at(i))
                continue

        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()
        if boxes_num_per_batch > 0:
            num_batch = (num_boxes + boxes_num_per_batch - 1) / boxes_num_per_batch
            scores_batch = np.zeros((num_batch*boxes_num_per_batch, imdb.num_classes), dtype=np.float32)
            boxes_batch = np.zeros((num_batch*boxes_num_per_batch, 4*imdb.num_classes), dtype=np.float32)
            # replicate the first box num_batch*boxes_num_per_batch times for preallocation
            rois = np.tile(box_proposals[0, :], (num_batch*boxes_num_per_batch, 1))
            # assign real boxes to rois
            rois[:num_boxes, :] = box_proposals
            for j in xrange(num_batch):
                roi = rois[j*boxes_num_per_batch:(j+1)*boxes_num_per_batch, :]
                score, box = im_detect(net, im, roi)
                scores_batch[j*boxes_num_per_batch:(j+1)*boxes_num_per_batch, :] = score
                boxes_batch[j*boxes_num_per_batch:(j+1)*boxes_num_per_batch, :] = box
            # discard duplicated results
            scores = scores_batch[:num_boxes, :]
            boxes = boxes_batch[:num_boxes, :]
        else:
            scores, boxes = im_detect(net, im, box_proposals)
        _t['im_detect'].toc()

        if cfg.TEST.SAVE_MAT:
            mat_dir = os.path.join(output_dir, imdb._image_set)
            if not os.path.exists(mat_dir):
                os.mkdir(mat_dir)
            im_name = os.path.splitext(os.path.basename(imdb.image_path_at(i)))[0]
            sio.savemat('%s/%s.mat' % (mat_dir, im_name), {'scores': scores, 'boxes': boxes})

        _t['misc'].tic()
        # skip j = 0, because it's the background class
        for j in xrange(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j*4:(j+1)*4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            raw_all_boxes[j][i] = cls_dets
            if cfg.TEST.SOFT_NMS:
                keep = soft_nms(cls_dets, method=cfg.TEST.SOFT_NMS_METHOD)
            else:
                keep = nms(cls_dets, cfg.TEST.NMS)
            if cfg.TEST.BBOX_VOTE:
                cls_dets_after_nms = cls_dets[keep, :]
                cls_dets = bbox_voting(cls_dets_after_nms, cls_dets, threshold=cfg.TEST.BBOX_VOTE_THRESH)
            else:
                cls_dets = cls_dets[keep, :]
            if vis:
                vis_detections(im, imdb.classes[j], cls_dets)
            all_boxes[j][i] = cls_dets

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack([all_boxes[j][i][:, -1]
                                      for j in xrange(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
              .format(i + 1, num_images, _t['im_detect'].average_time,
                      _t['misc'].average_time)

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    if cfg.TEST.CACHE_RAW_ABOXES:
        raw_aboxes_file = os.path.join(output_dir, 'raw_all_boxes.pkl')
        with open(raw_aboxes_file, 'wb') as f:
            cPickle.dump(raw_all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    print 'Evaluating detections'
    imdb.evaluate_detections(all_boxes, output_dir)
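Example #3 optionally replaces hard NMS with `soft_nms` (Bodla et al., "Soft-NMS"). The sketch below shows the idea conceptually: overlapping boxes have their scores decayed (linearly or with a Gaussian) instead of being discarded outright, and the indices of boxes whose decayed score survives a cutoff are returned. The real kernel behind `cfg.TEST.SOFT_NMS` is typically Cython and takes different parameters, so treat this signature as hypothetical.

import numpy as np

def soft_nms(dets, iou_thresh=0.3, score_thresh=0.001, method=1):
    """Soft-NMS sketch. dets is an N x 5 array of (x1, y1, x2, y2, score);
    returns indices of detections kept after score decay (hypothetical signature)."""
    boxes = dets[:, :4]
    scores = dets[:, 4].copy()
    areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    keep = []
    remaining = np.arange(dets.shape[0])
    while remaining.size > 0:
        # pick the highest-scoring remaining box
        top = remaining[np.argmax(scores[remaining])]
        keep.append(top)
        remaining = remaining[remaining != top]
        if remaining.size == 0:
            break
        # overlap of every remaining box with the picked one
        xx1 = np.maximum(boxes[top, 0], boxes[remaining, 0])
        yy1 = np.maximum(boxes[top, 1], boxes[remaining, 1])
        xx2 = np.minimum(boxes[top, 2], boxes[remaining, 2])
        yy2 = np.minimum(boxes[top, 3], boxes[remaining, 3])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[top] + areas[remaining] - inter)
        if method == 1:
            decay = np.where(iou > iou_thresh, 1.0 - iou, 1.0)   # linear decay
        else:
            decay = np.exp(-(iou * iou) / 0.5)                   # Gaussian decay
        scores[remaining] = scores[remaining] * decay
        # discard boxes whose decayed score falls below the cutoff
        remaining = remaining[scores[remaining] > score_thresh]
    return keep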