Example #1
imdb.evaluate_detections(dets)
result = []
classes = imdb.classes
idx_image = 0
for j, index in enumerate(imdb.image_index):
    curr_boxes = []
    for i, cls in enumerate(classes):
        # Skip the first class (background).
        if i == 0:
            continue
        box_info = dets[i][j]
        # An empty list means no detections for this class on this image.
        if isinstance(box_info, list) and len(box_info) == 0:
            continue
        assert isinstance(box_info, np.ndarray), (box_info, type(box_info))
        im = cv2.imread(imdb.image_path_at(j))
        vis_detections(im, str(cls), box_info, thresh=0.5)
        plt.show()
        # Keep detections whose score (last column) exceeds 0.5.
        ind = box_info[:, -1] > 0.5
        selected = box_info[ind, :]
        for k in range(selected.shape[0]):
            s = selected[k, :]
            curr_boxes.append({'x1y1x2y2': '{} {} {} {}'.format(int(s[0]), int(s[1]), int(s[2]), int(s[3])),
                               'label': cls,
                               'confidence': float(s[4])})
    curr_result = {'name': imdb.image_path_at(j),
                   'boxes': curr_boxes}
    result.append(curr_result)
result = dict(images=result)
pprint(result)
output_yaml = args.output_yaml
if output_yaml is None:
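
The excerpt above cuts off at the output_yaml check. Below is a minimal sketch of one plausible continuation, assuming PyYAML is available and that args.output_yaml holds a destination path; the original continuation is not shown, so this is illustrative only.

import yaml  # PyYAML; assumed to be available

if output_yaml is None:
    # No destination given; the pretty-printed result above is the only output.
    pass
else:
    # Write the {'images': [...]} structure collected above to a YAML file.
    with open(output_yaml, 'w') as f:
        yaml.safe_dump(result, f, default_flow_style=False)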
Example #2
    ]

    for im_name in im_names:
        print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
        print('Demo for data/demo/{}'.format(im_name))

        # Load the demo image
        im_file = os.path.join(cfg.DATA_DIR, 'demo', im_name)

        timer = Timer()
        timer.tic()
        im = pvanet.read_img(im_file)
        dets = pvanet.process_img(im)
        timer.toc()
        print('Detection took {:.3f}s'.format(timer.total_time))

        # Visualize detections for each class
        CONF_THRESH = 0.7
        for cls_ind, cls in enumerate(CLASSES[1:]):
            cls_ind += 1  # because we skipped background

            # Column 0 of dets holds the class index; keep this class's rows
            # and drop that column, leaving boxes plus scores.
            det_this_cls = dets[np.where(dets[:, 0] == cls_ind)[0]]
            boxes_and_scores = det_this_cls[:, 1:]

            if len(boxes_and_scores) > 0:
                vis_detections(im,
                               cls,
                               np.array(boxes_and_scores),
                               thresh=CONF_THRESH)

    plt.show()
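
The Timer used above is not defined in this excerpt; the sketch below implements the tic()/toc()/total_time interface that the snippet relies on (a minimal stand-in, not the original project's implementation).

import time

class Timer(object):
    # Minimal stopwatch matching the tic/toc usage in the example above.
    def __init__(self):
        self.total_time = 0.0   # accumulated seconds over all tic/toc pairs
        self.start_time = 0.0

    def tic(self):
        # Start (or restart) timing.
        self.start_time = time.time()

    def toc(self):
        # Stop timing, accumulate into total_time, and return the elapsed time.
        elapsed = time.time() - self.start_time
        self.total_time += elapsed
        return elapsed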
Example #3
import cv2
import pickle
import numpy as np
import matplotlib.pyplot as plt
# Project-specific helpers used below (cfg, get_imdb, nms, vis_detections, ...)
# are assumed to come from the surrounding Fast R-CNN-style codebase.

if __name__ == '__main__':
    imdb_name = 'imagenet_2015_val'
    #test_name = 'imagenet_bvlc_fast_rcnn_stage1_iter_125000'
    test_name = 'vgg16_fast_rcnn_iter_800000'
    det_name = '../output/default/' + imdb_name + '/' + test_name + '/detections.pkl'
    cfg.TRAIN.PROPOSAL_METHOD = 'gt'
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    
    imdb = get_imdb(imdb_name)
    with open(det_name, 'rb') as f:
        all_boxes = pickle.load(f)
    
    for i in range(len(imdb._image_index)):
        im_path = imdb.image_path_at(i)
        im = cv2.imread(im_path)
        cls_set = []
        det_set = []
        
        # Apply NMS per class (index 0 is the background class) and collect
        # the surviving detections for visualization.
        for cls_ind in range(1, len(imdb.classes)):
            dets = all_boxes[cls_ind][i]
            keep = nms(dets, 0.3)
            dets = dets[keep, :]
            cls = imdb._class_name[cls_ind]
            cls_set.append(cls)
            det_set.append(dets)
        
        vis_detections(im, cls_set, det_set, 0.7)
        plt.show()
             
        # NOTE: CLASSES, pvanet, im_file_o and total_time below come from parts
        # of the original script that this excerpt does not show.
        cls_stack = []
        boxes_and_scores = np.array([])

        # Visualize detections for each class
        CONF_THRESH = 0.7
        for cls_ind, cls in enumerate(CLASSES[1:]):
            cls_ind += 1  # because we skipped background

            det_this_cls = dets[np.where(dets[:, 0] == cls_ind)[0]]
            if boxes_and_scores.size == 0:
                boxes_and_scores = det_this_cls[:, 1:]
            else:
                boxes_and_scores = np.vstack(
                    (boxes_and_scores, det_this_cls[:, 1:]))

            # One label entry per detection of this class.
            cls_stack.extend([cls] * len(det_this_cls))
        if len(boxes_and_scores) > 0:
            vis_detections(pvanet,
                           im,
                           im_file_o,
                           cls_stack,
                           np.array(boxes_and_scores),
                           thresh=CONF_THRESH,
                           save=True)

    print('..........................................')
    print('\tTESTING REPORT')
    print('..........................................')
    print('REPORTS:\tTotal\t{:.3f}s'.format(total_time))
    print('------------------------------------------')
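
None of the examples above define vis_detections itself. The sketch below shows what such a helper might look like for the simpler call form vis_detections(im, class_name, dets, thresh), assuming each row of dets is [x1, y1, x2, y2, score]; the multi-class variant with save/output-path arguments used in the last example would need extra parameters.

import numpy as np
import matplotlib.pyplot as plt

def vis_detections(im, class_name, dets, thresh=0.5):
    # Draw all detections of one class whose score exceeds thresh (sketch).
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return
    im_rgb = im[:, :, (2, 1, 0)]  # cv2.imread returns BGR; matplotlib expects RGB
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im_rgb, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]
        # Box outline plus a small "label score" caption above the box.
        ax.add_patch(plt.Rectangle((bbox[0], bbox[1]),
                                   bbox[2] - bbox[0],
                                   bbox[3] - bbox[1],
                                   fill=False, edgecolor='red', linewidth=2))
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                color='white', backgroundcolor='red', fontsize=10)
    ax.set_title('{} detections with score >= {:.2f}'.format(class_name, thresh))
    ax.axis('off')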