def test_net(net, imdb, max_per_image=100, thresh=0.05, vis=False):
    """Test a Fast R-CNN network on an image database."""
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]
    all_boxes_o = [[[] for _ in xrange(num_images)]
                   for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, net)

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    if not cfg.TEST.HAS_RPN:
        roidb = imdb.roidb

    for i in xrange(num_images):
        # filter out any ground truth boxes
        if cfg.TEST.HAS_RPN:
            box_proposals = None
        else:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]

        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()
        scores, boxes = im_detect(net, im, box_proposals)
        _t['im_detect'].toc()

        _t['misc'].tic()
        # skip j = 0, because it's the background class
        for j in xrange(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            if vis:
                vis_detections(im, imdb.classes[j], cls_dets)
            all_boxes[j][i] = cls_dets

            # keep the original (pre-threshold, pre-NMS) detections
            cls_scores_o = scores[:, j]
            cls_boxes_o = boxes[:, j * 4:(j + 1) * 4]
            cls_dets_o = np.hstack((cls_boxes_o, cls_scores_o[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            all_boxes_o[j][i] = cls_dets_o

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in xrange(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
            .format(i + 1, num_images, _t['im_detect'].average_time,
                    _t['misc'].average_time)

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    det_file_o = os.path.join(output_dir, 'detections_o.pkl')
    with open(det_file_o, 'wb') as f:
        cPickle.dump(all_boxes_o, f, cPickle.HIGHEST_PROTOCOL)

    print 'Evaluating detections'
    imdb.evaluate_detections(all_boxes, output_dir)
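
# NOTE: illustrative sketch only. The `nms` used above comes from the Fast
# R-CNN library; the pure-NumPy helper below shows the greedy IoU suppression
# it is assumed to implement (keep the highest-scoring box, drop boxes whose
# IoU with it exceeds `thresh`, repeat), so the role of cfg.TEST.NMS is clear.
def nms_sketch(dets, thresh):
    """Greedy NMS on an N x 5 array of (x1, y1, x2, y2, score)."""
    import numpy as np
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # intersection of the current top-scoring box with every remaining box
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # keep only the boxes that overlap the current box less than thresh
        order = order[np.where(iou <= thresh)[0] + 1]
    return keep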
    cfg_from_list(args.set_cfgs)

    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    if not args.randomize:
        print 'fixing the random seeds (numpy and caffe) for reproducibility'
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    imdb, roidb = combined_roidb(args.imdb_name)
    print '{:d} roidb entries'.format(len(roidb))

    cfg.TRAIN.GAN_imdb_name = args.imdb_name

    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)

    if cfg.CSC_DEBUG:
        vis_dir = get_vis_dir(imdb)

    train_net(args.solver, roidb, output_dir,
              pretrained_model=args.pretrained_model,
              snapshot_state=args.snapshot_state,
              max_iters=args.max_iters)
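
# NOTE: illustrative sketch only. `combined_roidb` is a training utility not
# shown in this file; the hypothetical helper below assumes the common
# py-faster-rcnn convention of joining dataset names with '+'
# (e.g. 'voc_2007_trainval+voc_2012_trainval') and concatenating their roidbs.
# `get_imdb` and `get_training_roidb` are passed in rather than imported to
# keep the sketch self-contained.
def combined_roidb_sketch(imdb_names, get_imdb, get_training_roidb):
    """Build one roidb from '+'-separated imdb names (hypothetical helper)."""
    roidbs = []
    imdbs = []
    for name in imdb_names.split('+'):
        imdb = get_imdb(name)                     # load the dataset wrapper
        roidbs.append(get_training_roidb(imdb))   # prepare its training rois
        imdbs.append(imdb)
    # flatten the per-dataset roidbs into a single list of image entries
    roidb = [entry for r in roidbs for entry in r]
    return imdbs[0], roidb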
def test_net_bbox(net, imdb, max_per_image=100, thresh=0.00000001, vis=False):
    """Test a network on an image database."""
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]
    all_scores = [[[] for _ in xrange(num_images)]
                  for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, net)
    if cfg.CSC_DEBUG:
        vis_dir = get_vis_dir(imdb, net)

    # timers
    _t = {'im_detect_bbox': Timer(), 'misc': Timer()}

    roidb = imdb.roidb
    test_scales = cfg.TEST.SCALES

    save_id = 0
    for i in xrange(num_images):
        # if imdb.image_index[i] != '001547':
        #     continue
        # if i > 100:
        #     continue

        # Filter out any ground-truth boxes.
        # The roidb may contain ground-truth rois (for example, if the roidb
        # comes from the training or val split). We only want to evaluate
        # detection on the *non*-ground-truth rois. We select only the rois
        # that have the gt_classes field set to 0, which means there's no
        # ground truth.
        # box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
        box_proposals = roidb[i]['boxes']
        rois_per_this_image = min(cfg.TEST.ROIS_PER_IM, len(box_proposals))
        box_proposals = box_proposals[0:rois_per_this_image, :]

        if cfg.USE_ROI_SCORE:
            box_scores = roidb[i]['box_scores']
        else:
            box_scores = None

        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect_bbox'].tic()

        scores = None
        boxes = None
        for target_size in test_scales:
            if cfg.CSC_DEBUG:
                # save_subdir = time.strftime("%Y-%m-%d", time.gmtime())
                # save_dir = os.path.join('tmp', save_subdir)
                # if not os.path.exists(save_dir):
                #     os.makedirs(save_dir)
                cv2.imwrite(os.path.join(vis_dir, str(save_id) + '_.png'), im)
                save_id += 1

            cfg.TEST.SCALES = (target_size, )
            scores_scale, boxes_scale = im_detect_bbox(net, im, box_proposals,
                                                       box_scores)
            if scores is None:
                scores = scores_scale
                boxes = boxes_scale
            else:
                scores = np.vstack((scores, scores_scale))
                boxes = np.vstack((boxes, boxes_scale))

        if cfg.TEST.USE_FLIPPED:
            im_flip = im[:, ::-1, :]
            box_proposals_flip = box_proposals.copy()
            oldx1 = box_proposals_flip[:, 0].copy()
            oldx2 = box_proposals_flip[:, 2].copy()
            box_proposals_flip[:, 0] = im.shape[1] - oldx2 - 1
            box_proposals_flip[:, 2] = im.shape[1] - oldx1 - 1

            for target_size in test_scales:
                if cfg.CSC_DEBUG:
                    # save_subdir = time.strftime("%Y-%m-%d", time.gmtime())
                    # save_dir = os.path.join('tmp', save_subdir)
                    cv2.imwrite(os.path.join(vis_dir, str(save_id) + '_.png'),
                                im_flip)
                    save_id += 1

                cfg.TEST.SCALES = (target_size, )
                scores_scale, boxes_scale = im_detect_bbox(
                    net, im_flip, box_proposals_flip, box_scores)
                # scores = np.vstack((scores, scores_scale))
                # boxes = np.vstack((boxes, boxes_scale))

        _t['im_detect_bbox'].toc()

        _t['misc'].tic()
        # NOTE: do NOT skip j = 0 here; unlike the Fast R-CNN loop above there
        # is no separate background class, so every class index is evaluated.
        for j in xrange(0, imdb.num_classes):
            all_scores[j][i] = sum(scores[:, j])

            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            # if len(cls_scores) > 0:
            #     sum_score = sum(cls_scores)
            #     max_score = max(cls_scores)
            #     print cls_scores
            #     cls_scores *= (sum_score / max_score)
            #     print sum_score, max_score, sum_score / max_score
            #     print cls_scores
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            if vis:
                vis_heatmap(im, i, imdb.classes[j], cls_dets, thresh=0.3)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            # if vis:
            #     vis_detections(im, imdb.classes[j], cls_dets, thresh=thresh)
            all_boxes[j][i] = cls_dets

        if vis:
            import matplotlib.pyplot as plt
            # plt.show()
            plt.close('all')

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in xrange(0, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(0, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print 'im_detect_bbox: {:d}/{:d} {:.3f}s {:.3f}s' \
            .format(i + 1, num_images, _t['im_detect_bbox'].average_time,
                    _t['misc'].average_time)

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    print 'Evaluating detections'
    imdb.evaluate_detections(all_boxes, output_dir, all_scores=all_scores)
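
# NOTE: illustrative sketch only. The flipped-proposal branch above mirrors
# each box with x1' = W - x2 - 1 and x2' = W - x1 - 1 (0-indexed pixel
# coordinates). The small check below, on hypothetical boxes and width, shows
# that this keeps box widths unchanged and that flipping twice is the identity.
def _check_flip_boxes():
    import numpy as np
    W = 500                                    # hypothetical image width
    boxes = np.array([[10, 20, 100, 200],
                      [0, 0, 499, 300]], dtype=np.float32)

    def flip(b, width):
        f = b.copy()
        f[:, 0] = width - b[:, 2] - 1          # new x1 from old x2
        f[:, 2] = width - b[:, 0] - 1          # new x2 from old x1
        return f

    flipped = flip(boxes, W)
    assert np.all(flipped[:, 2] >= flipped[:, 0])             # still valid boxes
    assert np.array_equal(flipped[:, 2] - flipped[:, 0],
                          boxes[:, 2] - boxes[:, 0])          # widths preserved
    assert np.array_equal(flip(flipped, W), boxes)            # involution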
def test_net_cache(net, imdb, max_per_image=100, thresh=0.000000001,
                   vis=False, scale=1.0):
    """Test a network on an image database using cached detections."""
    print 'max_per_image: ', max_per_image
    print 'thresh: ', thresh

    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]
    all_scores = [[[] for _ in xrange(num_images)]
                  for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, net)
    if cfg.CSC_DEBUG:
        vis_dir = get_vis_dir(imdb, net)

    det_file = os.path.join(output_dir, 'detections.pkl')
    if not os.path.isfile(det_file):
        print 'file does not exist: ', det_file
        # Make sure all regions are kept: disable NMS and the per-image cap
        # while generating the cache.
        origin_NMS = cfg.TEST.NMS
        cfg.TEST.NMS = 1.1
        test_net(net, imdb, max_per_image=99999, thresh=0.0000, vis=False)
        cfg.TEST.NMS = origin_NMS

    with open(det_file, 'rb') as f:
        all_boxes_cache = cPickle.load(f)

    print 'all_boxes_cache: ', len(all_boxes_cache), len(all_boxes_cache[0])
    print 'all_boxes_cache: ', all_boxes_cache[0][0].shape
    print 'all_boxes_cache: ', all_boxes_cache[14][0].shape

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    roidb = imdb.roidb
    test_scales = cfg.TEST.SCALES

    save_id = 0
    for i in xrange(num_images):
        _t['im_detect'].tic()
        _t['im_detect'].toc()

        if vis:
            # the cached path never runs detection on the image, but the
            # heatmap visualisation below needs it
            im = cv2.imread(imdb.image_path_at(i))

        _t['misc'].tic()
        # NOTE: do NOT skip j = 0; there is no background class here.
        for j in xrange(0, imdb.num_classes):
            # all_scores[j][i] = sum(scores[:, j])
            all_scores[j][i] = sum(all_boxes_cache[j][i][:, -1])

            # inds = np.where(scores[:, j] > thresh)[0]
            # cls_scores = scores[inds, j]
            inds = np.where(all_boxes_cache[j][i][:, -1] > thresh)[0]
            cls_scores = all_boxes_cache[j][i][inds, -1]

            # cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_boxes = all_boxes_cache[j][i][inds, 0:4]
            cls_boxes = resize_boxes(cls_boxes, scale)

            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            if vis:
                vis_heatmap(im, i, imdb.classes[j], cls_dets, thresh=0.3)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            # if vis:
            #     vis_detections(im, imdb.classes[j], cls_dets, thresh=thresh)
            all_boxes[j][i] = cls_dets

        if vis:
            import matplotlib.pyplot as plt
            # plt.show()
            plt.close('all')

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in xrange(0, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(0, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
            .format(i + 1, num_images, _t['im_detect'].average_time,
                    _t['misc'].average_time)

    print 'Evaluating detections'
    imdb.evaluate_detections(all_boxes, output_dir, all_scores=all_scores)
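
# NOTE: illustrative sketch only. `resize_boxes` is defined elsewhere in the
# codebase and its exact behaviour is not shown here; the hypothetical version
# below is one plausible reading, scaling each box about its own centre by
# `scale` while leaving the centre fixed.
def resize_boxes_sketch(boxes, scale):
    """Scale (x1, y1, x2, y2) boxes about their centres (hypothetical helper)."""
    import numpy as np
    boxes = boxes.astype(np.float32, copy=True)
    cx = (boxes[:, 0] + boxes[:, 2]) * 0.5     # box centres
    cy = (boxes[:, 1] + boxes[:, 3]) * 0.5
    half_w = (boxes[:, 2] - boxes[:, 0]) * 0.5 * scale
    half_h = (boxes[:, 3] - boxes[:, 1]) * 0.5 * scale
    boxes[:, 0] = cx - half_w
    boxes[:, 1] = cy - half_h
    boxes[:, 2] = cx + half_w
    boxes[:, 3] = cy + half_h
    return boxes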
def test_net_ensemble2(det_dirs, imdb, max_per_image=100, thresh=0.000000001):
    """Ensemble detections loaded from per-class VOC-format result files."""
    print 'max_per_image: ', max_per_image
    print 'thresh: ', thresh

    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]
    all_scores = [[[] for _ in xrange(num_images)]
                  for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, None)

    # load all the detection results
    # all_boxes_cache = None
    all_boxes_cache = [[[] for _ in xrange(num_images)]
                       for _ in xrange(imdb.num_classes)]
    image_index = imdb.image_index
    for det_dir in det_dirs:
        p = 1.0
        if '2' in det_dir:
            p = 10.0
        for dirpath, dirnames, filenames in os.walk(det_dir):
            for filename in filenames:
                print 'load res: ', os.path.join(dirpath, filename)
                c = -1
                for c_i, cls in enumerate(imdb.classes):
                    if cls + '.txt' in filename:
                        c = c_i
                        break
                assert c > -1
                with open(os.path.join(dirpath, filename), 'r') as f:
                    for line in f.readlines():
                        line = line.strip()
                        im_id, score, xmin, ymin, xmax, ymax = line.split(' ')
                        im_i = image_index.index(im_id)
                        all_boxes_cache[c][im_i].append([
                            float(xmin) - 1,
                            float(ymin) - 1,
                            float(xmax) - 1,
                            float(ymax) - 1,
                            float(score) * p
                        ])

    for n in xrange(num_images):
        for c in xrange(imdb.num_classes):
            if len(all_boxes_cache[c][n]) == 0:
                all_boxes_cache[c][n] = np.zeros((0, 5), dtype=np.float32)
            else:
                all_boxes_cache[c][n] = np.array(all_boxes_cache[c][n],
                                                 dtype=np.float32)

    print 'all_boxes_cache: ', len(all_boxes_cache), len(all_boxes_cache[0])
    print 'all_boxes_cache[0][0]: ', all_boxes_cache[0][0].shape
    # print 'all_boxes_cache[0][0][0]: ', all_boxes_cache[0][0][0]
    # print 'all_boxes_cache[14][0]: ', all_boxes_cache[14][0].shape
    # print 'all_boxes_cache[14][0][0]: ', all_boxes_cache[14][0][0]

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    roidb = imdb.roidb
    for i in xrange(num_images):
        _t['im_detect'].tic()
        _t['im_detect'].toc()

        _t['misc'].tic()
        # NOTE: do NOT skip j = 0; there is no background class here.
        for j in xrange(0, imdb.num_classes):
            # all_scores[j][i] = sum(scores[:, j])
            all_scores[j][i] = sum(all_boxes_cache[j][i][:, -1])

            # inds = np.where(scores[:, j] > thresh)[0]
            # cls_scores = scores[inds, j]
            inds = np.where(all_boxes_cache[j][i][:, -1] > thresh)[0]
            cls_scores = all_boxes_cache[j][i][inds, -1]

            # cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_boxes = all_boxes_cache[j][i][inds, 0:4]

            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            all_boxes[j][i] = cls_dets

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in xrange(0, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(0, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
            .format(i + 1, num_images, _t['im_detect'].average_time,
                    _t['misc'].average_time)

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    print 'Evaluating detections'
    imdb.evaluate_detections(all_boxes, output_dir, all_scores=all_scores)
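
# NOTE: illustrative sketch only. The loader above reads VOC-style per-class
# result files where each line is
#   "<image_id> <score> <xmin> <ymin> <xmax> <ymax>"
# with 1-indexed pixel coordinates. The helper below shows the same parsing for
# a single line, including the shift to 0-indexed coordinates and the optional
# score weight `p` used above.
def parse_voc_det_line(line, p=1.0):
    """Return (image_id, [x1, y1, x2, y2, weighted_score]) for one result line."""
    im_id, score, xmin, ymin, xmax, ymax = line.strip().split(' ')
    det = [float(xmin) - 1, float(ymin) - 1,
           float(xmax) - 1, float(ymax) - 1,
           float(score) * p]
    return im_id, det

# e.g. parse_voc_det_line('000005 0.872 12.0 34.0 120.0 240.0', p=10.0)
# returns ('000005', [11.0, 33.0, 119.0, 239.0, 8.72])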
def test_net_ensemble(det_dirs, imdb, max_per_image=100, thresh=0.000000001):
    """Ensemble cached detections (detections_o.pkl) by summing their scores."""
    print 'max_per_image: ', max_per_image
    print 'thresh: ', thresh

    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]
    all_scores = [[[] for _ in xrange(num_images)]
                  for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, None)

    # load all the detection results
    all_boxes_cache = None
    for det_dir in det_dirs:
        det_path = os.path.join(det_dir, 'detections_o.pkl')
        print 'load det: ', det_path
        assert os.path.isfile(det_path), 'no det file: ' + det_path
        with open(det_path, 'rb') as f:
            all_boxes_cache_this = cPickle.load(f)

        print 'all_boxes_cache_this: ', len(all_boxes_cache_this), \
            len(all_boxes_cache_this[0])
        print 'all_boxes_cache_this[0][0]: ', all_boxes_cache_this[0][0].shape
        # print 'all_boxes_cache_this[0][0][0]: ', all_boxes_cache_this[0][0][0]
        # print 'all_boxes_cache_this[14][0]: ', all_boxes_cache_this[14][0].shape
        # print 'all_boxes_cache_this[14][0][0]: ', all_boxes_cache_this[14][0][0]

        if all_boxes_cache is None:
            all_boxes_cache = all_boxes_cache_this
        else:
            print 'Summing up all results'
            print 'If an error happens here, the cached detections probably have mismatched dimensions.'
            for c in xrange(imdb.num_classes):
                for n in xrange(num_images):
                    all_boxes_cache[c][n][:, 4] += \
                        all_boxes_cache_this[c][n][:, 4]

    print 'all_boxes_cache: ', len(all_boxes_cache), len(all_boxes_cache[0])
    print 'all_boxes_cache[0][0]: ', all_boxes_cache[0][0].shape
    # print 'all_boxes_cache[0][0][0]: ', all_boxes_cache[0][0][0]
    # print 'all_boxes_cache[14][0]: ', all_boxes_cache[14][0].shape
    # print 'all_boxes_cache[14][0][0]: ', all_boxes_cache[14][0][0]

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    roidb = imdb.roidb
    for i in xrange(num_images):
        _t['im_detect'].tic()
        _t['im_detect'].toc()

        _t['misc'].tic()
        # NOTE: do NOT skip j = 0; there is no background class here.
        for j in xrange(0, imdb.num_classes):
            # all_scores[j][i] = sum(scores[:, j])
            all_scores[j][i] = sum(all_boxes_cache[j][i][:, -1])

            # inds = np.where(scores[:, j] > thresh)[0]
            # cls_scores = scores[inds, j]
            inds = np.where(all_boxes_cache[j][i][:, -1] > thresh)[0]
            cls_scores = all_boxes_cache[j][i][inds, -1]

            # cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_boxes = all_boxes_cache[j][i][inds, 0:4]

            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            all_boxes[j][i] = cls_dets

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in xrange(0, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(0, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
            .format(i + 1, num_images, _t['im_detect'].average_time,
                    _t['misc'].average_time)

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    print 'Evaluating detections'
    imdb.evaluate_detections(all_boxes, output_dir, all_scores=all_scores)
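
# NOTE: illustrative sketch only. The ensemble above assumes every
# detections_o.pkl holds the *same* boxes in the *same* order for each
# (class, image) pair, so only the score column (index 4) is summed. The toy
# example below, on two fake caches for one class and one image, shows that
# dimension-match requirement and the score fusion.
def _demo_score_sum_ensemble():
    import numpy as np
    # two models' raw detections for the same two proposals: (x1, y1, x2, y2, score)
    cache_a = np.array([[10, 10, 50, 50, 0.2],
                        [20, 20, 80, 80, 0.7]], dtype=np.float32)
    cache_b = np.array([[10, 10, 50, 50, 0.4],
                        [20, 20, 80, 80, 0.1]], dtype=np.float32)
    assert cache_a.shape == cache_b.shape     # the dimension-match requirement
    fused = cache_a.copy()
    fused[:, 4] += cache_b[:, 4]              # sum scores, keep the boxes
    # fused[:, 4] is now [0.6, 0.8]; the box coordinates are unchanged
    return fused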
def test_net(net, imdb, max_per_image=100, thresh=0.000000001, vis=False):
    """Test a network on an image database."""
    if 'coco' in imdb.name:
        max_per_image = 100
    print 'max_per_image: ', max_per_image
    print 'thresh: ', thresh

    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]
    all_scores = [[[] for _ in xrange(num_images)]
                  for _ in xrange(imdb.num_classes)]
    all_boxes_o = [[[] for _ in xrange(num_images)]
                   for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, net)
    if cfg.CSC_DEBUG:
        vis_dir = get_vis_dir(imdb, net)

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    roidb = imdb.roidb
    test_scales = cfg.TEST.SCALES

    save_id = 0
    for i in xrange(num_images):
        # if imdb.image_index[i] != '001547':
        #     continue
        # if i > 100:
        #     break

        if vis:
            import matplotlib.pyplot as plt
            # close all open figure windows
            # plt.close('all')

        box_proposals = roidb[i]['boxes']
        rois_per_this_image = min(cfg.TEST.ROIS_PER_IM, len(box_proposals))
        box_proposals = box_proposals[0:rois_per_this_image, :]

        if cfg.USE_ROI_SCORE:
            box_scores = roidb[i]['box_scores']
        else:
            box_scores = None

        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()

        scores = None
        boxes = None
        for target_size in test_scales:
            if cfg.CSC_DEBUG:
                save_path = os.path.join(vis_dir, str(save_id) + '_.png')
                save_debug_im(im, target_size, save_path)
                save_id += 1

            cfg.TEST.SCALES = (target_size, )
            scores_scale, boxes_scale = im_detect(net, im, box_proposals,
                                                  box_scores)
            if scores is None:
                scores = scores_scale
                boxes = boxes_scale
            else:
                # TODO(YH): something to do
                scores += scores_scale
                assert np.array_equal(boxes, boxes_scale), \
                    'boxes at each scale should be the same'

            if cfg.CSC_DEBUG:
                os.remove(save_path)

        if cfg.TEST.USE_FLIPPED:
            im_flip = im[:, ::-1, :]
            box_proposals_flip = box_proposals.copy()
            oldx1 = box_proposals_flip[:, 0].copy()
            oldx2 = box_proposals_flip[:, 2].copy()
            box_proposals_flip[:, 0] = im.shape[1] - oldx2 - 1
            box_proposals_flip[:, 2] = im.shape[1] - oldx1 - 1

            for target_size in test_scales:
                boxes_scale_o = boxes_scale
                if cfg.CSC_DEBUG:
                    save_path = os.path.join(vis_dir, str(save_id) + '_.png')
                    save_debug_im(im_flip, target_size, save_path)
                    save_id += 1

                cfg.TEST.SCALES = (target_size, )
                scores_scale, boxes_scale = im_detect(net, im_flip,
                                                      box_proposals_flip,
                                                      box_scores)
                scores += scores_scale

                if cfg.CSC_DEBUG:
                    os.remove(save_path)

        _t['im_detect'].toc()

        _t['misc'].tic()
        # NOTE: do NOT skip j = 0 here; there is no background class, so every
        # class index is evaluated.
        for j in xrange(0, imdb.num_classes):
            if 'trainval' in imdb.name:
                if imdb.image_classes_at(i)[j] == 0:
                    all_boxes[j][i] = np.zeros((0, 5), dtype=np.float32)
                    all_boxes_o[j][i] = np.zeros((0, 5), dtype=np.float32)
                    continue

            all_scores[j][i] = sum(scores[:, j])

            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            if vis:
                # vis_heatmap(im, i, imdb.classes[j], cls_dets, thresh=0.3)
                # vis_detections_highest(
                #     im, imdb.classes[j], cls_dets, thresh=0.3)
                vis_detections(im, imdb.classes[j], cls_dets, thresh=0.03)
            all_boxes[j][i] = cls_dets

            # keep the original (pre-threshold, pre-NMS) detections
            cls_scores_o = scores[:, j]
            cls_boxes_o = boxes[:, j * 4:(j + 1) * 4]
            cls_dets_o = np.hstack((cls_boxes_o, cls_scores_o[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            all_boxes_o[j][i] = cls_dets_o

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in xrange(0, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(0, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
            .format(i + 1, num_images, _t['im_detect'].average_time,
                    _t['misc'].average_time)

    if cfg.CSC_DEBUG:
        return

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    det_file_o = os.path.join(output_dir, 'detections_o.pkl')
    with open(det_file_o, 'wb') as f:
        cPickle.dump(all_boxes_o, f, cPickle.HIGHEST_PROTOCOL)

    print 'Evaluating detections'
    imdb.evaluate_detections(all_boxes, output_dir, all_scores=all_scores)
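
# NOTE: illustrative sketch only. The per-image cap above pools the scores of
# every class, takes the `max_per_image`-th highest score as a threshold, and
# keeps only detections scoring at least that much in every class. The toy
# example below shows the same logic for two classes and max_per_image = 3.
def _demo_max_per_image_cap():
    import numpy as np
    max_per_image = 3
    # fake per-class detections; the last column is the score
    dets = {0: np.array([[0, 0, 10, 10, 0.9],
                         [0, 0, 10, 10, 0.2]], dtype=np.float32),
            1: np.array([[5, 5, 20, 20, 0.8],
                         [5, 5, 20, 20, 0.5],
                         [5, 5, 20, 20, 0.1]], dtype=np.float32)}
    image_scores = np.hstack([d[:, -1] for d in dets.values()])
    if len(image_scores) > max_per_image:
        image_thresh = np.sort(image_scores)[-max_per_image]   # 3rd highest = 0.5
        for j in dets:
            keep = np.where(dets[j][:, -1] >= image_thresh)[0]
            dets[j] = dets[j][keep, :]
    # the 0.9, 0.8 and 0.5 detections survive; 0.2 and 0.1 are dropped
    return dets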