cls_boxes = cls_boxes[top_inds] # push new scores onto the minheap for val in cls_scores: heapq.heappush(top_scores[cls_ind], val) # if we've collected more than the max number of detection, # then pop items off the minheap and update the class threshold if len(top_scores[cls_ind]) > max_per_set: while len(top_scores[cls_ind]) > max_per_set: heapq.heappop(top_scores[cls_ind]) thresh[cls_ind] = top_scores[cls_ind][0] all_boxes[cls_ind][mb_idx] = np.hstack( (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32, copy=False) for j in xrange(1, num_classes): for i in xrange(num_images): if len(all_boxes[j][i]) > 0: inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0] all_boxes[j][i] = all_boxes[j][i][inds, :] neon_logger.display('\nApplying NMS to all detections') all_boxes = valid_set.apply_nms(all_boxes, NMS_THRESH) neon_logger.display('Evaluating detections') output_dir = 'frcn_output' annopath, imagesetfile = valid_set.evaluation( all_boxes, os.path.join(args.data_dir, output_dir)) run_voc_eval(annopath, imagesetfile, year, image_set, PASCAL_VOC_CLASSES, os.path.join(args.data_dir, output_dir))
# Run inference over the whole validation set, collecting per-image bounding
# boxes, then score the detections with the PASCAL VOC evaluation protocol.
prev_msg_len = 0
for batch_idx, (batch_x, _batch_y) in enumerate(valid_set):
    status = "Finished: {} / {}".format(batch_idx, valid_set.nbatches)
    # blank out the previous progress message, then rewrite it in place
    sys.stdout.write('\r' + ' ' * prev_msg_len + '\r')
    sys.stdout.write(status.encode('utf-8'))
    prev_msg_len = len(status)
    sys.stdout.flush()

    # forward pass through the network (inference mode)
    outputs = model.fprop(batch_x, inference=True)

    # image metadata used to map boxes back into original image coordinates
    im_shape = valid_set.im_shape.get()
    im_scale = valid_set.im_scale.get()

    # region proposals produced by the model's proposal layer
    proposals, num_proposals = proposalLayer.get_proposals()

    # decode network outputs into per-class bounding boxes for this image
    all_boxes[batch_idx] = util.get_bboxes(
        outputs, proposals, num_proposals, valid_set.num_classes,
        im_shape, im_scale, max_per_image, thresh, nms_thresh)

neon_logger.display('Evaluating detections')
annopath, imagesetfile = valid_set.evaluation(all_boxes, args.output_dir)
util.run_voc_eval(annopath, imagesetfile, year, image_set,
                  valid_set.CLASSES, args.output_dir)
# --- Fragment: per-class top-K selection, global score-threshold
# bookkeeping, NMS, and PASCAL VOC evaluation.
# NOTE(review): `cls_scores`, `cls_boxes`, `cls_ind`, `mb_idx`, `top_scores`,
# `max_per_image`, `max_per_set`, `thresh` and `all_boxes` come from the
# enclosing loop outside this chunk -- confirm against the full file.
# keep only the `max_per_image` highest-scoring detections for this class
top_inds = np.argsort(-cls_scores)[:max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds]

# push new scores onto the minheap
for val in cls_scores:
    heapq.heappush(top_scores[cls_ind], val)
# if we've collected more than the max number of detection,
# then pop items off the minheap and update the class threshold
# (the `if` guard matters: `thresh` is only updated when pops occurred)
if len(top_scores[cls_ind]) > max_per_set:
    while len(top_scores[cls_ind]) > max_per_set:
        heapq.heappop(top_scores[cls_ind])
    thresh[cls_ind] = top_scores[cls_ind][0]

# store this minibatch's detections as [x1, y1, x2, y2, score] rows
all_boxes[cls_ind][mb_idx] = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(
    np.float32, copy=False)

# drop every stored detection that fell below its class's final threshold
for j in xrange(1, num_classes):
    for i in xrange(num_images):
        if len(all_boxes[j][i]) > 0:
            inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
            all_boxes[j][i] = all_boxes[j][i][inds, :]

# FIX: the originals here were Python 2 `print` statements, which are syntax
# errors under Python 3 and inconsistent with the rest of this file, which
# logs the exact same messages through neon_logger.display.
neon_logger.display('\nApplying NMS to all detections')
all_boxes = valid_set.apply_nms(all_boxes, NMS_THRESH)

neon_logger.display('Evaluating detections')
output_dir = 'frcn_output'
annopath, imagesetfile = valid_set.evaluation(all_boxes,
                                              os.path.join(args.data_dir, output_dir))
run_voc_eval(annopath, imagesetfile, year, image_set, PASCAL_VOC_CLASSES,
             os.path.join(args.data_dir, output_dir))
# NOTE(review): this first line is the tail of an
# `all_boxes = [... for _ in range(num_images)]` comprehension whose opening
# lies before this chunk -- kept verbatim; verify against the full file.
for _ in range(num_images)]

# length of the last progress string written; used to blank the previous
# console line before rewriting it in place
last_strlen = 0
for mb_idx, (x, y) in enumerate(valid_set):
    prt_str = "Finished: {} / {}".format(mb_idx, valid_set.nbatches)
    # overwrite the previous progress message on the same console line
    sys.stdout.write('\r' + ' '*last_strlen + '\r')
    sys.stdout.write(prt_str.encode('utf-8'))
    last_strlen = len(prt_str)
    sys.stdout.flush()

    # perform forward pass
    outputs = model.fprop(x, inference=True)

    # retrieve image metadata (used to map boxes back to image coordinates)
    im_shape = valid_set.im_shape.get()
    im_scale = valid_set.im_scale.get()

    # retrieve region proposals generated by the model
    (proposals, num_proposals) = proposalLayer.get_proposals()

    # convert outputs to bounding boxes
    boxes = util.get_bboxes(outputs, proposals, num_proposals,
                            valid_set.num_classes, im_shape, im_scale,
                            max_per_image, thresh, nms_thresh)
    all_boxes[mb_idx] = boxes

# score the collected detections with the PASCAL VOC evaluation protocol
neon_logger.display('Evaluating detections')
annopath, imagesetfile = valid_set.evaluation(all_boxes, args.output_dir)
util.run_voc_eval(annopath, imagesetfile, year, image_set,
                  valid_set.CLASSES, args.output_dir)