def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection.

    Dispatches to the evaluator appropriate for the dataset: COCO-style
    JSON evaluation, Cityscapes (via COCO metrics/conversions), or
    PASCAL VOC.

    Args:
        dataset: dataset object; its ``name`` attribute selects the
            evaluator and keys the returned results.
        all_boxes: detection results to evaluate.
        output_dir: directory where evaluation artifacts are written.
        use_matlab: if True, use the MATLAB evaluation code (VOC only).

    Returns:
        OrderedDict mapping ``dataset.name`` to its box results.

    Raises:
        NotImplementedError: if no evaluator exists for the dataset.
    """
    logger.info('Evaluating detections')
    # Outside competition mode, salt result filenames and clean them up
    # afterwards so concurrent/repeated runs do not collide.
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp,
            cleanup=not_comp)
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_cityscapes_evaluator(dataset):
        # logger.warn is a deprecated alias of logger.warning
        logger.warning(
            'Cityscapes bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp,
            cleanup=not_comp)
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_matlab=use_matlab)
        box_results = _voc_eval_to_box_results(voc_eval)
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name))
    return OrderedDict([(dataset.name, box_results)])
def evaluate_boxes(dataset, all_boxes, output_dir, all_cls_scores,
                   test_corloc=False, use_matlab=False):
    """Evaluate bounding box detection (VOC-only variant).

    Args:
        dataset: dataset object; its ``name`` attribute selects the
            evaluator and keys the returned results.
        all_boxes: detection results to evaluate.
        output_dir: directory where evaluation artifacts are written.
        all_cls_scores: per-class scores forwarded to the VOC evaluator.
        test_corloc: if True, evaluate CorLoc instead of detection mAP.
        use_matlab: if True, use the MATLAB evaluation code.

    Returns:
        OrderedDict mapping ``dataset.name`` to its box results.

    Raises:
        NotImplementedError: if the dataset is not a VOC dataset.
    """
    logger.info('Evaluating detections')
    # NOTE: unlike the COCO path, no salt/cleanup flag is needed here; the
    # previously computed `not cfg.TEST.COMPETITION_MODE` was unused and
    # has been removed.
    if _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, all_cls_scores,
            test_corloc=test_corloc, use_matlab=use_matlab)
        box_results = _voc_eval_to_box_results(voc_eval)
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name))
    return OrderedDict([(dataset.name, box_results)])
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection results for a dataset.

    Selects the COCO-style JSON, Cityscapes, or PASCAL VOC evaluator
    based on the dataset and returns the results keyed by dataset name.
    """
    logger.info('Evaluating detections')
    # Salt/cleanup result files whenever we are not in competition mode.
    salt_and_clean = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        results = _coco_eval_to_box_results(
            json_dataset_evaluator.evaluate_boxes(
                dataset, all_boxes, output_dir,
                use_salt=salt_and_clean, cleanup=salt_and_clean))
        return OrderedDict([(dataset.name, results)])
    if _use_cityscapes_evaluator(dataset):
        logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
        results = _coco_eval_to_box_results(
            json_dataset_evaluator.evaluate_boxes(
                dataset, all_boxes, output_dir,
                use_salt=salt_and_clean, cleanup=salt_and_clean))
        return OrderedDict([(dataset.name, results)])
    if _use_voc_evaluator(dataset):
        # VOC results land in the shared VOCdevkit results directory, so
        # that evaluator always salts and always cleans up on its own.
        results = _voc_eval_to_box_results(
            voc_dataset_evaluator.evaluate_boxes(
                dataset, all_boxes, output_dir, use_matlab=use_matlab))
        return OrderedDict([(dataset.name, results)])
    raise NotImplementedError(
        'No evaluator for dataset: {}'.format(dataset.name))
def test_net_on_dataset(args, dataset, detections, use_matlab=True,
                        early_stop=False):
    """Run VOC-style evaluation of precomputed detections over a dataset.

    Args:
        args: namespace providing ``output_dir``.
        dataset: dataset providing ``num_classes``, ``image_set``,
            ``images``, ``proposals``, and ``__len__``.
        detections: mapping from image identifier to a detection tensor,
            or None for images without detections.
        use_matlab: forwarded to the VOC evaluator (use MATLAB eval code).
        early_stop: if True, stop after ~10 images (debugging aid).

    Returns:
        The result of ``voc_dataset_evaluator.evaluate_boxes``.
    """
    num_images = len(dataset)
    # +1 presumably accounts for the background class — TODO confirm
    # against the empty_results() convention.
    num_classes = dataset.num_classes + 1
    final_boxes = empty_results(num_classes, num_images)
    # CorLoc is measured on the training split, detection mAP otherwise.
    test_corloc = 'train' in dataset.image_set
    # Pre-bind so the `del` below cannot raise NameError when the loop
    # body never executes (empty dataset) or never enters the per-image
    # branch that assigns it.
    cls_boxes_i = None
    for i in tqdm(range(num_images)):
        if early_stop and i > 10:
            break
        detect = detections[dataset.images[i]]
        proposals = dataset.proposals[i]
        if detect is not None:
            if test_corloc:
                _, _, cls_boxes_i = box_results_for_corloc(
                    detect.numpy(), proposals)
            else:
                _, _, cls_boxes_i = box_results_with_nms_and_limit(
                    detect.numpy(), proposals)
            extend_results(i, final_boxes, cls_boxes_i)
        else:
            # NOTE(review): this invalidates all results but keeps looping;
            # a later non-None detect would call extend_results on None.
            # Preserved as-is — verify whether a `break` was intended.
            final_boxes = None
    # Release the (potentially large) detection tensors before evaluating.
    del detections
    del cls_boxes_i
    voc_eval = voc_dataset_evaluator.evaluate_boxes(
        dataset, final_boxes, args.output_dir,
        test_corloc=test_corloc, use_matlab=use_matlab)
    return voc_eval