def _evaluate_predictions_on_lvis(lvis_gt, lvis_results, iou_type, class_names=None):
    """
    Args:
        iou_type (str):
        class_names (None or list[str]): if provided, will use it to predict
            per-category AP.

    Returns:
        a dict of {metric name: score}
    """
    metrics = ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"]

    logger = logging.getLogger(__name__)

    if len(lvis_results) == 0:  # TODO: check if needed
        logger.warning("No predictions from the model! Set scores to -1")
        return {metric: -1 for metric in metrics}

    from lvis import LVISEval, LVISResults

    lvis_results = LVISResults(lvis_gt, lvis_results)
    lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()

    # Pull the standard metrics from the LVIS results
    results = lvis_eval.get_results()
    results = {metric: float(results[metric] * 100) for metric in metrics}
    logger.info(
        "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
    )
    return results
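A minimal usage sketch for the helper above (not part of the original snippets; the file paths are placeholders and the surrounding module is assumed to provide create_small_table):

# Hedged usage sketch: annotation/prediction paths are assumptions, not real files.
import json
import logging

from lvis import LVIS

logging.basicConfig(level=logging.INFO)

lvis_gt = LVIS("data/lvis/lvis_v1_val.json")            # placeholder annotation path
with open("output/coco_instances_results.json") as f:   # placeholder predictions path
    predictions = json.load(f)                           # list of COCO-format detections

metrics = _evaluate_predictions_on_lvis(lvis_gt, predictions, iou_type="bbox")
print(metrics["AP"], metrics["APr"])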
def test_gt_boxes_as_anns(ann, ann_path, ann_type='bbox'):
    annotations = ann['annotations']
    template_pre = {
        'image_id': 0,
        'category_id': 0,
        'bbox': [0., 0., 0., 0.],
        'score': 1.
    }
    gt_to_pre = []
    for idx, annotation in enumerate(annotations):
        if idx % 10 == 0:
            print("{}/{}".format(idx, len(annotations)), end="\r")
        pre = copy.deepcopy(template_pre)
        pre['image_id'] = annotation['image_id']
        pre['category_id'] = annotation['category_id']
        pre['bbox'] = annotation['bbox']
        pre['score'] = 1.0
        gt_to_pre.append(pre)

    PRE_OUT_PATH = "./data/lvis_gt_pred.json"
    with open(PRE_OUT_PATH, "w") as f:
        json.dump(gt_to_pre, f)
    print("Stored GT to pred JSON.\n")

    lvis_eval = LVISEval(ann_path, PRE_OUT_PATH, ann_type)
    print("Constructed lvis_eval object.")
    lvis_eval.run()
    print("Finished lvis_eval.run()")
    lvis_eval.print_results()
def _evaluate_predictions_on_lvis(lvis_gt, lvis_results, iou_type, class_names=None):
    metrics = {
        "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
        "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
    }[iou_type]

    logger = logging.getLogger(__name__)

    if len(lvis_results) == 0:
        logger.warning("No predictions from the model!")
        return {metric: float("nan") for metric in metrics}

    if iou_type == "segm":
        lvis_results = copy.deepcopy(lvis_results)
        for c in lvis_results:
            c.pop("bbox", None)

    from lvis import LVISEval, LVISResults

    lvis_results = LVISResults(lvis_gt, lvis_results)
    lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()

    results = lvis_eval.get_results()
    results = {metric: float(results[metric] * 100) for metric in metrics}
    logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results))
    return results
def _evaluate_predictions_on_lvis(
    lvis_gt, lvis_results, iou_type, max_dets=None, class_names=None
):
    """
    Copied from detectron2.evaluation.lvis_evaluation, with support for max_dets.

    Args:
        iou_type (str):
        max_dets (None or int)
        class_names (None or list[str]): if provided, will use it to predict
            per-category AP.

    Returns:
        a dict of {metric name: score}
    """
    metrics = {
        "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
        "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
    }[iou_type]

    logger = logging.getLogger(__name__)

    if len(lvis_results) == 0:  # TODO: check if needed
        logger.warning("No predictions from the model!")
        return {metric: float("nan") for metric in metrics}

    if iou_type == "segm":
        lvis_results = copy.deepcopy(lvis_results)
        # When evaluating mask AP, if the results contain bbox, LVIS API will
        # use the box area as the area of the instance, instead of the mask area.
        # This leads to a different definition of small/medium/large.
        # We remove the bbox field to let mask AP use mask area.
        for c in lvis_results:
            c.pop("bbox", None)

    from lvis import LVISEval, LVISResults

    #####
    # <modified>
    if max_dets is None:
        max_dets = 300
    lvis_results_obj = LVISResults(lvis_gt, lvis_results, max_dets=max_dets)
    lvis_eval = LVISEval(lvis_gt, lvis_results_obj, iou_type)
    lvis_eval.params.max_dets = max_dets
    # </modified>
    #####

    lvis_eval.run()
    lvis_eval.print_results()

    # Pull the standard metrics from the LVIS results
    results = lvis_eval.get_results()
    results = {metric: float(results[metric] * 100) for metric in metrics}
    logger.info(
        f"Evaluation results for {iou_type}, max_dets {max_dets} \n"
        + create_small_table(results)
    )
    return results
def _evaluate_predictions_on_lvis(lvis_gt, lvis_results, iou_type,
                                  max_dets_per_image=None, class_names=None):
    """
    Args:
        iou_type (str):
        max_dets_per_image (None or int): limit on the maximum number of detections
            per image when evaluating AP. The LVIS default is 300.
        class_names (None or list[str]): if provided, will use it to predict
            per-category AP.

    Returns:
        a dict of {metric name: score}
    """
    metrics = {
        "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
        "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
    }[iou_type]

    logger = logging.getLogger(__name__)

    if len(lvis_results) == 0:  # TODO: check if needed
        logger.warning("No predictions from the model!")
        return {metric: float("nan") for metric in metrics}

    if iou_type == "segm":
        lvis_results = copy.deepcopy(lvis_results)
        # When evaluating mask AP, if the results contain bbox, LVIS API will
        # use the box area as the area of the instance, instead of the mask area.
        # This leads to a different definition of small/medium/large.
        # We remove the bbox field to let mask AP use mask area.
        for c in lvis_results:
            c.pop("bbox", None)

    if max_dets_per_image is None:
        max_dets_per_image = 300  # Default for the LVIS dataset

    from lvis import LVISEval, LVISResults

    logger.info(f"Evaluating with max detections per image = {max_dets_per_image}")
    lvis_results = LVISResults(lvis_gt, lvis_results, max_dets=max_dets_per_image)
    lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()

    # Pull the standard metrics from the LVIS results
    results = lvis_eval.get_results()
    results = {metric: float(results[metric] * 100) for metric in metrics}
    logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results))
    return results
def do_lvis_evaluation(
    dataset,
    gt_path,
    predictions,
    box_only,
    output_folder,
    iou_types,
    iteration,
):
    logger = logging.getLogger("maskrcnn_benchmark.inference")

    if box_only:
        logger.info("Evaluating bbox proposals")
        areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
        res = COCOResults("box_proposal")
        for limit in [100, 1000]:
            for area, suffix in areas.items():
                stats = evaluate_box_proposals(
                    predictions, dataset, area=area, limit=limit
                )
                key = "AR{}@{:d}".format(suffix, limit)
                res.results["box_proposal"][key] = stats["ar"].item()
        logger.info(res)
        if output_folder:
            torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
        return

    logger.info("Preparing results for LVIS format")
    lvis_results = prepare_for_lvis_evaluation(predictions, dataset, iou_types)
    if len(lvis_results) == 0:
        return {}

    dt_path = os.path.join(output_folder, "lvis_dt.json")
    import json
    with open(dt_path, "w") as f:
        json.dump(lvis_results, f)

    logger.info("Evaluating predictions")
    lvis_eval_info = {}
    for iou_type in iou_types:
        lvis_eval = LVISEval(gt_path, dt_path, iou_type)
        lvis_eval.run()
        print(iou_type)
        lvis_eval.print_results()

        keys = lvis_eval.get_results().keys()
        for k in keys:
            lvis_eval_info[iou_type + k] = lvis_eval.get_results()[k]

        save_path = os.path.join(output_folder, str(iteration))
        mkdir(save_path)
        lvis_eval_percat = LVISEvalPerCat(gt_path, dt_path, iou_type, save_path)
        lvis_eval_percat.run()
        lvis_eval_percat.print_results()

    return lvis_eval_info
def evaluate_predictions_on_lvis(coco_results, result_path, annotation_path, iou_type):
    import json
    with open(result_path, "w") as f:
        json.dump(coco_results, f)

    from lvis import LVIS, LVISEval
    lvis_eval = LVISEval(annotation_path, result_path, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()
    return lvis_eval
def lvis_eval(result_files,
              result_types,
              lvis,
              ann_file,
              max_dets=(100, 300, 1000),
              existing_json=None):
    ANNOTATION_PATH = ann_file
    print('gt: ', ANNOTATION_PATH)
    for res_type in result_types:
        assert res_type in [
            'proposal', 'proposal_fast', 'proposal_fast_percat', 'bbox', 'segm',
            'keypoints'
        ]

    if mmcv.is_str(lvis):
        lvis = LVIS(lvis)
    assert isinstance(lvis, LVIS)

    if result_types == ['proposal_fast']:
        ar = lvis_fast_eval_recall(result_files, lvis, np.array(max_dets))
        for i, num in enumerate(max_dets):
            print('AR@{}\t= {:.4f}'.format(num, ar[i]))
        return
    elif result_types == ['proposal_fast_percat']:
        assert existing_json is not None
        per_cat_recall = {}
        for cat_id in range(1, 1231):
            ar = lvis_fast_eval_recall(result_files, lvis, np.array(max_dets),
                                       category_id=cat_id)
            for i, num in enumerate(max_dets):
                per_cat_recall.update({cat_id: ar})
                print('cat{} AR@{}\t= {:.4f}'.format(cat_id, num, ar[i]))
        pickle.dump(per_cat_recall,
                    open('./{}_per_cat_recall.pt'.format(existing_json), 'wb'))
        return

    for res_type in result_types:
        result_file = result_files[res_type]
        assert result_file.endswith('.json')

        iou_type = 'bbox' if res_type == 'proposal' else res_type
        lvisEval = LVISEval(ANNOTATION_PATH, result_file, iou_type)
        # lvisEval.params.imgIds = img_ids
        if res_type == 'proposal':
            lvisEval.params.use_cats = 0
            lvisEval.params.max_dets = list(max_dets)
        lvisEval.run()
        lvisEval.print_results()
def eval_partial_results(epoch, dset_name, validation_path):
    results = []
    mAP = -1
    directory = 'bbox_results/temp_res'
    for filename in os.listdir(directory):
        if filename.endswith(".json"):
            temp_name = os.path.join(directory, filename)
            with open(temp_name, 'rb') as f:
                results = list(itertools.chain(results, pickle.load(f)))

    cwd = os.getenv('owd')
    validation_path = os.path.join(cwd, validation_path)
    if not os.path.exists(f'bbox_results/{dset_name}/'):
        os.makedirs(f'bbox_results/{dset_name}/')
    json.dump(results,
              open(f'./bbox_results/{dset_name}/results_{epoch}.json', 'w'),
              indent=4)
    resFile = f'./bbox_results/{dset_name}/results_{epoch}.json'

    if (dset_name == 'coco') or (dset_name == 'drones'):
        cocoGt = COCO(validation_path)
        try:
            cocoDt = cocoGt.loadRes(resFile)
        except IndexError:
            print('empty list return zero map')
            return 0
        cocoDt.loadAnns()
        # running evaluation
        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        mAP = cocoEval.stats[0]
    elif dset_name == 'lvis':
        lvis_eval = LVISEval(validation_path, resFile, 'bbox')
        lvis_eval.run()
        metrics = lvis_eval.get_results()
        lvis_eval.print_results()
        mAP = metrics['AP']

    return mAP
def evaluate(self, max_dets_per_image=None):
    all_preds, main_process = self.synchronize_between_processes()
    if main_process:
        if max_dets_per_image is None:
            max_dets_per_image = 300

        eval_imgs = [lvis_res['image_id'] for lvis_res in all_preds]
        gt_subset = LvisEvaluator._make_lvis_subset(self.lvis_gt, eval_imgs)

        for iou_type in self.iou_types:
            print('Evaluating for iou', iou_type)
            if iou_type == "segm":
                # See:
                # https://detectron2.readthedocs.io/en/latest/_modules/detectron2/evaluation/lvis_evaluation.html
                lvis_results = copy.deepcopy(all_preds)
                for c in lvis_results:
                    c.pop("bbox", None)
            else:
                lvis_results = all_preds

            lvis_results = LVISResults(gt_subset, lvis_results,
                                       max_dets=max_dets_per_image)
            lvis_eval = LVISEval(gt_subset, lvis_results, iou_type)
            lvis_eval.params.img_ids = list(set(eval_imgs))
            lvis_eval.run()
            self.lvis_eval_per_iou[iou_type] = lvis_eval
    else:
        self.lvis_eval_per_iou = None

    if dist.is_initialized():
        dist.barrier()

    result_dict = None
    if self.lvis_eval_per_iou is not None:
        result_dict = dict()
        for iou, eval_data in self.lvis_eval_per_iou.items():
            result_dict[iou] = dict()
            for key in eval_data.results:
                value = eval_data.results[key]
                result_dict[iou][key] = value

    return result_dict
def eval_results(results, dset_name, validation_path):
    cwd = os.getenv('owd')
    validation_path = os.path.join(cwd, validation_path)
    if not os.path.exists(f'bbox_results/{dset_name}/'):
        os.makedirs(f'bbox_results/{dset_name}/')
    rid = random.randint(0, 1000000)
    json.dump(results,
              open(f'./bbox_results/{dset_name}/results_{rid}.json', 'w'),
              indent=4)
    resFile = f'./bbox_results/{dset_name}/results_{rid}.json'

    if (dset_name == 'coco') or (dset_name == 'drones'):
        cocoGt = COCO(validation_path)
        try:
            cocoDt = cocoGt.loadRes(resFile)
        except IndexError:
            print('empty list return zero map')
            return 0
        cocoDt.loadAnns()
        # running evaluation
        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        mAP = cocoEval.stats[0]
    elif dset_name == 'lvis':
        try:
            lvis_eval = LVISEval(validation_path, resFile, 'bbox')
        except IndexError:
            print('empty list return zero map')
            return 0
        lvis_eval.run()
        metrics = lvis_eval.get_results()
        lvis_eval.print_results()
        mAP = metrics['AP']

    os.remove(resFile)
    return mAP
def lvis_eval(resfile, res_type):
    print('*********evaluating *{}*'.format(res_type))
    # tmp_file = osp.join(runner.work_dir, 'temp_0')
    # result_files = results2json(self.dataset, results, tmp_file)
    ANNOTATION_PATH = 'data/lvis/annotations/lvis_v0.5_val.json'
    # cocoGt = self.dataset.coco
    # imgIds = cocoGt.getImgIds()
    # for res_type in res_types:
    #     try:
    #         cocoDt = cocoGt.loadRes(result_files[res_type])
    #     except IndexError:
    #         print('No prediction found.')
    #         break
    iou_type = res_type
    lvis_eval = LVISEval(ANNOTATION_PATH, resfile, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()
def _evaluate_predictions_on_lvis(lvis_gt, lvis_results, iou_type):
    """
    Evaluate the lvis results using LVISEval API.
    """
    assert len(lvis_results) > 0

    if iou_type == "segm":
        lvis_results = copy.deepcopy(lvis_results)
        # When evaluating mask AP, if the results contain bbox, LVIS API will
        # use the box area as the area of the instance, instead of the mask area.
        # This leads to a different definition of small/medium/large.
        # We remove the bbox field to let mask AP use mask area.
        for c in lvis_results:
            c.pop("bbox", None)

    lvis_results = LVISResults(lvis_gt, lvis_results)
    lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
    lvis_eval.run()
    lvis_eval.print_results()
    return lvis_eval
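Since this variant returns the LVISEval object rather than a metrics dict, the caller reads scores off it; a hedged follow-up sketch (key names follow the get_results() usage in the other snippets):

# Hedged follow-up sketch: assumes lvis_gt and lvis_results were prepared as above.
lvis_eval = _evaluate_predictions_on_lvis(lvis_gt, lvis_results, iou_type="segm")
results = lvis_eval.get_results()   # dict such as {"AP": ..., "AP50": ..., "APr": ...}
mask_ap = 100.0 * results["AP"]     # LVIS reports AP in [0, 1]; scale for readability
print("segm AP: {:.2f}".format(mask_ap))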
def evaluate(self, runner, results):
    tmp_file = osp.join(runner.work_dir, 'temp_0')
    result_files = results2json(self.dataset, results, tmp_file)

    res_types = ['bbox', 'segm'] if runner.model.module.with_mask else ['bbox']
    ANNOTATION_PATH = 'data/lvis/annotations/lvis_v0.5_val.json'
    # cocoGt = self.dataset.coco
    # imgIds = cocoGt.getImgIds()
    for res_type in res_types:
        # try:
        #     cocoDt = cocoGt.loadRes(result_files[res_type])
        # except IndexError:
        #     print('No prediction found.')
        #     break
        iou_type = res_type
        lvis_eval = LVISEval(ANNOTATION_PATH, result_files[res_type], iou_type)
        lvis_eval.run()
        lvis_eval.print_results()
def main():
    config = fetch_config()
    print_args_stdout(config)
    ipdb.set_trace()

    print("Running eval.")
    lvis_eval = LVISEval(config.ann_path, config.results_path, config.ann_type)
    lvis_eval.run()
    lvis_eval.print_results()
    print("Finished eval.")
    ipdb.set_trace()

    # All precision values: 10 x 101 x 1230 x 4
    # precision has dims (iou, recall, cls, area range)
    precisions = lvis_eval.eval['precision']

    with open(config.ann_path, 'r') as outfile:
        gt = json.load(outfile)
    cat_metas = gt['categories']
    cats = []
    for cat_meta in cat_metas:
        cats.append((cat_meta['id'], cat_meta['name']))
    cats.sort(key=itemgetter(0))
    class_names = [cat[1] for cat in cats]

    area_type = 0
    results_per_category, per_cat_results = fetch_aps(precisions, class_names, area_type)
    print("mAP for area type {}: {}".format(area_type,
                                            evaluate_map(results_per_category)))
    # Print for eye-balling.
    # print_aps(results_per_category, class_names, n_cols=6)

    # Store results_per_category into a JSON.
    with open(config.aps_json_path, 'w') as json_file:
        json.dump(per_cat_results, json_file, indent=4)

    # Store the 4D precisions tensor as a PKL.
    with open(config.prec_pkl_path, 'wb') as pkl_file:
        pickle.dump(precisions, pkl_file)
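fetch_aps and evaluate_map are not shown in the snippet above; the following is a hedged, hypothetical reconstruction of what such helpers could look like, written only against the (iou, recall, cls, area range) layout stated in the snippet's own comments:

import numpy as np

def fetch_aps(precisions, class_names, area_type=0):
    # Hypothetical helper (assumption, not the original code): average the
    # precision array over IoU thresholds and recall points for one class and
    # one area range; entries of -1 mark cells that were never evaluated.
    results_per_category = []
    per_cat_results = {}
    for idx, name in enumerate(class_names):
        p = precisions[:, :, idx, area_type]
        p = p[p > -1]
        ap = float(np.mean(p)) if p.size else float("nan")
        results_per_category.append((name, ap))
        per_cat_results[name] = ap
    return results_per_category, per_cat_results

def evaluate_map(results_per_category):
    # Hypothetical helper: mean AP over categories that had valid precision entries.
    aps = [ap for _, ap in results_per_category if not np.isnan(ap)]
    return float(np.mean(aps)) if aps else float("nan")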
def do_lvis_evaluation(
    dataset,
    gt_path,
    predictions,
    output_folder,
    iou_types,
    iteration,
):
    logger = logging.getLogger("maskrcnn_benchmark.inference")

    logger.info("Preparing results for LVIS format")
    lvis_results = prepare_for_lvis_evaluation(predictions, dataset, iou_types)
    if len(lvis_results) == 0:
        return {}

    dt_path = os.path.join(output_folder, "lvis_dt.json")
    import json
    with open(dt_path, "w") as f:
        json.dump(lvis_results, f)

    logger.info("Evaluating predictions")
    lvis_eval_info = {}
    for iou_type in iou_types:
        lvis_eval = LVISEval(gt_path, dt_path, iou_type)
        lvis_eval.run()
        print(iou_type)
        lvis_eval.print_results()

        keys = lvis_eval.get_results().keys()
        for k in keys:
            lvis_eval_info[iou_type + k] = lvis_eval.get_results()[k]

        save_path = os.path.join(output_folder, str(iteration))
        mkdir(save_path)
        lvis_eval_percat = LVISEvalPerCat(gt_path, dt_path, iou_type, save_path)
        lvis_eval_percat.run()
        lvis_eval_percat.print_results()

    return lvis_eval_info
get_count = num_get[v].sum().astype(np.float64)
acc = get_count / ins_count
print(template.format('(ACC)', '0.50:0.95', 'all', 300, k, acc * 100))

# with open('tempcls.pkl', 'rb') as fin:
#     savelist = pickle.load(fin)
# num_get = savelist[0]
# num_ins = savelist[1]
# splitbin = get_split_bin()
# accumulate_acc(num_ins, num_get, splitbin)

# result and val files for 100 randomly sampled images.
ANNOTATION_PATH = "data/lvis/lvis_v0.5_val.json"
RESULT_PATH_BBOX = args.boxjson

print('Eval Bbox:')
ANN_TYPE = 'bbox'
lvis_eval = LVISEval(ANNOTATION_PATH, RESULT_PATH_BBOX, ANN_TYPE)
lvis_eval.run()
lvis_eval.print_results()

if not args.segjson == 'None':
    RESULT_PATH_SEGM = args.segjson
    print('Eval Segm:')
    ANN_TYPE = 'segm'
    lvis_eval = LVISEval(ANNOTATION_PATH, RESULT_PATH_SEGM, ANN_TYPE)
    lvis_eval.run()
    lvis_eval.print_results()
def evaluate(self,
             results,
             metric='bbox',
             logger=None,
             jsonfile_prefix=None,
             classwise=False,
             proposal_nums=300,
             iou_thrs=np.arange(0.5, 0.96, 0.05)):
    """Evaluation in COCO protocol.

    Args:
        results (list): Testing results of the dataset.
        metric (str | list[str]): Metrics to be evaluated.
        logger (logging.Logger | str | None): Logger used for printing
            related information during evaluation. Default: None.
        jsonfile_prefix (str | None): The prefix of json files. It includes
            the file path and the prefix of filename, e.g., "a/b/prefix".
            If not specified, a temp file will be created. Default: None.
        classwise (bool): Whether to evaluate the AP for each class.
        proposal_nums (Sequence[int]): Proposal number used for evaluating
            recalls, such as recall@100, recall@1000.
            Default: (100, 300, 1000).
        iou_thrs (Sequence[float]): IoU threshold used for evaluating
            recalls. If set to a list, the average recall of all IoUs will
            also be computed. Default: 0.5.

    Returns:
        dict[str: float]
    """
    metrics = metric if isinstance(metric, list) else [metric]
    allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
    for metric in metrics:
        if metric not in allowed_metrics:
            raise KeyError('metric {} is not supported'.format(metric))

    result_files = self.format_results(results, jsonfile_prefix)

    eval_results = {}
    cocoGt = self.coco
    for metric in metrics:
        msg = 'Evaluating {}...'.format(metric)
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)

        if metric == 'proposal_fast':
            ar = self.fast_eval_recall(results, proposal_nums, iou_thrs,
                                       logger='silent')
            log_msg = []
            for i, num in enumerate(proposal_nums):
                eval_results['AR@{}'.format(num)] = ar[i]
                log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
            log_msg = ''.join(log_msg)
            print_log(log_msg, logger=logger)
            continue

        if metric not in result_files:
            raise KeyError('{} is not in results'.format(metric))
        try:
            cocoDt = cocoGt.loadRes(result_files[metric])
        except IndexError:
            print_log('The testing results of the whole dataset is empty.',
                      logger=logger,
                      level=logging.ERROR)
            break

        iou_type = 'bbox' if metric == 'proposal' else metric

        # run lvis evaluation
        eval_results['lvis'] = {}
        lvis_eval = LVISEval(self.ann_file_path, result_files[metric], iou_type)
        lvis_eval.params.max_dets = proposal_nums
        lvis_eval.run()
        lvis_eval.print_results()
        print('=====> The above metric is {}.'.format(iou_type))
        keys = lvis_eval.get_results().keys()
        for k in keys:
            eval_results['lvis'][iou_type + k] = lvis_eval.get_results()[k]

    return eval_results
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    while not osp.isfile(args.checkpoint):
        print('Waiting for {} to exist...'.format(args.checkpoint))
        time.sleep(60)

    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    # assert not distributed
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        # data_loader.dataset.img_infos = data_loader.dataset.img_infos[:10]
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    if dataset.ann_file == 'data/coco/annotations/image_info_test-dev2017.json':
                        result_files = results2json_segm(dataset, outputs, args.out, dump=True)
                    else:
                        result_files = results2json_segm(dataset, outputs, args.out, dump=False)
                    if 'lvis' in dataset.ann_file:
                        # an ugly fix to make it compatible with coco eval
                        from lvis import LVISEval
                        lvisEval = LVISEval(cfg.data.test.ann_file, result_files, 'segm')
                        lvisEval.run()
                        lvisEval.print_results()
                        # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                        lvisEval.params.iou_thrs[8] = 0.9
                        for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                            print('AP at iou {}: {}'.format(
                                iou, lvisEval._summarize('ap', iou_thr=iou)))
                    else:
                        coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_, result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

        ## eval on lvis-77 ######
        cfg.data.test.ann_file = 'data/lvis/lvis_v0.5_val_cocofied.json'
        cfg.data.test.img_prefix = 'data/lvis/val2017/'
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        # model_orig = model.module
        # model = MMDataParallel(model, device_ids=[0]).cuda()
        # data_loader.dataset.img_infos = data_loader.dataset.img_infos[:10]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset, outputs, 'xxx', dump=False)
                    from lvis import LVISEval
                    lvisEval = LVISEval(
                        'data/lvis/lvis_v0.5_val_cocofied.json', result_files, 'segm')
                    lvisEval.run()
                    lvisEval.print_results()
                    # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                    lvisEval.params.iou_thrs[8] = 0.9
                    for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                        print('AP at iou {}: {}'.format(
                            iou, lvisEval._summarize('ap', iou_thr=iou)))
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_, result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split("\n")[0] if __doc__ else "",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("annotations_json", type=Path)
    parser.add_argument("results_json", type=Path)
    parser.add_argument("output_dir", type=Path)
    parser.add_argument("--type", default="segm", choices=["segm", "bbox"])
    parser.add_argument("--dets-per-cat", default=10000, type=int)
    parser.add_argument("--ious", nargs="*", type=float)
    args = parser.parse_args()

    args.output_dir.mkdir(exist_ok=True, parents=True)
    logger = setup_logger(output=str(args.output_dir.resolve()), name=__file__)
    log_path = args.output_dir / "log.txt"

    assert args.dets_per_cat > 0
    with open(args.results_json, "r") as f:
        results = json.load(f)

    by_cat = defaultdict(list)
    for ann in results:
        by_cat[ann["category_id"]].append(ann)

    results = []
    topk = args.dets_per_cat
    missing_dets_cats = set()
    for cat, cat_anns in by_cat.items():
        if len(cat_anns) < topk:
            missing_dets_cats.add(cat)
        results.extend(
            sorted(cat_anns, key=lambda x: x["score"], reverse=True)[:topk])
    if missing_dets_cats:
        logger.warning(
            f"\n===\n"
            f"{len(missing_dets_cats)} classes had less than {topk} detections!\n"
            f"Outputting {topk} detections for each class will improve AP further.\n"
            f"If using detectron2, please use the lvdevil/infer_topk.py script to "
            f"output a results file with {topk} detections for each class.\n"
            f"===")

    gt = LVIS(args.annotations_json)
    results = LVISResults(gt, results, max_dets=-1)
    lvis_eval = LVISEval(gt, results, iou_type=args.type)
    params = lvis_eval.params
    params.max_dets = -1  # No limit on detections per image.
    if args.ious:
        params.iou_thrs = args.ious
    lvis_eval.run()
    lvis_eval.print_results()

    metrics = {
        k: v for k, v in lvis_eval.results.items() if k.startswith("AP")
    }
    logger.info("copypaste: %s,%s", ",".join(map(str, metrics.keys())), "path")
    logger.info(
        "copypaste: %s,%s",
        ",".join(f"{v*100:.2f}" for v in metrics.values()),
        log_path,
    )
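A hedged invocation example for the script above (the script filename is a placeholder; the positional and optional arguments come from the argparse setup):

# python evaluate_lvis_topk.py lvis_v1_val.json results.json ./eval_output \
#     --type segm --dets-per-cat 10000 --ious 0.5 0.75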
def _non_dist_train(model, dataset, cfg, validate=False, logger=None,
                    timestamp=None):
    if validate:
        raise NotImplementedError('Built-in validation is not implemented '
                                  'yet in not-distributed training. Use '
                                  'distributed training or test.py and '
                                  '*eval.py scripts instead.')

    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False) for ds in dataset
    ]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

    # # if model.module.bbox_head.freeze_solov2_and_train_combonly:
    # if model.module.bbox_head.optimize_list is not None:
    #     for (key, param) in model.named_parameters():
    #         # if 'kernel_convs_convcomb' not in key and 'context_fusion_convs' not in key and 'learned_weight' not in key:
    #         if not any(s in key for s in model.module.bbox_head.optimize_list):
    #             param.requires_grad = False
    #         else:
    #             # print('optimize {}'.format(key))
    #             logger.info('optimize {}'.format(key))

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir, logger=logger)
    # an ugly workaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    # fp16 setting
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=False)
    else:
        optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)

    ## add test after training
    if cfg.data.test.ann_file != 'data/lvis/lvis_v0.5_val_lvis_freqset.json':
        # if val set is lvis freq, only eval on lvis-freq val set
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        model_orig = model.module
        model = MMDataParallel(model, device_ids=[0]).cuda()
        data_loader.dataset.img_infos = data_loader.dataset.img_infos[:100]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset, outputs, 'xxx', dump=False)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_, result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

        ## eval on lvis-77 ######
        cfg.data.test.ann_file = 'data/lvis/lvis_v0.5_val_cocofied.json'
        cfg.data.test.img_prefix = 'data/lvis/val2017/'
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        # model_orig = model.module
        # model = MMDataParallel(model, device_ids=[0]).cuda()
        data_loader.dataset.img_infos = data_loader.dataset.img_infos[:100]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset, outputs, 'xxx', dump=False)
                    from lvis import LVISEval
                    lvisEval = LVISEval('data/lvis/lvis_v0.5_val_cocofied.json',
                                        result_files, 'segm')
                    lvisEval.run()
                    lvisEval.print_results()
                    # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                    lvisEval.params.iou_thrs[8] = 0.9
                    for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                        print('AP at iou {}: {}'.format(
                            iou, lvisEval._summarize('ap', iou_thr=iou)))
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_, result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)
    else:
        ## eval on lvis-freq ######
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        # model_orig = model.module
        # model = MMDataParallel(model, device_ids=[0]).cuda()
        data_loader.dataset.img_infos = data_loader.dataset.img_infos[:100]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset, outputs, 'xxx', dump=False)
                    from lvis import LVISEval
                    lvisEval = LVISEval(cfg.data.test.ann_file, result_files, 'segm')
                    lvisEval.run()
                    lvisEval.print_results()
                    # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                    lvisEval.params.iou_thrs[8] = 0.9
                    for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                        print('AP at iou {}: {}'.format(
                            iou, lvisEval._summarize('ap', iou_thr=iou)))
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_, result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)