def evaluation(dataset, all_boxes, all_segms, all_parss, all_pscores, clean_up=True):
    """Run COCO-style evaluation over the collected test predictions.

    Evaluates "bbox" always, plus "segm" / "parsing" when the corresponding
    model heads are enabled in ``cfg``, logs the metrics, and optionally
    saves then deletes the ``<cfg.CKPT>/test`` output folder.

    Args:
        dataset: dataset wrapper; only its ``.coco`` (pycocotools COCO object)
            is used here.
        all_boxes: box detections in COCO-result format.
        all_segms: segmentation results (used only when ``cfg.MODEL.MASK_ON``).
        all_parss: parsing results (used only when ``cfg.MODEL.PARSING_ON``).
        all_pscores: parsing scores paired with ``all_parss``.
        clean_up (bool): if True, remove the output folder after saving
            ``coco_results.pth`` into it.

    Returns:
        tuple: ``(results, coco_results)`` — the ``COCOResults`` accumulator
        and the raw per-iou-type result dict.
    """
    output_folder = os.path.join(cfg.CKPT, 'test')
    # No expected-result regression targets configured; sigma tolerance of 4
    # is passed through to check_expected_results unchanged.
    expected_results = ()
    expected_results_sigma_tol = 4

    # Gather the iou types to evaluate and the matching result payloads.
    coco_results = {}
    iou_types = ("bbox", )
    coco_results["bbox"] = all_boxes
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm", )
        coco_results["segm"] = all_segms
    if cfg.MODEL.PARSING_ON:
        iou_types = iou_types + ("parsing", )
        coco_results['parsing'] = [all_parss, all_pscores]

    results = COCOResults(*iou_types)
    logging_rank("Evaluating predictions", local_rank=0)
    for iou_type in iou_types:
        if iou_type == "parsing":
            # Parsing uses its own evaluator fed directly from the dataset
            # catalog; it supports exactly one test dataset.
            eval_ap = cfg.PRCNN.EVAL_AP
            num_parsing = cfg.PRCNN.NUM_PARSING
            assert len(cfg.TEST.DATASETS) == 1, 'Parsing only support one dataset now'
            im_dir = dataset_catalog.get_im_dir(cfg.TEST.DATASETS[0])
            ann_fn = dataset_catalog.get_ann_fn(cfg.TEST.DATASETS[0])
            res = evaluate_parsing(
                coco_results[iou_type], eval_ap, cfg.PRCNN.SCORE_THRESH,
                num_parsing, im_dir, ann_fn, output_folder)
            results.update_parsing(res)
        else:
            # The temp file is only a fallback destination: output_folder is
            # built from cfg.CKPT above and is normally truthy, so the JSON
            # results land in "<output_folder>/<iou_type>.json".
            with tempfile.NamedTemporaryFile() as f:
                file_path = f.name
                if output_folder:
                    file_path = os.path.join(output_folder, iou_type + ".json")
                res = evaluate_predictions_on_coco(
                    dataset.coco, coco_results[iou_type], file_path, iou_type)
                results.update(res)
    logging_rank(results, local_rank=0)
    check_expected_results(results, expected_results, expected_results_sigma_tol)
    if output_folder:
        torch.save(results, os.path.join(output_folder, "coco_results.pth"))
    # NOTE(review): original indentation was lost; clean_up is read here as a
    # sibling of the save above (matching upstream test engines). With
    # output_folder always non-empty the two readings behave identically.
    if clean_up:
        shutil.rmtree(output_folder)
    return results, coco_results
def get_image_count_frequency(num_classes, dataset_name):
    """Return per-class image-count frequencies for a dataset.

    For each category in the annotation file, the frequency is
    ``image_count / total_images``, where ``total_images`` is the number of
    distinct image ids appearing in the "annotations" list. Categories whose
    (1-based) id does not appear in the file leave ``None`` at that index,
    matching the original behavior.

    Args:
        num_classes (int): length of the returned list.
        dataset_name (str): dataset key resolved via ``get_ann_fn``.

    Returns:
        list: ``num_classes`` entries of float frequency or ``None``.

    Raises:
        ValueError: if the annotation file contains no annotations, which
            would otherwise make the frequency undefined (previously this
            surfaced as an opaque ZeroDivisionError).
    """
    ann_file = get_ann_fn(dataset_name)
    with open(ann_file, "r") as f:
        data = json.load(f)

    # Count distinct images that carry at least one annotation.
    all_image_count = len({ann["image_id"] for ann in data["annotations"]})
    if all_image_count == 0:
        raise ValueError(
            "Dataset '{}' has no annotations; "
            "image-count frequency is undefined".format(dataset_name))

    image_count_frequency = [None] * num_classes
    for c in data["categories"]:
        category_id = c["id"] - 1  # convert 1-based category id to 0-based index
        image_count_frequency[category_id] = c["image_count"] / all_image_count
    return image_count_frequency
def build_dataset(dataset_list, is_train=True, local_rank=0):
    """Create a COCO-style dataset (or a concatenation of several).

    Args:
        dataset_list (list | tuple): names of the datasets to assemble.
        is_train (bool): training mode; enables annotation filtering and
            selects the training transforms.
        local_rank (int): rank forwarded to logging_rank.

    Returns:
        A single ``D.COCODataset``, or ``D.ConcatDataset`` when more than
        one name is given.
    """
    if not isinstance(dataset_list, (list, tuple)):
        raise RuntimeError(
            "dataset_list should be a list of strings, got {}".format(dataset_list))

    # Validate every requested dataset before building anything.
    for name in dataset_list:
        assert contains(name), 'Unknown dataset name: {}'.format(name)
        assert os.path.exists(get_im_dir(name)), \
            'Im dir \'{}\' not found'.format(get_im_dir(name))
        logging_rank('Creating: {}'.format(name), local_rank=local_rank)

    transforms = build_transforms(is_train)

    # Annotation types depend only on cfg, so compute them once up front.
    ann_types = ('bbox', )
    if cfg.MODEL.MASK_ON:
        ann_types = ann_types + ('segm', )
    if cfg.MODEL.KEYPOINT_ON:
        ann_types = ann_types + ('keypoints', )
    if cfg.MODEL.PARSING_ON:
        ann_types = ann_types + ('parsing', )
    if cfg.MODEL.UV_ON:
        ann_types = ann_types + ('uv', )

    datasets = [
        D.COCODataset(
            root=get_im_dir(name),
            ann_file=get_ann_fn(name),
            remove_images_without_annotations=is_train,
            ann_types=ann_types,
            transforms=transforms,
        )
        for name in dataset_list
    ]

    # For training, several datasets are concatenated into a single one.
    return datasets[0] if len(datasets) == 1 else D.ConcatDataset(datasets)
def get_box_result():
    """Build COCO-format box "detections" straight from the ground truth.

    Reads the annotation file of the single configured test dataset and emits
    every annotated box with score 1.0. Each annotation also carries a 'hier'
    list of 5-value groups ``(x1, y1, x2, y2, flag)``; groups with a positive
    flag are emitted as extra boxes in xywh form, with category id
    ``group_index + 2``.

    Returns:
        list[dict]: COCO result dicts with keys image_id, category_id,
        bbox, score.
    """
    results = []
    with open(dataset_catalog.get_ann_fn(cfg.TEST.DATASETS[0])) as f:
        annotations = json.load(f)['annotations']

    for ann in annotations:
        image_id = ann['image_id']
        results.append({
            "image_id": image_id,
            "category_id": ann['category_id'],
            "bbox": ann['bbox'],
            "score": 1.0,
        })
        hier = ann['hier']
        for part_idx in range(len(hier) // 5):
            x1, y1, x2, y2, flag = hier[part_idx * 5:part_idx * 5 + 5]
            if flag > 0:
                # Convert (x1, y1, x2, y2) corners to COCO xywh
                # (inclusive-pixel width/height, hence the +1).
                results.append({
                    "image_id": image_id,
                    "category_id": part_idx + 2,
                    "bbox": [x1, y1, x2 - x1 + 1, y2 - y1 + 1],
                    "score": 1.0,
                })
    return results