Example #1
    def __init__(self, dataset_name, cfg, distributed, output_dir=None):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have the following corresponding metadata:
                "json_file": the path to the LVIS format annotation
            cfg (CfgNode): config instance
            distributed (bool): if True, will collect results from all ranks for evaluation.
                Otherwise, will evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump results.
        """
        from lvis import LVIS

        self._tasks = self._tasks_from_config(cfg)
        self._distributed = distributed
        self._output_dir = output_dir

        self._cpu_device = torch.device("cpu")
        self._logger = logging.getLogger(__name__)

        self._metadata = MetadataCatalog.get(dataset_name)
        json_file = PathManager.get_local_path(self._metadata.json_file)
        self._lvis_api = LVIS(json_file)
        # Test set json files do not contain annotations (evaluation must be
        # performed using the LVIS evaluation server).
        self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0
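
The annotation-presence check at the end can be reproduced directly against the lvis api; a minimal sketch, with a hypothetical local annotation path:

from lvis import LVIS

lvis_api = LVIS("datasets/lvis/lvis_v1_val.json")  # hypothetical path
# train/val jsons contain annotations; test-set jsons return an empty id list
print(len(lvis_api.get_ann_ids()) > 0)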
Example #2
 def load_annotations(self, ann_file):
     try:
         from lvis import LVIS
     except ImportError:
         raise ImportError('Please follow config/lvis/README.md to '
                           'install open-mmlab forked lvis first.')
     self.coco = LVIS(ann_file)
     assert not self.custom_classes, 'LVIS custom classes is not supported'
     self.cat_ids = self.coco.get_cat_ids()
     self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
     self.img_ids = self.coco.get_img_ids()
     data_infos = []
     for i in self.img_ids:
         info = self.coco.load_imgs([i])[0]
         info['filename'] = info["coco_url"].split("/")[-2]+'/'+info["coco_url"].split("/")[-1]
         # print(list(info))
         # print(info['file_name'])
         # if info['file_name'].startswith('COCO'):
         #     # Convert form the COCO 2014 file naming convention of
         #     # COCO_[train/val/test]2014_000000000000.jpg to the 2017
         #     # naming convention of 000000000000.jpg
         #     # (LVIS v1 will fix this naming issue)
         #     info['filename'] = info['file_name'][-16:]
         # else:
         #     info['filename'] = info['file_name']
         data_infos.append(info)
     return data_infos
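
The split-based path derivation above and the prefix-stripping variant in the next example produce the same relative path; a quick check, using the url quoted in that example's comments:

url = "http://images.cocodataset.org/train2017/000000391895.jpg"
parts = url.split("/")
assert parts[-2] + "/" + parts[-1] == "train2017/000000391895.jpg"
assert url.replace("http://images.cocodataset.org/", "") == "train2017/000000391895.jpg"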
Example #3
 def load_annotations(self, ann_file):
     try:
         import lvis
         if getattr(lvis, '__version__', '0') >= '10.5.3':
             warnings.warn(
                 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"',  # noqa: E501
                 UserWarning)
         from lvis import LVIS
     except ImportError:
         raise ImportError(
             'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".'  # noqa: E501
         )
     self.coco = LVIS(ann_file)
     self.cat_ids = self.coco.get_cat_ids()
     self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
     self.img_ids = self.coco.get_img_ids()
     data_infos = []
     for i in self.img_ids:
         info = self.coco.load_imgs([i])[0]
         # coco_url is used in LVISv1 instead of file_name
         # e.g. http://images.cocodataset.org/train2017/000000391895.jpg
         # the train/val split is specified in the url
         info['filename'] = info['coco_url'].replace(
             'http://images.cocodataset.org/', '')
         data_infos.append(info)
     return data_infos
Example #4
    def __init__(self, dataset_name, meta, cfg, distributed, output_dir=None, dump=False):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have the following corresponding metadata:

                    "json_file": the path to the LVIS format annotation

            meta (SimpleNamespace): dataset metadata.
            cfg (CfgNode): cvpods Config instance.
            distributed (bool): if True, will collect results from all ranks for evaluation.
                Otherwise, will evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump results.
            dump (bool): If True, after the evaluation is completed, a Markdown file
                that records the model evaluation metrics and corresponding scores
                will be generated in the working directory.
        """
        from lvis import LVIS
        # TODO: really use dataset_name
        self.dataset_name = dataset_name
        self._dump = dump
        self._tasks = self._tasks_from_config(cfg)
        self._distributed = distributed
        self._output_dir = output_dir
        self._cpu_device = torch.device("cpu")

        self._metadata = meta
        # json_file = PathManager.get_local_path(self._metadata.json_file)
        self._lvis_api = LVIS(self._metadata.json_file)
        # Test set json files do not contain annotations (evaluation must be
        # performed using the LVIS evaluation server).
        self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0
Example #5
    def load_annotations(self, ann_file):
        """Load annotation from lvis style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from LVIS api.
        """

        try:
            from lvis import LVIS
        except ImportError:
            raise ImportError('Please follow config/lvis/README.md to '
                              'install open-mmlab forked lvis first.')
        self.coco = LVIS(ann_file)
        assert not self.custom_classes, 'LVIS custom classes is not supported'
        self.cat_ids = self.coco.get_cat_ids()
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            if info['file_name'].startswith('COCO'):
                # Convert from the COCO 2014 file naming convention of
                # COCO_[train/val/test]2014_000000000000.jpg to the 2017
                # naming convention of 000000000000.jpg
                # (LVIS v1 will fix this naming issue)
                info['filename'] = info['file_name'][-16:]
            else:
                info['filename'] = info['file_name']
            data_infos.append(info)
        return data_infos
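
The [-16:] slice works because the 2017 name is a fixed-width 12-digit image id plus the .jpg extension, 16 characters in total; a quick check on a 2014-style name:

name = "COCO_val2014_000000391895.jpg"
assert name[-16:] == "000000391895.jpg"  # 12 digits + ".jpg" = 16 chars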
Example #6
 def load_annotations(self, ann_file):
     try:
         import lvis
         assert lvis.__version__ >= '10.5.3'
         from lvis import LVIS
     except AssertionError:
         raise AssertionError('Incompatible version of lvis is installed. '
                              'Run pip uninstall lvis first. Then run pip '
                              'install mmlvis to install open-mmlab forked '
                              'lvis. ')
     except ImportError:
         raise ImportError('Package lvis is not installed. Please run pip '
                           'install mmlvis to install open-mmlab forked '
                           'lvis.')
     self.coco = LVIS(ann_file)
     assert not self.custom_classes, 'LVIS custom classes is not supported'
     self.cat_ids = self.coco.get_cat_ids()
     self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
     self.img_ids = self.coco.get_img_ids()
     data_infos = []
     for i in self.img_ids:
         info = self.coco.load_imgs([i])[0]
         # coco_url is used in LVISv1 instead of file_name
         # e.g. http://images.cocodataset.org/train2017/000000391895.jpg
         # the train/val split is specified in the url
         info['filename'] = info['coco_url'].replace(
             'http://images.cocodataset.org/', '')
         data_infos.append(info)
     return data_infos
Example #7
def build_toy_dataset_lvis(json_input="lvis_v0.5_train",
                           json_output="lvis_v0.5_train_one_forth_new",
                           sampling_rate=0.25):
    """
        Build a sampled version of LVIS dataset for shorter training time.

        Args:
            json_input (str): full path to the LVIS json annotation file.
            json_output (str): full path to the save location of new json file.
            sampling_rate (int): if sampling rate = 1/4, 1/4th of images are saved in json_output.

        """
    from lvis import LVIS
    import json
    import numpy as np
    import os
    from detectron2.utils.logger import setup_logger

    logger = setup_logger(name=__name__)

    cwd = os.getcwd()
    logger.info(
        "Starting to build the sampled LVIS dataset, starting from {}".format(
            cwd))
    annFile_input = './datasets/lvis/{}.json'.format(json_input)
    annFile_output = './datasets/lvis/{}.json'.format(json_output)

    # initialize LVIS api for instance annotations
    lvis = LVIS(annFile_input)

    # copy categories, info and licenses from the original dataset
    data = {
        "info": lvis.dataset["info"],
        "images": [],
        "annotations": [],
        "categories": lvis.dataset["categories"],
        "licenses": lvis.dataset["licenses"]
    }

    logger.info("before permutation")
    # select random image ids according to sampling rate
    img_ids = np.random.permutation(list(lvis.imgs.keys()))
    logger.info("before randomizing")
    img_ids = np.random.choice(img_ids,
                               size=round(len(img_ids) * sampling_rate),
                               replace=False)

    logger.info("before selecting images in dict")
    # adding selected images to the json file
    data["images"] = _search_dict_by_id(lvis.imgs, img_ids)

    logger.info("before filtering annotations")
    # adding corresponding annotations to json file
    for imgid in img_ids:
        for ann in lvis.img_ann_map[imgid]:
            data["annotations"].append(ann)

    logger.info("before writing into the file")
    with open(annFile_output, 'w') as outfile:
        json.dump(data, outfile, indent=4)
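
A hedged usage sketch for the helper above, assuming the annotation file lives under ./datasets/lvis/ as the function expects:

# writes ./datasets/lvis/lvis_v0.5_train_tenth.json with roughly 10% of the images
build_toy_dataset_lvis(json_input="lvis_v0.5_train",
                       json_output="lvis_v0.5_train_tenth",
                       sampling_rate=0.1)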
Example #8
    def _load_metadata(self) -> bool:
        must_load_api = self.lvis_api is None
        must_load_img_ids = self.img_ids is None
        try:
            # Load metadata
            if must_load_api:
                if self.train:
                    ann_json_path = str(self.root / "lvis_v1_train.json")
                else:
                    ann_json_path = str(self.root / "lvis_v1_val.json")

                self.lvis_api = LVIS(ann_json_path)

            if must_load_img_ids:
                self.img_ids = list(sorted(self.lvis_api.get_img_ids()))

            self.targets = LVISDetectionTargets(self.lvis_api, self.img_ids)

            # Try loading an image
            if len(self.img_ids) > 0:
                img_id = self.img_ids[0]
                img_dict: LVISImgEntry = \
                    self.lvis_api.load_imgs(ids=[img_id])[0]
                assert self._load_img(img_dict) is not None
        except BaseException:
            if must_load_api:
                self.lvis_api = None
            if must_load_img_ids:
                self.img_ids = None

            self.targets = None
            raise

        return True
Example #9
def eval_single(data):
    global GLOBAL_SYN
    global GLOBAL_RESULTS
    category, json_file_path, score_threshold, results_path, save_path, text_output = data
    sys.stdout = open(text_output, 'w')  # redirect prints to a per-task log file

    score_threshold = float(score_threshold)

    lvis_api = LVIS(json_file_path)
    lvis_api.load_cats(None)

    if not GLOBAL_SYN:
        syn_to_id = {}
        for cat_id, cat_data in lvis_api.cats.items():
            syn_to_id[cat_data['synonyms'][0]] = cat_id
        GLOBAL_SYN = syn_to_id

    syn_to_id = GLOBAL_SYN
    category_id = syn_to_id[category]

    if results_path not in GLOBAL_RESULTS:
        lvis_results = LVISResults(lvis_api, results_path)
        GLOBAL_RESULTS[results_path] = lvis_results

    lvis_results = GLOBAL_RESULTS[results_path]

    lvis_eval(lvis_api,
              lvis_results,
              category_id,
              score_threshold=score_threshold)
    sys.stdout.close()
Example #10
def load_lvis_json(json_file, image_root, dataset_name=None):
    from lvis import LVIS

    json_file = PathManager.get_local_path(json_file)

    timer = Timer()
    lvis_api = LVIS(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))

    if dataset_name is not None:
        meta = get_lvis_instances_meta(dataset_name)
        MetadataCatalog.get(dataset_name).set(**meta)

    img_ids = sorted(lvis_api.imgs.keys())
    imgs = lvis_api.load_imgs(img_ids)
    anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]

    ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
    assert len(set(ann_ids)) == len(ann_ids), \
        f"Annotation ids in '{json_file}' are not unique"

    imgs_anns = list(zip(imgs, anns))

    logger.info(f"Loaded {len(imgs_anns)} images in the LVIS format from {json_file}")

    dataset_dicts = []

    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        file_name = img_dict["file_name"]
        if img_dict["file_name"].startswith("COCO"):
            file_name = file_name[-16:]
        record["file_name"] = os.path.join(image_root, file_name)
        record["height"] = img_dict["height"]
        record["width"] = img_dict["width"]
        record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
        record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
        image_id = record["image_id"] = img_dict["id"]

        objs = []
        for anno in anno_dict_list:
            assert anno["image_id"] == image_id

            obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
            obj["category_id"] = anno["category_id"] - 1
            segm = anno["segmentation"]
            valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
            assert len(segm) == len(valid_segm), \
                "Annotation contains an invalid polygon with < 3 points"
            assert len(segm) > 0

            obj["segmentation"] = segm
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)

    return dataset_dicts
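
A minimal usage sketch for load_lvis_json with hypothetical paths; passing dataset_name=None skips the metadata registration branch:

dicts = load_lvis_json("datasets/lvis/lvis_v0.5_val.json",  # hypothetical path
                       "datasets/coco",
                       dataset_name=None)
print(len(dicts), dicts[0]["file_name"], len(dicts[0]["annotations"]))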
Example #11
    def load_annotations(self, ann_file):
        self.lvis = LVIS(ann_file)
        self.cat_ids = self.lvis.get_cat_ids()
        self.cat2label = {
            cat_id: i + 1
            for i, cat_id in enumerate(self.cat_ids)
        }
        # placeholder lists of length num_categories, overwritten index-by-index below
        self.CLASSES = list(self.cat_ids)
        self.cat_instance_count = list(self.cat_ids)
        self.cat_image_count = list(self.cat_ids)
        img_count_lbl = ["r", "c", "f"]
        self.freq_groups = [[] for _ in img_count_lbl]
        self.cat_group_idxs = list(self.cat_ids)
        freq_group_count = {'f': 0, 'cf': 0, 'rcf': 0}
        self.cat_fake_idxs = {
            'f': [-1 for _ in self.cat_ids],
            'cf': [-1 for _ in self.cat_ids],
            'rcf': [-1 for _ in self.cat_ids]
        }
        self.freq_group_dict = {'rcf': (0, 1, 2), 'cf': (1, 2), 'f': (2, )}
        for value in self.lvis.cats.values():
            idx = value['id'] - 1
            self.CLASSES[idx] = value['name']
            self.cat_instance_count[idx] = value['instance_count']
            self.cat_image_count[idx] = value['image_count']
            group_idx = img_count_lbl.index(value["frequency"])
            self.freq_groups[group_idx].append(idx + 1)
            self.cat_group_idxs[idx] = group_idx
            if group_idx == 0:  # rare
                freq_group_count['rcf'] += 1
                self.cat_fake_idxs['rcf'][idx] = freq_group_count['rcf']
            elif group_idx == 1:  # common
                freq_group_count['rcf'] += 1
                freq_group_count['cf'] += 1
                self.cat_fake_idxs['rcf'][idx] = freq_group_count['rcf']
                self.cat_fake_idxs['cf'][idx] = freq_group_count['cf']
            elif group_idx == 2:  # freq
                freq_group_count['rcf'] += 1
                freq_group_count['cf'] += 1
                freq_group_count['f'] += 1
                self.cat_fake_idxs['rcf'][idx] = freq_group_count['rcf']
                self.cat_fake_idxs['cf'][idx] = freq_group_count['cf']
                self.cat_fake_idxs['f'][idx] = freq_group_count['f']

        if self.samples_per_cls_file is not None:
            with open(self.samples_per_cls_file, 'w') as file:
                file.writelines(str(x) + '\n' for x in self.cat_instance_count)

        self.img_ids = self.lvis.get_img_ids()
        img_infos = []
        for i in self.img_ids:
            info = self.lvis.load_imgs([i])[0]
            info['filename'] = info['file_name']
            img_infos.append(info)
        return img_infos
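
The r/c/f bookkeeping above relies on the per-category 'frequency' field; the group sizes can be inspected directly, a sketch with a hypothetical annotation path:

from collections import Counter
from lvis import LVIS

lvis = LVIS("datasets/lvis/lvis_v0.5_train.json")  # hypothetical path
print(Counter(cat["frequency"] for cat in lvis.cats.values()))  # sizes of the r/c/f groups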
Example #12
 def __init__(
         self,
         root: str,
         annFile: str,
         transform: Optional[Callable] = None,
         target_transform: Optional[Callable] = None,
         transforms: Optional[Callable] = None,
 ) -> None:
     super(LVISDetection, self).__init__(root, transforms, transform, target_transform)
     self.lvis = LVIS(annFile)
     self.ids = list(sorted(self.lvis.imgs.keys()))
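
A usage sketch for this torchvision-style wrapper, assuming the class also implements __getitem__/__len__ as in torchvision's CocoDetection (both paths are hypothetical):

ds = LVISDetection(root="datasets/coco", annFile="datasets/lvis/lvis_v1_train.json")
print(len(ds.ids))  # number of images indexed by the dataset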
Example #13
    def __init__(self, run_path, model_ckpt):
        self.lvis_gt = LVIS(ANNOTATION_PATH)
        self.lvis_dt = LVISResults(self.lvis_gt, PREDICTION_PATH)
        self.run_path = run_path
        self.model_ckpt = model_ckpt

        self._build_coco_to_lvis_map()
        cocoEval = LVISEval(self.lvis_gt, self.lvis_dt, 'segm')
        self.freq_groups = cocoEval._prepare_freq_group()
        config_path = os.path.join(self.run_path, 'config_lvis.yaml')
        self.config = yaml.load(open(config_path, "r"), Loader=yaml.FullLoader)
Example #14
 def __init__(
         self,
         root: str,
         annFile: str,
         subset: float,
         transform: Optional[Callable] = None,
         target_transform: Optional[Callable] = None,
         transforms: Optional[Callable] = None,
 ) -> None:
     super(LVISDetection, self).__init__(root, transforms, transform, target_transform)
     self.lvis = LVIS(annFile)
     self.ids = list(sorted(self.lvis.imgs.keys()))
     
     self.ids = sorted(random.sample(self.ids, int(len(self.ids)*subset)))
Example #15
 def load_annotations(self, ann_file):
     self.lvis = LVIS(ann_file)
     self.cat_ids = self.lvis.get_cat_ids()
     self.cat2label = {
         cat_id: i + 1
         for i, cat_id in enumerate(self.cat_ids)
     }
     self.img_ids = self.lvis.get_img_ids()
     img_infos = []
     for i in self.img_ids:
         info = self.lvis.load_imgs([i])[0]
         info['filename'] = info['file_name']
         img_infos.append(info)
     return img_infos
Example #16
 def load_annotations(self, ann_file):
     self.lvis = LVIS(ann_file)
     COCO_CLASSES = sorted(list(CocoDataset.CLASSES))
     self.synonyms_classes = [
         value['synonyms'] for value in self.lvis.cats.values()
     ]
     self.cat_ids = []
     self.CLASSES = []
     self.not_in_classes = list(COCO_CLASSES)  # copy: removals below must not mutate COCO_CLASSES
     for id in self.lvis.get_cat_ids():
         for name in self.lvis.cats[id]['synonyms']:
             if name in self.LVIS_TO_COCO:
                 self.cat_ids.append(id)
                 self.CLASSES.append(name)
                 self.not_in_classes.remove(self.LVIS_TO_COCO[name])
                 break
             elif '(' not in name and name in COCO_CLASSES:
                 self.cat_ids.append(id)
                 self.CLASSES.append(name)
                 self.not_in_classes.remove(name)
                 break
             elif '_' in name:
                 new_name = name.split('_(')[0]
                 if new_name in COCO_CLASSES:
                     self.cat_ids.append(id)
                     self.CLASSES.append(name)
                     self.not_in_classes.remove(new_name)
                     break
     data_dir = osp.dirname(ann_file)
     with open(osp.join(data_dir, 'synonyms_classes.json'), 'w') as f:
         f.write(json.dumps(self.synonyms_classes, indent=2))
     with open(osp.join(data_dir, 'not_in_classes.json'), 'w') as f:
         f.write(json.dumps(self.not_in_classes, indent=2))
     with open(osp.join(data_dir, 'coco_classes.json'), 'w') as f:
         f.write(json.dumps(COCO_CLASSES, indent=2))
     with open(osp.join(data_dir, 'lvis_coco_classes.json'), 'w') as f:
         f.write(json.dumps(self.CLASSES, indent=2))
     self.CLASSES = tuple(self.CLASSES)
     self.cat2label = {
         cat_id: i + 1
         for i, cat_id in enumerate(self.cat_ids)
     }
     self.CLASSES = CocoDataset.CLASSES
     self.img_ids = self.lvis.get_img_ids()
     img_infos = []
     for i in self.img_ids:
         info = self.lvis.load_imgs([i])[0]
         info['filename'] = info['file_name']
         img_infos.append(info)
     return img_infos
Example #17
    def __init__(self, dataset_name, cfg, distributed, output_dir=None):
        from lvis import LVIS

        self._tasks = self._tasks_from_config(cfg)
        self._distributed = distributed
        self._output_dir = output_dir

        self._cpu_device = torch.device("cpu")
        self._logger = logging.getLogger(__name__)

        self._metadata = MetadataCatalog.get(dataset_name)
        json_file = PathManager.get_local_path(self._metadata.json_file)
        self._lvis_api = LVIS(json_file)
        self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0
Example #18
    def __init__(self, device, config, dt_path=r'output/inference'):
        self.device = device
        self.lvis_gt = LVIS(
            '/scratch/users/zzweng/datasets/lvis/lvis_v0.5_val.json')
        self.dt_path = os.path.join(dt_path, 'lvis_instances_results.json')
        self.dt = LVISResults(self.lvis_gt, self.dt_path)
        self.normalize = Normalize(mean=[0.485, 0.456, 0.406],
                                   std=[0.229, 0.224, 0.225])

        if 'human_car' in config['desc']:
            self.img_ids = self._get_img_ids(cat_ids=[805, 211])
        else:
            self.img_ids = self._get_img_ids(cat_ids=None)
        print('Total number of images selected from the val set: {}'.format(
            len(self.img_ids)))
Example #19
    def __init__(
        self,
        dataset_name,
        tasks=None,
        distributed=True,
        output_dir=None,
        *,
        max_dets_per_image=None,
    ):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have the following corresponding metadata:
                "json_file": the path to the LVIS format annotation
            tasks (tuple[str]): tasks that can be evaluated under the given
                configuration. A task is one of "bbox", "segm".
                By default, will infer this automatically from predictions.
            distributed (bool): if True, will collect results from all ranks for evaluation.
                Otherwise, will evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump results.
            max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP.
                This limit, by default of the LVIS dataset, is 300.
        """
        from lvis import LVIS

        self._logger = logging.getLogger(__name__)

        if tasks is not None and isinstance(tasks, CfgNode):
            self._logger.warning(
                "COCO Evaluator instantiated using config, this is deprecated behavior."
                " Please pass in explicit arguments instead.")
            self._tasks = None  # Inferring it from predictions should be better
        else:
            self._tasks = tasks

        self._distributed = distributed
        self._output_dir = output_dir
        self._max_dets_per_image = max_dets_per_image

        self._cpu_device = torch.device("cpu")

        self._metadata = MetadataCatalog.get(dataset_name)
        json_file = PathManager.get_local_path(self._metadata.json_file)
        self._lvis_api = LVIS(json_file)
        # Test set json files do not contain annotations (evaluation must be
        # performed using the LVIS evaluation server).
        self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0
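
A hedged instantiation sketch, assuming this constructor belongs to detectron2's LVISEvaluator (the class name is not shown in the snippet) and that 'lvis_v1_val' is a registered dataset with a json_file in its metadata:

evaluator = LVISEvaluator("lvis_v1_val", output_dir="./output")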
Example #20
 def load_annotations(self, ann_file):
     try:
         from lvis import LVIS
     except ImportError:
         raise ImportError('Please follow install.md to '
                           'install open-mmlab forked cocoapi first.')
     self.coco = LVIS(ann_file)
     assert not self.custom_classes, 'LVIS custom classes is not supported'
     self.cat_ids = self.coco.get_cat_ids()
     self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
     self.img_ids = self.coco.get_img_ids()
     data_infos = []
     for i in self.img_ids:
         info = self.coco.load_imgs([i])[0]
         info['filename'] = info['file_name']
         data_infos.append(info)
     return data_infos
Example #21
def create_lvis_semantic_from_instance(instance_json, sem_seg_root):
    """
    Create semantic segmentation annotations from instance segmentation
    annotations, to be used by PanopticFPN.

    It maps all thing categories to contiguous ids starting from 1, and maps
    all unlabeled pixels to class 0.

    Args:
        instance_json (str): path to the instance json file, in LVIS format.
        sem_seg_root (str): a directory to output semantic annotation files
    """
    os.makedirs(sem_seg_root, exist_ok=True)

    lvis_detection = LVIS(instance_json)

    def iter_annotations():
        for img_id in lvis_detection.get_img_ids():
            anns_ids = lvis_detection.get_ann_ids([img_id])
            anns = lvis_detection.load_anns(anns_ids)
            img = lvis_detection.load_imgs([img_id])[0]
            file_name = os.path.splitext(img["file_name"])[0]
            output = os.path.join(sem_seg_root, file_name + '.npz')
            yield anns, output, img

    # # single process
    # print("Start writing to {} ...".format(sem_seg_root))
    # start = time.time()
    # for anno, oup, img in iter_annotations():
    #     _process_instance_to_semantic(
    #         anno, oup, img)
    # print("Finished. time: {:.2f}s".format(time.time() - start))
    # return

    pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4))

    print("Start writing to {} ...".format(sem_seg_root))
    start = time.time()
    pool.starmap(
        _process_instance_to_semantic,
        iter_annotations(),
        chunksize=100,
    )
    print("Finished. time: {:.2f}s".format(time.time() - start))
Example #22
    def load_annotations(self, ann_file):
        """Load annotation from lvis style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from LVIS api.
        """

        try:
            import lvis
            assert lvis.__version__ >= '10.5.3'
            from lvis import LVIS
        except AssertionError:
            raise AssertionError('Incompatible version of lvis is installed. '
                                 'Run pip uninstall lvis first. Then run pip '
                                 'install mmlvis to install open-mmlab forked '
                                 'lvis. ')
        except ImportError:
            raise ImportError('Package lvis is not installed. Please run pip '
                              'install mmlvis to install open-mmlab forked '
                              'lvis.')
        self.coco = LVIS(ann_file)
        # assert not self.custom_classes, 'LVIS custom classes is not supported'

        self.cat_ids = self.coco.get_cat_ids()
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            if info['file_name'].startswith('COCO'):
                # Convert from the COCO 2014 file naming convention of
                # COCO_[train/val/test]2014_000000000000.jpg to the 2017
                # naming convention of 000000000000.jpg
                # (LVIS v1 will fix this naming issue)
                info['filename'] = info['file_name'][-16:]
            else:
                info['filename'] = info['file_name']
            data_infos.append(info)
        return data_infos
Example #23
    def __init__(self):
        self.lvis = LVIS('/scratch/users/zzweng/datasets/lvis/lvis_v0.5_val.json')
        self.dt_path = 'output/inference/lvis_instances_results.json'
        self.lvis_dt = LVISResults(self.lvis, self.dt_path)
        
        coco_map = json.load(open('lvis-api/data/coco_to_synset.json'))
        synset_to_lvis = {cat['synset']: cat['id'] for cat in self.lvis.cats.values()}
        synset_to_lvis['oven.n.01'] = synset_to_lvis['toaster_oven.n.01']
        synset_to_lvis['frank.n.02'] = synset_to_lvis['sausage.n.01']

        coco_to_lvis = {}
        lvis_to_coco = {}
        for item in coco_map.values():
            coco_id, lvis_id = item['coco_cat_id'], synset_to_lvis[item['synset']]
            coco_to_lvis[coco_id] = lvis_id
            lvis_to_coco[lvis_id] = coco_id
        self.coco_to_lvis = coco_to_lvis
        self.lvis_to_coco = lvis_to_coco
        cocoEval = LVISEval(self.lvis, self.lvis_dt, 'segm')
        self.freq_groups = cocoEval._prepare_freq_group()
Example #24
def _load_lvis_annotations(json_file: str):
    """
    Load COCO annotations from a JSON file

    Args:
        json_file: str
            Path to the file to load annotations from
    Returns:
        Instance of `pycocotools.coco.COCO` that provides access to annotations
        data
    """
    from lvis import LVIS

    json_file = PathManager.get_local_path(json_file)
    logger = logging.getLogger(__name__)
    timer = Timer()
    lvis_api = LVIS(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
    return lvis_api
Example #25
 def load_annotations(self, ann_file):
     try:
         from lvis import LVIS
     except ImportError:
         raise ImportError('Please follow config/lvis/README.md to '
                           'install open-mmlab forked lvis first.')
     self.coco = LVIS(ann_file)
     assert not self.custom_classes, 'LVIS custom classes is not supported'
     self.cat_ids = self.coco.get_cat_ids()
     self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
     self.img_ids = self.coco.get_img_ids()
     data_infos = []
     for i in self.img_ids:
         info = self.coco.load_imgs([i])[0]
         # coco_url is used in LVISv1 instead of file_name
         # e.g. http://images.cocodataset.org/train2017/000000391895.jpg
         # the train/val split is specified in the url
         info['filename'] = info['coco_url'].replace(
             'http://images.cocodataset.org/', '')
         data_infos.append(info)
     return data_infos
Example #26
def lvis_eval(result_files, result_types, lvis, max_dets=(100, 300, 1000)):
    for res_type in result_types:
        assert res_type in [
            'proposal_fast', 'proposal', 'bbox', 'segm'
        ]

    if mmcv.is_str(lvis):
        lvis = LVIS(lvis)
    assert isinstance(lvis, LVIS)

    img_ids = lvis.get_img_ids()
    for res_type in result_types:
        result_file = result_files['proposal' if res_type == 'proposal_fast' else res_type]
        if isinstance(result_file, str):
            assert result_file.endswith('.json')

        iou_type = 'bbox' if res_type in ['proposal', 'proposal_fast'] else res_type
        lvisEval = LVISEvalCustom(lvis, result_file, iou_type)
        lvisEval.params.img_ids = img_ids
        if res_type == 'proposal_fast':
            lvis_fast_eval_recall(result_file, lvisEval, np.array(max_dets))
            continue
        elif res_type == 'proposal':
            lvisEval.params.use_proposal = True
            for max_det in max_dets:
                lvisEval.params.max_dets = max_det
                lvisEval.run()
                for area_rng in ["small", "medium", "large"]:
                    key = "AR{}@{}".format(area_rng[0], max_det)
                    print('{}={:.3f}'.format(key, lvisEval.get_results()[key]))
                freq_group = lvisEval.params.img_count_lbl
                for idx in range(len(freq_group)):
                    key = "AR{}@{}".format(freq_group[idx][0], max_det)
                    print('{}={:.3f}'.format(key, lvisEval.get_results()[key]))
                key = "AR@{}".format(max_det)
                print('{}={:.3f}'.format(key, lvisEval.get_results()[key]))
            continue
        lvisEval.run()
        print('-' * 8 + '{} results'.format(res_type) + '-' * 8)
        lvisEval.print_results()
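
A usage sketch for lvis_eval; result_files maps result types to result json paths (all paths here are hypothetical):

lvis_eval({"bbox": "results.bbox.json"},
          ["bbox"],
          "datasets/lvis/lvis_v0.5_val.json",
          max_dets=(100, 300, 1000))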
Example #27
    def __init__(self, lvis_gt, lvis_dt, iou_type="segm"):
        """Constructor for LVISEval.
        Args:
            lvis_gt (LVIS class instance, or str containing path of annotation file)
            lvis_dt (LVISResult class instance, or str containing path of result file,
            or list of dict)
            iou_type (str): segm or bbox evaluation
        """
        self.logger = logging.getLogger(__name__)

        if iou_type not in ["bbox", "segm"]:
            raise ValueError("iou_type: {} is not supported.".format(iou_type))

        if isinstance(lvis_gt, LVIS):
            self.lvis_gt = lvis_gt
        elif isinstance(lvis_gt, str):
            self.lvis_gt = LVIS(lvis_gt)
        else:
            raise TypeError("Unsupported type {} of lvis_gt.".format(lvis_gt))

        if isinstance(lvis_dt, LVISResults):
            self.lvis_dt = lvis_dt
        elif isinstance(lvis_dt, (str, list)):
            # set max_dets=-1 to avoid ignoring
            self.lvis_dt = LVISResults(self.lvis_gt, lvis_dt, max_dets=-1)
        else:
            raise TypeError("Unsupported type {} of lvis_dt.".format(lvis_dt))

        # per-image per-category evaluation results
        self.eval_imgs = defaultdict(list)
        self.eval = {}  # accumulated evaluation results
        self._gts = defaultdict(list)  # gt for evaluation
        self._dts = defaultdict(list)  # dt for evaluation
        self.params = ParamsCustom(iou_type=iou_type)  # parameters
        self.results = OrderedDict()
        self.ious = {}  # ious between all gts and dts

        self.params.img_ids = sorted(self.lvis_gt.get_img_ids())
        self.params.cat_ids = sorted(self.lvis_gt.get_cat_ids())
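
The constructor mirrors the stock lvis-api LVISEval, which is driven the same way as in Example #26; a minimal end-to-end sketch with hypothetical paths:

from lvis import LVIS, LVISResults, LVISEval

gt = LVIS("datasets/lvis/lvis_v1_val.json")                 # hypothetical path
dt = LVISResults(gt, "output/lvis_instances_results.json")  # hypothetical path
ev = LVISEval(gt, dt, iou_type="segm")
ev.run()
ev.print_results()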
Example #28
    def load_annotations(self, ann_file):
        """Load annotation from lvis style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from LVIS api.
        """

        try:
            import lvis
            if getattr(lvis, '__version__', '0') >= '10.5.3':
                warnings.warn(
                    'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"',  # noqa: E501
                    UserWarning)
            from lvis import LVIS
        except ImportError:
            raise ImportError(
                'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".'  # noqa: E501
            )
        self.coco = LVIS(ann_file)
        self.cat_ids = self.coco.get_cat_ids()
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            if info['file_name'].startswith('COCO'):
                # Convert from the COCO 2014 file naming convention of
                # COCO_[train/val/test]2014_000000000000.jpg to the 2017
                # naming convention of 000000000000.jpg
                # (LVIS v1 will fix this naming issue)
                info['filename'] = info['file_name'][-16:]
            else:
                info['filename'] = info['file_name']
            data_infos.append(info)
        return data_infos
Example #29
    def load_annotations(self, ann_file):
        self.lvis = LVIS(ann_file)
        self.full_cat_ids = self.lvis.get_cat_ids()
        self.full_cat2label = {
            cat_id: i + 1
            for i, cat_id in enumerate(self.full_cat_ids)
        }

        self.CLASSES = tuple(
            [item['name'] for item in self.lvis.dataset['categories']])
        self.cat_ids = self.lvis.get_cat_ids()
        self.cat2label = {
            cat_id: i + 1
            for i, cat_id in enumerate(self.cat_ids)
        }

        self.img_ids = self.lvis.get_img_ids()
        img_infos = []
        for i in self.img_ids:
            info = self.lvis.load_imgs([i])[0]
            info['filename'] = info['file_name']
            img_infos.append(info)
        return img_infos
Example #30
    def load_annotations(self, ann_file):
        """Load annotation from COCO style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from COCO api.
        """
        try:
            from lvis import LVIS
        except ImportError:
            raise ImportError('Please follow config/lvis/README.md to '
                              'install open-mmlab forked lvis first.')
        self.coco = LVIS(ann_file)
        self.cat_ids = self.coco.get_cat_ids()
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            data_infos.append(info)
        return data_infos