Example #1
    def _eval_predictions(self, tasks):
        """
        Evaluate self._predictions on the given tasks.
        Fill self._results with the metrics of the tasks.
        """
        logger.info("Preparing results for CrowdHuman format ...")
        self._coco_results = self._predictions

        if self._output_dir:
            file_path = os.path.join(self._output_dir,
                                     "coco_instances_results.json")
            logger.info("Saving results to {}".format(file_path))

            with megfile.smart_open(file_path, "w") as f:
                for db in self._coco_results:
                    line = json.dumps(db) + '\n'
                    f.write(line)

        logger.info("Evaluating predictions ...")
        for task in sorted(tasks):
            coco_eval = (
                _evaluate_predictions_on_crowdhuman(self._metadata.json_file,
                                                    file_path)
                if len(self._coco_results) > 0 else
                None  # cocoapi does not handle empty results very well
            )
            res, small_table = self._derive_coco_results(coco_eval, task)
            self._results[task] = res

        return small_table
Example #2
    def evaluate(self):
        if self._distributed:
            comm.synchronize()
            self._predictions = comm.gather(self._predictions, dst=0)
            self._predictions = list(itertools.chain(*self._predictions))

            if not comm.is_main_process():
                return {}

        if len(self._predictions) == 0:
            logger.warning(
                "[COCOEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            ensure_dir(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     "instances_predictions.pth")
            with megfile.smart_open(file_path, "wb") as f:
                torch.save(self._predictions, f)

        self._results = OrderedDict()
        if "instances" in self._predictions[0]:
            self._eval_predictions(set(self._tasks))

        if self._dump:
            _dump_to_markdown(self._dump_infos)

        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)
Example #3
    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a model.
                It is a list of dicts. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name".
            outputs: the outputs of a model. It is either list of semantic segmentation predictions
                (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
                segmentation prediction in the same format.
        """
        for input, output in zip(inputs, outputs):
            output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
            pred = np.array(output, dtype=int)  # np.int was removed in NumPy 1.24
            with megfile.smart_open(
                    self.input_file_to_gt_file[input["file_name"]], "rb") as f:
                gt = np.array(Image.open(f), dtype=int)

            gt[gt == self._ignore_label] = self._num_classes

            self._conf_matrix += np.bincount(
                self._N * pred.reshape(-1) + gt.reshape(-1),
                minlength=self._N**2).reshape(self._N, self._N)

            self._predictions.extend(
                self.encode_json_sem_seg(pred, input["file_name"]))
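The np.bincount call above is a compact way to accumulate a confusion matrix in a single pass. A minimal standalone sketch of the same trick (plain NumPy, illustrative values; N plays the role of self._N, i.e. num_classes + 1, where the extra slot absorbs the ignored pixels remapped above):

import numpy as np

N = 3  # num_classes + 1
pred = np.array([0, 0, 1, 1])
gt = np.array([0, 1, 1, 2])
# the joint index N * pred + gt enumerates every (pred, gt) pair exactly once
conf = np.bincount(N * pred + gt, minlength=N ** 2).reshape(N, N)
# conf[i, j] counts pixels predicted as class i whose ground truth is class j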
Example #4
    def save(self, name: str, tag_checkpoint: bool = True, **kwargs: dict):
        """
        Dump model and checkpointables to a file.

        Args:
            name (str): name of the file.
            tag_checkpoint (bool): whether to tag this checkpoint as the latest one.
            kwargs (dict): extra arbitrary data to save.
        """
        if not self.save_dir or not self.save_to_disk:
            return

        data = {}
        data["model"] = self.model.state_dict()
        for key, obj in self.checkpointables.items():
            data[key] = obj.state_dict()
        data.update(kwargs)

        basename = "{}.pth".format(name)
        save_file = os.path.join(self.save_dir, basename)
        assert os.path.basename(save_file) == basename, basename
        logger.info("Saving checkpoint to {}".format(save_file))
        with megfile.smart_open(save_file, "wb") as f:
            torch.save(data, f)

        if tag_checkpoint:
            self.tag_last_checkpoint(basename)
Example #5
    def _load_semantic_annotations(self, image_dir, gt_dir):
        """
        Args:
            image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
            gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".

        Returns:
            list[dict]: a list of dict, each has "file_name" and
                "sem_seg_file_name".
        """
        ret = []
        for image_file in glob.glob(os.path.join(image_dir, "**/*.png")):
            suffix = "leftImg8bit.png"
            assert image_file.endswith(suffix)
            prefix = image_dir

            label_file = (gt_dir + image_file[len(prefix):-len(suffix)] +
                          "gtFine_labelTrainIds.png")
            assert os.path.isfile(
                label_file
            ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"  # noqa

            json_file = gt_dir + image_file[
                len(prefix):-len(suffix)] + "gtFine_polygons.json"

            with megfile.smart_open(json_file, "r") as f:
                jsonobj = json.load(f)
            ret.append({
                "file_name": image_file,
                "sem_seg_file_name": label_file,
                "height": jsonobj["imgHeight"],
                "width": jsonobj["imgWidth"],
            })
        return ret
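The label path above is derived purely by string slicing: keep the middle of the image path, swap the directory prefix and the filename suffix. With illustrative paths:

image_dir = "~/cityscapes/leftImg8bit/train"
gt_dir = "~/cityscapes/gtFine/train"
suffix = "leftImg8bit.png"
image_file = image_dir + "/aachen/aachen_000000_000019_" + suffix
label_file = (gt_dir + image_file[len(image_dir):-len(suffix)] +
              "gtFine_labelTrainIds.png")
# -> "~/cityscapes/gtFine/train/aachen/aachen_000000_000019_gtFine_labelTrainIds.png"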
Example #6
def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
    """
    Converts dataset into COCO format and saves it to a json file.
    dataset_name must be registered in DatasetCatalog and in cvpods's standard format.
    Args:
        dataset_name:
            reference from the config file to the catalogs
            must be registered in DatasetCatalog and in cvpods's standard format
        output_file: path of json file that will be saved to
        allow_cached: if json file is already present then skip conversion
    """

    # TODO: The dataset or the conversion script *may* change,
    # a checksum would be useful for validating the cached data

    ensure_dir(os.path.dirname(output_file))
    with file_lock(output_file):
        if megfile.smart_exists(output_file) and allow_cached:
            logger.info(
                f"Cached annotations in COCO format already exist: {output_file}"
            )
        else:
            logger.info(
                f"Converting dataset annotations in '{dataset_name}' to COCO format ..."
            )
            coco_dict = convert_to_coco_dict(dataset_name)

            with megfile.smart_open(output_file, "w") as json_file:
                logger.info(
                    f"Caching annotations in COCO format: {output_file}")
                json.dump(coco_dict, json_file)
Example #7
    def _load_annotations(self):
        """
        Load Pascal VOC detection annotations to cvpods format.

        Args:
            dirname: Contain "Annotations", "ImageSets", "JPEGImages"
            split (str): one of "train", "test", "val", "trainval"
        """

        dirname = self.image_root
        split = self.split

        with megfile.smart_open(
                megfile.smart_path_join(dirname, "ImageSets", "Main",
                                        split + ".txt")) as f:
            fileids = np.loadtxt(f, dtype=str)  # np.str was removed in NumPy 1.24

        dicts = []
        for fileid in fileids:
            anno_file = os.path.join(dirname, "Annotations", fileid + ".xml")
            jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")

            tree = ET.parse(anno_file)

            r = {
                "file_name": jpeg_file,
                "image_id": fileid,
                "height": int(tree.findall("./size/height")[0].text),
                "width": int(tree.findall("./size/width")[0].text),
            }
            instances = []

            for obj in tree.findall("object"):
                cls = obj.find("name").text
                # We include "difficult" samples in training.
                # Based on limited experiments, they don't hurt accuracy.
                # difficult = int(obj.find("difficult").text)
                # if difficult == 1:
                # continue
                bbox = obj.find("bndbox")
                bbox = [
                    float(bbox.find(x).text)
                    for x in ["xmin", "ymin", "xmax", "ymax"]
                ]
                # Original annotations are integers in the range [1, W or H]
                # Assuming they mean 1-based pixel indices (inclusive),
                # a box with annotation (xmin=1, xmax=W) covers the whole image.
                # In coordinate space this is represented by (xmin=0, xmax=W)
                bbox[0] -= 1.0
                bbox[1] -= 1.0
                instances.append({
                    "category_id": CLASS_NAMES.index(cls),
                    "bbox": bbox,
                    "bbox_mode": BoxMode.XYXY_ABS
                })
            r["annotations"] = instances
            dicts.append(r)

        return dicts
Example #8
    def __getitem__(self, index):
        """Load data, apply transforms, converto to Instances.
        """
        dataset_dict = copy.deepcopy(self.dataset_dicts[index])

        # read image
        image = read_image(dataset_dict["file_name"], format=self.data_format)
        check_image_size(dataset_dict, image)

        if "annotations" in dataset_dict:
            annotations = dataset_dict.pop("annotations")
            annotations = [
                ann for ann in annotations if ann.get("iscrowd", 0) == 0
            ]
        else:
            annotations = None

        if "sem_seg_file_name" in dataset_dict:
            assert annotations is None
            annotations = []
            with megfile.smart_open(dataset_dict.get("sem_seg_file_name"),
                                    "rb") as f:
                sem_seg_gt = Image.open(f)
                sem_seg_gt = np.asarray(sem_seg_gt, dtype="uint8")
            annotations.append({"sem_seg": sem_seg_gt})

        # apply transforms
        image, annotations = self._apply_transforms(image, annotations)

        if "sem_seg_file_name" in dataset_dict:
            dataset_dict.pop("sem_seg_file_name")
            sem_seg_gt = annotations[0].pop("sem_seg")
            sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
            dataset_dict["sem_seg"] = sem_seg_gt
            annotations = None

        if annotations is not None:
            image_shape = image.shape[:2]  # h, w

            instances = annotations_to_instances(annotations,
                                                 image_shape,
                                                 mask_format=self.mask_format)

            # # Create a tight bounding box from masks, useful when image is cropped
            # if self.crop_gen and instances.has("gt_masks"):
            #     instances.gt_boxes = instances.gt_masks.get_bounding_boxes()

            dataset_dict["instances"] = filter_empty_instances(instances)

        # convert to Instance type
        # PyTorch's dataloader is efficient on torch.Tensor due to shared memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        # h, w, c -> c, h, w
        dataset_dict["image"] = torch.as_tensor(
            np.ascontiguousarray(image.transpose(2, 0, 1)))

        return dataset_dict
Example #9
    def evaluate(self):
        comm.synchronize()

        self._predictions = comm.gather(self._predictions)
        self._predictions = list(itertools.chain(*self._predictions))
        if not comm.is_main_process():
            return

        # gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
        gt_json = self._metadata.panoptic_json
        gt_folder = self._metadata.panoptic_root

        with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
            logger.info(
                "Writing all panoptic predictions to {} ...".format(pred_dir))
            for p in self._predictions:
                with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
                    f.write(p.pop("png_string"))

            with open(gt_json, "r") as f:
                json_data = json.load(f)
            json_data["annotations"] = self._predictions
            with megfile.smart_open(self._predictions_json, "w") as f:
                f.write(json.dumps(json_data))

            from panopticapi.evaluation import pq_compute

            with contextlib.redirect_stdout(io.StringIO()):
                pq_res = pq_compute(
                    gt_json,
                    self._predictions_json,
                    gt_folder=gt_folder,
                    pred_folder=pred_dir,
                )

        res = {}
        res["PQ"] = 100 * pq_res["All"]["pq"]
        res["SQ"] = 100 * pq_res["All"]["sq"]
        res["RQ"] = 100 * pq_res["All"]["rq"]
        res["PQ_th"] = 100 * pq_res["Things"]["pq"]
        res["SQ_th"] = 100 * pq_res["Things"]["sq"]
        res["RQ_th"] = 100 * pq_res["Things"]["rq"]
        res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
        res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
        res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]

        results = OrderedDict({"panoptic_seg": res})
        table = _print_panoptic_results(pq_res)

        if self._dump:
            dump_info_one_task = {
                "task": "panoptic_seg",
                "tables": [table],
            }
            _dump_to_markdown([dump_info_one_task])

        return results
Example #10
    def tag_last_checkpoint(self, last_filename_basename: str):
        """
        Tag the last checkpoint.

        Args:
            last_filename_basename (str): the basename of the last filename.
        """
        save_file = os.path.join(self.save_dir, "last_checkpoint")
        with megfile.smart_open(save_file, "w") as f:
            f.write(last_filename_basename)
Example #11
 def __init__(self, json_file, window_size=20):
     """
     Args:
         json_file (str): path to the json file. New data will be appended if the file exists.
         window_size (int): the window size of median smoothing for the scalars whose
             `smoothing_hint` are True.
     """
     self._file_handle = megfile.smart_open(json_file, "a")
     self._window_size = window_size
     self._last_write = -1
Example #12
def load_proposals_into_dataset(dataset_dicts, proposal_file):
    r"""
    Load precomputed object proposals into the dataset.

    The proposal file should be a pickled dict with the following keys:

    - "ids": list[int] or list[str], the image ids
    - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
    - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
      corresponding to the boxes.
    - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.

    Args:
        dataset_dicts (list[dict]): annotations in cvpods Dataset format.
        proposal_file (str): file path of pre-computed proposals, in pkl format.

    Returns:
        list[dict]: the same format as dataset_dicts, but added proposal field.
    """
    logger.info("Loading proposals from: {}".format(proposal_file))

    with megfile.smart_open(proposal_file, "rb") as f:
        proposals = pickle.load(f, encoding="latin1")

    # Rename the key names in D1 proposal files
    rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
    for key in rename_keys:
        if key in proposals:
            proposals[rename_keys[key]] = proposals.pop(key)

    # Fetch the indexes of all proposals that are in the dataset
    # Convert image_id to str since they could be int.
    img_ids = {str(record["image_id"]) for record in dataset_dicts}
    id_to_index = {
        str(id): i
        for i, id in enumerate(proposals["ids"]) if str(id) in img_ids
    }

    # Assuming the default bbox_mode of precomputed proposals is 'XYXY_ABS'
    bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS

    for record in dataset_dicts:
        # Get the index of the proposal
        i = id_to_index[str(record["image_id"])]

        boxes = proposals["boxes"][i]
        objectness_logits = proposals["objectness_logits"][i]
        # Sort the proposals in descending order of the scores
        inds = objectness_logits.argsort()[::-1]
        record["proposal_boxes"] = boxes[inds]
        record["proposal_objectness_logits"] = objectness_logits[inds]
        record["proposal_bbox_mode"] = bbox_mode

    return dataset_dicts
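For reference, a minimal sketch of a compatible proposal file (ids, box counts, and values are illustrative; every "image_id" in the dataset dicts must appear in "ids", since the lookup above is a plain dict access):

import pickle

import numpy as np

proposals = {
    "ids": [1, 2],
    "boxes": [
        np.random.rand(5, 4).astype(np.float32),  # 5 XYXY_ABS boxes for image 1
        np.random.rand(3, 4).astype(np.float32),  # 3 boxes for image 2
    ],
    "objectness_logits": [
        np.random.randn(5).astype(np.float32),
        np.random.randn(3).astype(np.float32),
    ],
    # "bbox_mode" is omitted on purpose: it then defaults to BoxMode.XYXY_ABS
}
with open("proposals.pkl", "wb") as f:
    pickle.dump(proposals, f)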
Example #13
    def _eval_predictions(self, tasks):
        """
        Evaluate self._predictions on the given tasks.
        Fill self._results with the metrics of the tasks.
        """
        logger.info("Preparing results for COCO format ...")
        self._coco_results = list(
            itertools.chain(*[x["instances"] for x in self._predictions]))

        # unmap the category ids for COCO
        if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
            reverse_id_mapping = {
                v: k
                for k, v in
                self._metadata.thing_dataset_id_to_contiguous_id.items()
            }
            for result in self._coco_results:
                category_id = result["category_id"]
                assert (
                    category_id in reverse_id_mapping
                ), "A prediction has category_id={}, which is not available in the dataset.".format(
                    category_id)
                result["category_id"] = reverse_id_mapping[category_id]

        if self._output_dir:
            file_path = os.path.join(self._output_dir,
                                     "coco_instances_results.json")
            logger.info("Saving results to {}".format(file_path))
            with megfile.smart_open(file_path, "w") as f:
                f.write(json.dumps(self._coco_results))
                f.flush()

        if not self._do_evaluation:
            logger.info("Annotations are not available for evaluation.")
            return

        logger.info("Evaluating predictions ...")
        for task in sorted(tasks):
            coco_eval, summary = (
                _evaluate_predictions_on_coco(
                    self._coco_api,
                    self._coco_results,
                    task,
                    kpt_oks_sigmas=self._kpt_oks_sigmas)
                if len(self._coco_results) > 0 else
                # cocoapi does not handle empty results very well;
                # (None, None) keeps the tuple unpacking from failing
                (None, None)
            )
            if summary is not None:
                logger.info("\n" + summary.getvalue())
            res = self._derive_coco_results(
                coco_eval,
                task,
                summary,
                class_names=self._metadata.thing_classes)
            self._results[task] = res
Example #14
    def open(path: str, mode: str = "r"):
        """
        Open a stream to a URI, similar to the built-in `open`.

        Args:
            path (str): A URI supported by this PathHandler

        Returns:
            file: a file-like object.
        """
        return megfile.smart_open(path, mode)
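megfile.smart_open dispatches on the URI scheme, which is why this one wrapper covers local files, S3 objects, and HTTP resources alike. A minimal usage sketch (the URIs are hypothetical):

import megfile

with megfile.smart_open("s3://my-bucket/annotations.json", "r") as f:
    text = f.read()
with megfile.smart_open("/tmp/annotations.json", "w") as f:
    f.write(text)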
Example #15
    def inner_func(path, *args, **kwargs):
        cache_dir = get_cache_dir()
        protocol, path_without_protocol = megfile.SmartPath._extract_protocol(
            path)
        local_path = os.path.join(cache_dir, path_without_protocol)
        if megfile.smart_exists(local_path):  # already cached
            return megfile.smart_open(local_path, *args, **kwargs)

        # caching logic
        if protocol == "s3":
            with file_lock(local_path):
                megfile.s3_download(path, local_path)
        elif protocol == "http" or protocol == "https":
            with file_lock(local_path):
                if not isinstance(path, str):
                    path = path.abspath()
                download(path,
                         os.path.dirname(local_path),
                         filename=os.path.basename(local_path))

        return megfile.smart_open(local_path, *args, **kwargs)
Example #16
 def _load_file(self, filename):
     """
     Args:
         filename (str): load checkpoint file from local or oss. checkpoint can be of type
             pkl, pth
     """
     if filename.endswith(".pkl"):
         with megfile.smart_open(filename, "rb") as f:
             data = pickle.load(f, encoding="latin1")
         if "model" in data and "__author__" in data:
             # file is in cvpods model zoo format
             logger.info("Reading a file from '{}'".format(
                 data["__author__"]))
             return data
         else:
             # assume file is from Caffe2 / Detectron1 model zoo
             if "blobs" in data:
                 # Detection models have "blobs", but ImageNet models don't
                 data = data["blobs"]
             data = {
                 k: v
                 for k, v in data.items() if not k.endswith("_momentum")
             }
             return {
                 "model": data,
                 "__author__": "Caffe2",
                 "matching_heuristics": True
             }
     elif filename.endswith(".pth"):
         if filename.startswith("s3://"):
             with megfile.smart_open(filename, "rb") as f:
                 loaded = torch.load(f, map_location=torch.device("cpu"))
         else:
             loaded = super()._load_file(
                 filename)  # load native pth checkpoint
         if "model" not in loaded:
             loaded = {"model": loaded}
         return loaded
Example #17
 def get_checkpoint_file(self):
     """
     Returns:
         str: The latest checkpoint file in target directory.
     """
     save_file = os.path.join(self.save_dir, "last_checkpoint")
     try:
         with megfile.smart_open(save_file, "r") as f:
             last_saved = f.read().strip()
     except IOError:
         # if file doesn't exist, maybe because it has just been
         # deleted by a separate process
         return ""
     return os.path.join(self.save_dir, last_saved)
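Together with save() from Example #4 and tag_last_checkpoint() from Example #10, this completes a simple resume protocol. A sketch of the round trip, where ckpt stands for an instance of the surrounding checkpointer class:

ckpt.save("model_0001")            # writes model_0001.pth, then tags it as last
path = ckpt.get_checkpoint_file()  # -> os.path.join(ckpt.save_dir, "model_0001.pth")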
Example #18
def build_darknet_backbone(cfg, input_shape):
    depth = cfg.MODEL.DARKNET.DEPTH
    stem_channels = cfg.MODEL.DARKNET.STEM_OUT_CHANNELS
    output_features = cfg.MODEL.DARKNET.OUT_FEATURES

    model = Darknet(depth, input_shape.channels, stem_channels,
                    output_features)
    filename = cfg.MODEL.DARKNET.WEIGHTS
    if filename:
        # megfile.smart_open handles local paths as well as s3:// URIs
        with megfile.smart_open(filename, "rb") as f:
            state_dict = torch.load(f, map_location="cpu")
        model.load_state_dict(state_dict)

    return model
Example #19
    def _eval_predictions(self, tasks):
        """
        Evaluate self._predictions on the given tasks.
        Fill self._results with the metrics of the tasks.
        """
        logger.info("Preparing results for COCO format ...")
        self._coco_results = list(
            itertools.chain(*[x["instances"] for x in self._predictions]))

        # unmap the category ids for COCO
        if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
            reverse_id_mapping = {
                v: k
                for k, v in
                self._metadata.thing_dataset_id_to_contiguous_id.items()
            }
            for result in self._coco_results:
                result["category_id"] = reverse_id_mapping[
                    result["category_id"]]

        if self._output_dir:
            file_path = os.path.join(self._output_dir,
                                     "coco_instances_results.json")
            logger.info("Saving results to {}".format(file_path))
            with megfile.smart_open(file_path, "w") as f:
                f.write(json.dumps(self._coco_results))
                f.flush()

        if not self._do_evaluation:
            logger.info("Annotations are not available for evaluation.")
            return

        logger.info("Evaluating predictions ...")
        for task in sorted(tasks):
            assert task == "bbox", "Task {} is not supported".format(task)
            coco_eval = (
                self._evaluate_predictions_on_coco(self._coco_api,
                                                   self._coco_results)
                if len(self._coco_results) > 0 else
                None  # cocoapi does not handle empty results very well
            )

            res = self._derive_coco_results(
                coco_eval,
                task,
                class_names=self._metadata.get("thing_classes"))
            self._results[task] = res
Example #20
    def _eval_box_proposals(self):
        """
        Evaluate the box proposals in self._predictions.
        Fill self._results with the metrics for "box_proposals" task.
        """
        if self._output_dir:
            # Saving generated box proposals to file.
            # Predicted box_proposals are in XYXY_ABS mode.
            bbox_mode = BoxMode.XYXY_ABS.value
            ids, boxes, objectness_logits = [], [], []
            for prediction in self._predictions:
                ids.append(prediction["image_id"])
                boxes.append(
                    prediction["proposals"].proposal_boxes.tensor.numpy())
                objectness_logits.append(
                    prediction["proposals"].objectness_logits.numpy())

            proposal_data = {
                "boxes": boxes,
                "objectness_logits": objectness_logits,
                "ids": ids,
                "bbox_mode": bbox_mode,
            }
            with megfile.smart_open(
                    os.path.join(self._output_dir, "box_proposals.pkl"),
                    "wb") as f:
                pickle.dump(proposal_data, f)

        if not self._do_evaluation:
            logger.info("Annotations are not available for evaluation.")
            return

        logger.info("Evaluating bbox proposals ...")
        res = {}
        areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
        for limit in [100, 1000]:
            for area, suffix in areas.items():
                stats = _evaluate_box_proposals(self._predictions,
                                                self._coco_api,
                                                area=area,
                                                limit=limit)
                key = "AR{}@{:d}".format(suffix, limit)
                res[key] = float(stats["ar"].item() * 100)
        logger.info("Proposal metrics: \n" + create_small_table(res))
        self._results["box_proposals"] = res
Example #21
    def _eval_predictions(self, tasks):
        """
        Evaluate self._predictions on the given tasks.
        Fill self._results with the metrics of the tasks.
        """
        logger.info("Preparing results in the LVIS format ...")
        self._lvis_results = list(itertools.chain(*[x["instances"] for x in self._predictions]))

        # LVIS evaluator can be used to evaluate results for COCO dataset categories.
        # In this case `_metadata` variable will have a field with COCO-specific category mapping.
        if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
            reverse_id_mapping = {
                v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
            }
            for result in self._lvis_results:
                result["category_id"] = reverse_id_mapping[result["category_id"]]
        else:
            # unmap the category ids for LVIS (from 0-indexed to 1-indexed)
            for result in self._lvis_results:
                result["category_id"] += 1

        if self._output_dir:
            file_path = os.path.join(self._output_dir, "lvis_instances_results.json")
            logger.info("Saving results to {}".format(file_path))
            with megfile.smart_open(file_path, "w") as f:
                f.write(json.dumps(self._lvis_results))
                f.flush()

        if not self._do_evaluation:
            logger.info("Annotations are not available for evaluation.")
            return

        logger.info("Evaluating predictions ...")
        for task in sorted(tasks):
            lvis_eval, summary = (
                _evaluate_predictions_on_lvis(
                    self._lvis_api, self._lvis_results, task
                )
                if len(self._lvis_results) > 0
                # (None, None) keeps the tuple unpacking from failing
                else (None, None)
            )
            if summary is not None:
                logger.info("\n" + summary.getvalue())
            res = self._derive_lvis_results(lvis_eval, task, summary)
            self._results[task] = res
Example #22
def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:

    1. Set up the cvpods logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory

    Args:
        cfg (BaseConfig): the full config to be used
        args (argparse.Namespace): the command line arguments to be logged
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        ensure_dir(output_dir)

    rank = comm.get_rank()
    # setup_logger(output_dir, distributed_rank=rank, name="cvpods")
    setup_logger(output_dir, distributed_rank=rank)

    logger.info("Rank of current process: {}. World size: {}".format(
        rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())

    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        logger.info("Contents of args.config_file={}:\n{}".format(
            args.config_file,
            megfile.smart_open(args.config_file, "r").read()))

    adjust_config(cfg)

    # make sure each worker has a different, yet deterministic seed if specified
    seed = seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)
    # save seed to config for dump
    cfg.SEED = seed

    # cudnn benchmark has large overhead; it shouldn't be used considering the
    # small size of a typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK

    return cfg
Example #23
def read_image(file_name, format=None):
    """
    Read an image into the given format.
    Will apply rotation and flipping if the image has such exif information.

    Args:
        file_name (str): image file path
        format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".

    Returns:
        image (np.ndarray): an HWC image in the given format, which is 0-255, uint8 for
            supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
    """
    with megfile.smart_open(file_name, "rb") as f:
        image = Image.open(f)
        if format == "RGB":
            # converting first avoids warnings caused by _apply_exif_orientation
            image = image.convert(format)
            return np.array(image)
        else:
            # work around this bug: https://github.com/python-pillow/Pillow/issues/3973
            image = _apply_exif_orientation(image)
            return convert_PIL_to_numpy(image, format)
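Typical usage, with a hypothetical file path:

image = read_image("datasets/coco/val2017/000000000139.jpg", format="BGR")
assert image.ndim == 3 and image.dtype == "uint8"  # HWC, values in 0-255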
Example #24
    def evaluate(self):
        """
        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):

        * Mean intersection-over-union averaged across classes (mIoU)
        * Frequency Weighted IoU (fwIoU)
        * Mean pixel accuracy averaged across classes (mACC)
        * Pixel Accuracy (pACC)
        """
        if self._distributed:
            comm.synchronize()
            conf_matrix_list = comm.all_gather(self._conf_matrix)
            self._predictions = comm.all_gather(self._predictions)
            self._predictions = list(itertools.chain(*self._predictions))
            if not comm.is_main_process():
                return

            self._conf_matrix = np.zeros_like(self._conf_matrix)
            for conf_matrix in conf_matrix_list:
                self._conf_matrix += conf_matrix

        if self._output_dir:
            ensure_dir(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     "sem_seg_predictions.json")
            with megfile.smart_open(file_path, "w") as f:
                f.write(json.dumps(self._predictions))

        # NumPy 1.24 removed np.float; the builtin float is equivalent here
        acc = np.zeros(self._num_classes, dtype=float)
        iou = np.zeros(self._num_classes, dtype=float)
        tp = self._conf_matrix.diagonal()[:-1].astype(float)
        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)
        class_weights = pos_gt / np.sum(pos_gt)
        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)
        acc_valid = pos_gt > 0
        acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
        iou_valid = (pos_gt + pos_pred) > 0
        union = pos_gt + pos_pred - tp
        iou[acc_valid] = tp[acc_valid] / union[acc_valid]
        macc = np.sum(acc) / np.sum(acc_valid)
        miou = np.sum(iou) / np.sum(iou_valid)
        fiou = np.sum(iou * class_weights)
        pacc = np.sum(tp) / np.sum(pos_gt)

        res = {}
        res["mIoU"] = 100 * miou
        res["fwIoU"] = 100 * fiou
        res["mACC"] = 100 * macc
        res["pACC"] = 100 * pacc

        if self._output_dir:
            file_path = os.path.join(self._output_dir,
                                     "sem_seg_evaluation.pth")
            with megfile.smart_open(file_path, "wb") as f:
                torch.save(res, f)
        results = OrderedDict({"sem_seg": res})

        small_table = create_small_table(res)
        logger.info("Evaluation results for sem_seg: \n" + small_table)

        if self._dump:
            dump_info_one_task = {
                "task": "sem_seg",
                "tables": [small_table],
            }
            _dump_to_markdown([dump_info_one_task])

        return results
Example #25
 def is_file(self):
     d2_path = self.DETECTRON2_PREFIX + self.path_without_protocol
     # an existence check should return a bool, not an open stream
     return megfile.smart_exists(d2_path)
Example #26
 def open(self, mode: str = 'r', **kwargs) -> IO[AnyStr]:
     # TODO add cache logic
     d2_path = self.DETECTRON2_PREFIX + self.path_without_protocol
     return megfile.smart_open(d2_path, mode=mode, **kwargs)
Example #27
 def open(self, mode="r", **kwargs):
     catalog_path = ModelCatalog.get(self.path_without_protocol)
     logger.info("Catalog entry {} points to {}".format(
         self.path, catalog_path))
     return megfile.smart_open(catalog_path, mode, **kwargs)
Example #28
        "--config",
        required=True,
        help="path to a python file with a definition of `config`")
    parser.add_argument(
        "--dataset",
        help="name of the dataset. Use DATASETS.TEST[0] if not specified.",
        default="")
    parser.add_argument("--conf-threshold",
                        default=0.5,
                        type=float,
                        help="confidence threshold")
    args = parser.parse_args()

    logger = setup_logger()
    cfg = setup_cfg(args.config, logger)
    with megfile.smart_open(args.input, "r") as f:
        predictions = json.load(f)

    pred_by_image = defaultdict(list)
    for p in predictions:
        pred_by_image[p["image_id"]].append(p)

    # TODO: add DatasetCatalog, MetadataCatalog
    dataset = build_dataset(
        cfg, [args.dataset] if args.dataset else [cfg.DATASETS.TEST[0]],
        transforms=[],
        is_train=False)
    dicts = dataset.datasets[0].dataset_dicts
    metadata = dataset.meta
    if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
Example #29
def cityscapes_files_to_dict(files, from_json, to_polygons):
    """
    Parse cityscapes annotation files to a dict.

    Args:
        files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
        from_json (bool): whether to read annotations from the raw json file or the png files.
        to_polygons (bool): whether to represent the segmentation as polygons
            (COCO's format) instead of masks (cityscapes's format).

    Returns:
        A dict in cvpods Dataset format.
    """
    from cityscapesscripts.helpers.labels import id2label, name2label

    image_file, instance_id_file, _, json_file = files

    annos = []

    if from_json:
        from shapely.geometry import MultiPolygon, Polygon

        with megfile.smart_open(json_file, "r") as f:
            jsonobj = json.load(f)
        ret = {
            "file_name": image_file,
            "image_id": os.path.basename(image_file),
            "height": jsonobj["imgHeight"],
            "width": jsonobj["imgWidth"],
        }

        # `polygons_union` contains the union of all valid polygons.
        polygons_union = Polygon()

        # CityscapesScripts draw the polygons in sequential order
        # and each polygon *overwrites* existing ones. See
        # (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
        # We use reverse order, and each polygon *avoids* early ones.
        # This will resolve the polygon overlaps in the same way as CityscapesScripts.
        for obj in jsonobj["objects"][::-1]:
            if "deleted" in obj:  # cityscapes data format specific
                continue
            label_name = obj["label"]

            try:
                label = name2label[label_name]
            except KeyError:
                if label_name.endswith("group"):  # crowd area
                    label = name2label[label_name[:-len("group")]]
                else:
                    raise
            if label.id < 0:  # cityscapes data format
                continue

            # Cityscapes's raw annotations uses integer coordinates
            # Therefore +0.5 here
            poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
            # CityscapesScript uses PIL.ImageDraw.polygon to rasterize
            # polygons for evaluation. This function operates in integer space
            # and draws each pixel whose center falls into the polygon.
            # Therefore it draws a polygon which is 0.5 "fatter" in expectation.
            # We therefore dilate the input polygon by 0.5 as our input.
            poly = Polygon(poly_coord).buffer(0.5, resolution=4)

            if not label.hasInstances or label.ignoreInEval:
                # even if we won't store the polygon it still contributes to overlaps resolution
                polygons_union = polygons_union.union(poly)
                continue

            # Take non-overlapping part of the polygon
            poly_wo_overlaps = poly.difference(polygons_union)
            if poly_wo_overlaps.is_empty:
                continue
            polygons_union = polygons_union.union(poly)

            anno = {}
            anno["iscrowd"] = label_name.endswith("group")
            anno["category_id"] = label.id

            if isinstance(poly_wo_overlaps, Polygon):
                poly_list = [poly_wo_overlaps]
            elif isinstance(poly_wo_overlaps, MultiPolygon):
                poly_list = poly_wo_overlaps.geoms
            else:
                raise NotImplementedError(
                    "Unknown geometric structure {}".format(poly_wo_overlaps))

            poly_coord = []
            for poly_el in poly_list:
                # COCO API can work only with exterior boundaries now, hence we store only them.
                # TODO: store both exterior and interior boundaries once other parts of the
                # codebase support holes in polygons.
                poly_coord.append(list(chain(*poly_el.exterior.coords)))
            anno["segmentation"] = poly_coord
            (xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds

            anno["bbox"] = (xmin, ymin, xmax, ymax)
            anno["bbox_mode"] = BoxMode.XYXY_ABS

            annos.append(anno)
    else:
        # See also the official annotation parsing scripts at
        # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py  # noqa
        with megfile.smart_open(instance_id_file, "rb") as f:
            inst_image = np.asarray(Image.open(f), order="F")
        # ids < 24 are stuff labels (filtering them first is about 5% faster)
        flattened_ids = np.unique(inst_image[inst_image >= 24])

        ret = {
            "file_name": image_file,
            "image_id": os.path.basename(image_file),
            "height": inst_image.shape[0],
            "width": inst_image.shape[1],
        }

        for instance_id in flattened_ids:
            # For non-crowd annotations, instance_id // 1000 is the label_id
            # Crowd annotations have <1000 instance ids
            label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
            label = id2label[label_id]
            if not label.hasInstances or label.ignoreInEval:
                continue

            anno = {}
            anno["iscrowd"] = instance_id < 1000
            anno["category_id"] = label.id

            mask = np.asarray(inst_image == instance_id,
                              dtype=np.uint8,
                              order="F")

            inds = np.nonzero(mask)
            ymin, ymax = inds[0].min(), inds[0].max()
            xmin, xmax = inds[1].min(), inds[1].max()
            anno["bbox"] = (xmin, ymin, xmax, ymax)
            if xmax <= xmin or ymax <= ymin:
                continue
            anno["bbox_mode"] = BoxMode.XYXY_ABS
            if to_polygons:
                # This conversion comes from D4809743 and D5171122,
                # when Mask-RCNN was first developed.
                contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_NONE)[-2]
                polygons = [
                    c.reshape(-1).tolist() for c in contours if len(c) >= 3
                ]
                # opencv's can produce invalid polygons
                if len(polygons) == 0:
                    continue
                anno["segmentation"] = polygons
            else:
                anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
            annos.append(anno)
    ret["annotations"] = annos
    return ret