Example #1
def get_valid_files(args, cfg, logger):

    if "MODEL.WEIGHTS" in args.opts:
        model_weights = cfg.MODEL.WEIGHTS
        assert PathManager.exists(model_weights), "{} does not exist!".format(
            model_weights)
        return [model_weights]

    file_list = glob.glob(os.path.join(cfg.OUTPUT_DIR, "model_*.pth"))
    if len(file_list) == 0:  # no local checkpoint found, fall back to OSS
        model_prefix = cfg.OUTPUT_DIR.split("cvpods_playground")[-1][1:]
        remote_file_path = os.path.join(cfg.OSS.DUMP_PREFIX, model_prefix)
        logger.warning(
            "No checkpoint file was found locally, try to "
            f"load the corresponding dump file on OSS site: {remote_file_path}."
        )
        file_list = [
            str(filename) for filename in PathManager.ls(remote_file_path)
            if re.match(r"model_.+\.pth", filename.name) is not None
        ]
        assert len(file_list) != 0, "No valid file found on OSS"

    file_list = filter_by_iters(file_list, args.start_iter, args.end_iter)
    assert file_list, "No checkpoint valid in {}.".format(cfg.OUTPUT_DIR)
    logger.info("All files below will be tested in order:\n{}".format(
        pformat(file_list)))
    return file_list
Example #2
def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
    """
    Converts dataset into COCO format and saves it to a json file.
    dataset_name must be registered in DatasetCatalog and in cvpods's standard format.
    Args:
        dataset_name:
            reference from the config file to the catalogs;
            must be registered in DatasetCatalog and in cvpods's standard format
        output_file: path of the json file to save to
        allow_cached: if json file is already present then skip conversion
    """

    # TODO: The dataset or the conversion script *may* change,
    # a checksum would be useful for validating the cached data

    PathManager.mkdirs(os.path.dirname(output_file))
    with file_lock(output_file):
        if PathManager.exists(output_file) and allow_cached:
            logger.info(
                f"Cached annotations in COCO format already exist: {output_file}"
            )
        else:
            logger.info(
                f"Converting dataset annotations in '{dataset_name}' to COCO format ...)"
            )
            coco_dict = convert_to_coco_dict(dataset_name)

            with PathManager.open(output_file, "w") as json_file:
                logger.info(
                    f"Caching annotations in COCO format: {output_file}")
                json.dump(coco_dict, json_file)
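
A minimal, local-only sketch of the caching pattern above: plain open()/os calls stand in for PathManager and file_lock, and the hand-made dict below only mimics the standard COCO layout ("images", "annotations", "categories") that convert_to_coco_dict is expected to return; every value in it is a dummy.

import json
import os
import tempfile

# Stand-in for convert_to_coco_dict(dataset_name); key layout follows the
# standard COCO annotation format, the contents are made up.
fake_coco_dict = {
    "images": [{"id": 1, "file_name": "0001.jpg", "height": 480, "width": 640}],
    "annotations": [{"id": 1, "image_id": 1, "category_id": 1,
                     "bbox": [10, 20, 30, 40], "area": 1200, "iscrowd": 0}],
    "categories": [{"id": 1, "name": "person"}],
}

output_file = os.path.join(tempfile.mkdtemp(), "my_dataset_coco_format.json")
if os.path.exists(output_file):
    print("Cached annotations in COCO format already exist: {}".format(output_file))
else:
    with open(output_file, "w") as json_file:
        print("Caching annotations in COCO format: {}".format(output_file))
        json.dump(fake_coco_dict, json_file)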
Example #3
 def _load_file(self, filename):
     """
     Args:
         filename (str): load checkpoint file from local or oss. checkpoint can be of type
             pkl, pth
     """
     if filename.endswith(".pkl"):
         with PathManager.open(filename, "rb") as f:
             data = pickle.load(f, encoding="latin1")
         if "model" in data and "__author__" in data:
             # file is in cvpods model zoo format
             self.logger.info("Reading a file from '{}'".format(data["__author__"]))
             return data
         else:
             # assume file is from Caffe2 / Detectron1 model zoo
             if "blobs" in data:
                 # Detection models have "blobs", but ImageNet models don't
                 data = data["blobs"]
             data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
             return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
     elif filename.endswith(".pth"):
         if filename.startswith("s3://"):
             with PathManager.open(filename, "rb") as f:
                 loaded = torch.load(f, map_location=torch.device("cpu"))
         else:
             loaded = super()._load_file(filename)  # load native pth checkpoint
         if "model" not in loaded:
             loaded = {"model": loaded}
         return loaded
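
To see what the Caffe2 / Detectron1 branch above produces, here is a self-contained sketch that reuses the same "_momentum" filtering and wrapping logic on a hand-made payload; the blob names and values are dummies.

# Illustrative stand-in for a Caffe2 / Detectron1 pickle payload.
data = {
    "blobs": {
        "conv1_w": [0.1, 0.2],
        "conv1_b": [0.0],
        "conv1_w_momentum": [0.0, 0.0],  # optimizer state, dropped on load
    }
}

if "blobs" in data:
    # Detection models have "blobs", but ImageNet models don't
    data = data["blobs"]
data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
checkpoint = {"model": data, "__author__": "Caffe2", "matching_heuristics": True}

assert "conv1_w_momentum" not in checkpoint["model"]
print(sorted(checkpoint["model"]))  # ['conv1_b', 'conv1_w']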
Example #4
    def evaluate(self):
        if self._distributed:
            comm.synchronize()
            self._predictions = comm.gather(self._predictions, dst=0)
            self._predictions = list(itertools.chain(*self._predictions))

            self._targets = comm.gather(self._targets, dst=0)
            self._targets = list(itertools.chain(*self._targets))

            if not comm.is_main_process():
                return {}

        if len(self._predictions) == 0:
            self._logger.warning("[ClassificationEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(self._predictions, f)

        self._results = OrderedDict()
        assert len(self._predictions) == len(self._targets)
        if self._predictions[0] is not None:
            self._eval_classification_accuracy()

        if self._dump:
            _dump_to_markdown(self._dump_infos)

        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)
Example #5
    def evaluate(self):
        if self._distributed:
            comm.synchronize()
            self._predictions = comm.gather(self._predictions, dst=0)
            self._predictions = list(itertools.chain(*self._predictions))

            if not comm.is_main_process():
                return {}

        if len(self._predictions) == 0:
            self._logger.warning(
                "[COCOEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(self._predictions, f)

        self._results = OrderedDict()
        self._eval_predictions(set(self._tasks))
        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)
Example #6
    def evaluate(self):
        comm.synchronize()

        self._predictions = comm.gather(self._predictions)
        self._predictions = list(itertools.chain(*self._predictions))
        if not comm.is_main_process():
            return

        gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
        gt_folder = self._metadata.panoptic_root

        with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
            logger.info(
                "Writing all panoptic predictions to {} ...".format(pred_dir))
            for p in self._predictions:
                with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
                    f.write(p.pop("png_string"))

            with open(gt_json, "r") as f:
                json_data = json.load(f)
            json_data["annotations"] = self._predictions
            with PathManager.open(self._predictions_json, "w") as f:
                f.write(json.dumps(json_data))

            from panopticapi.evaluation import pq_compute

            with contextlib.redirect_stdout(io.StringIO()):
                pq_res = pq_compute(
                    gt_json,
                    PathManager.get_local_path(self._predictions_json),
                    gt_folder=gt_folder,
                    pred_folder=pred_dir,
                )

        res = {}
        res["PQ"] = 100 * pq_res["All"]["pq"]
        res["SQ"] = 100 * pq_res["All"]["sq"]
        res["RQ"] = 100 * pq_res["All"]["rq"]
        res["PQ_th"] = 100 * pq_res["Things"]["pq"]
        res["SQ_th"] = 100 * pq_res["Things"]["sq"]
        res["RQ_th"] = 100 * pq_res["Things"]["rq"]
        res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
        res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
        res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]

        results = OrderedDict({"panoptic_seg": res})
        table = _print_panoptic_results(pq_res)

        if self._dump:
            dump_info_one_task = {
                "task": "panoptic_seg",
                "tables": [table],
            }
            _dump_to_markdown([dump_info_one_task])

        return results
Example #7
def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:

    1. Set up the cvpods logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory

    Args:
        cfg (BaseConfig): the full config to be used
        args (argparse.Namespace): the command line arguments to be logged
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)

    rank = comm.get_rank()
    # setup_logger(output_dir, distributed_rank=rank, name="cvpods")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info("Rank of current process: {}. World size: {}".format(
        rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())

    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        logger.info("Contents of args.config_file={}:\n{}".format(
            args.config_file,
            PathManager.open(args.config_file, "r").read()))

    adjust_config(cfg)
    logger.info("Running with full config:\n{}".format(cfg))
    base_config = cfg.__class__.__base__()
    logger.info("different config with base class:\n{}".format(
        cfg.diff(base_config)))
    # if comm.is_main_process() and output_dir:
    #     # Note: some of our scripts may expect the existence of
    #     # config.yaml in output directory
    #     path = os.path.join(output_dir, "config.yaml")
    #     with PathManager.open(path, "w") as f:
    #         f.write(cfg.dump())
    #     logger.info("Full config saved to {}".format(os.path.abspath(path)))

    # make sure each worker has a different, yet deterministic seed if specified

    seed = seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)
    # save seed to config for dump
    cfg.SEED = seed

    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK

    return cfg, logger
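
The per-rank seeding step above is what keeps workers different yet reproducible. Below is a rough standalone approximation of that idea; the real seed_all_rng helper lives in cvpods and additionally draws a fresh seed when SEED < 0, so treat this only as a sketch of the base_seed + rank pattern.

import random

import numpy as np
import torch


def seed_everything(base_seed: int, rank: int) -> int:
    """Give each worker a different but deterministic seed (sketch only)."""
    seed = base_seed + rank
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    return seed


# e.g. rank 0 and rank 1 get seeds 42 and 43 respectively
print(seed_everything(42, rank=0), seed_everything(42, rank=1))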
Example #8
 def get_all_checkpoint_files(self):
     """
     Returns:
         list: All available checkpoint files (.pth files) in target
             directory.
     """
     all_model_checkpoints = [
         os.path.join(self.save_dir, file)
         for file in PathManager.ls(self.save_dir)
         if PathManager.isfile(os.path.join(self.save_dir, file)) and file.endswith(".pth")
     ]
     return all_model_checkpoints
Example #9
    def save(self, name: str, tag_checkpoint: bool = True, **kwargs: dict):
        """
        Dump model and checkpointables to a file.

        Args:
            name (str): name of the file.
            tag_checkpoint (bool): whether to tag this checkpoint as the
                latest one.
            kwargs (dict): extra arbitrary data to save.
        """
        if not self.save_dir or not self.save_to_disk:
            return

        data = {}
        data["model"] = self.model.state_dict()
        for key, obj in self.checkpointables.items():
            data[key] = obj.state_dict()
        data.update(kwargs)

        basename = "{}.pth".format(name)
        save_file = os.path.join(self.save_dir, basename)
        assert os.path.basename(save_file) == basename, basename
        self.logger.info("Saving checkpoint to {}".format(save_file))
        with PathManager.open(save_file, "wb") as f:
            torch.save(data, f)

        if tag_checkpoint:
            self.tag_last_checkpoint(basename)
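
A self-contained sketch of the checkpoint layout that save() writes: the model state_dict under "model", each checkpointable under its own key, and any extra kwargs as plain keys. torch.save/torch.load are the real APIs; the tiny model and the "iteration" field are only for illustration.

import os
import tempfile

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

data = {"model": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "iteration": 999}  # extra kwargs end up as plain keys

save_file = os.path.join(tempfile.mkdtemp(), "model_0000999.pth")
with open(save_file, "wb") as f:
    torch.save(data, f)

with open(save_file, "rb") as f:
    loaded = torch.load(f, map_location="cpu")
print(sorted(loaded))  # ['iteration', 'model', 'optimizer']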
Example #10
    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a model.
                It is a list of dicts. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name".
            outputs: the outputs of a model. It is either list of semantic segmentation predictions
                (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
                segmentation prediction in the same format.
        """
        for input, output in zip(inputs, outputs):
            output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
            pred = np.array(output, dtype=int)
            with PathManager.open(
                    self.input_file_to_gt_file[input["file_name"]], "rb") as f:
                gt = np.array(Image.open(f), dtype=int)

            gt[gt == self._ignore_label] = self._num_classes

            self._conf_matrix += np.bincount(
                self._N * pred.reshape(-1) + gt.reshape(-1),
                minlength=self._N**2).reshape(self._N, self._N)

            self._predictions.extend(
                self.encode_json_sem_seg(pred, input["file_name"]))
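
The np.bincount trick in process() accumulates a full confusion matrix in one vectorized call. A tiny numpy-only sketch with made-up per-pixel predictions and labels, assuming (as in the code above) that the matrix side N is num_classes + 1 so the extra bin absorbs pixels carrying the ignore label:

import numpy as np

num_classes = 3
N = num_classes + 1          # one extra slot for ignored pixels (assumption)
ignore_label = 255

pred = np.array([0, 1, 2, 2, 1], dtype=int)   # model predictions per pixel
gt = np.array([0, 1, 2, 255, 2], dtype=int)   # ground truth per pixel
gt[gt == ignore_label] = num_classes          # remap ignore -> last bin

conf_matrix = np.bincount(
    N * pred.reshape(-1) + gt.reshape(-1),
    minlength=N ** 2).reshape(N, N)

# rows: predicted class, columns: ground-truth class
print(conf_matrix)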
Example #11
    def _eval_longtail_subgroup_accuracy(self, preds, target):
        # category_frequency_file = os.path.join(dataset_path,'category_frequency.json')
        with PathManager.open(self._longtail_json, 'r') as f:
            category_frequency = json.load(f)

        many_cats = category_frequency['many_cats']
        medium_cats = category_frequency['medium_cats']
        low_cats = category_frequency['low_cats']

        cat_indicator = torch.zeros(len(self._metadata.thing_classes))
        cat_indicator[many_cats] = 1
        cat_indicator[medium_cats] = 2
        cat_indicator[low_cats] = 3

        labels_group_ids = cat_indicator[target]
        labels_many = target[labels_group_ids == 1]
        labels_medium = target[labels_group_ids == 2]
        labels_low = target[labels_group_ids == 3]

        preds_many = preds[:, labels_group_ids == 1]
        preds_medium = preds[:, labels_group_ids == 2]
        preds_low = preds[:, labels_group_ids == 3]

        many_topks_correct = self._accuracy(preds_many, labels_many)
        medium_topks_correct = self._accuracy(preds_medium, labels_medium)
        low_topks_correct = self._accuracy(preds_low, labels_low)

        top_acc_subgroups = [
            many_topks_correct, medium_topks_correct, low_topks_correct
        ]

        return top_acc_subgroups
Example #12
    def _load_semantic_annotations(self, image_dir, gt_dir):
        """
        Args:
            image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
            gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".

        Returns:
            list[dict]: a list of dict, each has "file_name" and
                "sem_seg_file_name".
        """
        ret = []
        for image_file in glob.glob(os.path.join(image_dir, "**/*.png")):
            suffix = "leftImg8bit.png"
            assert image_file.endswith(suffix)
            prefix = image_dir

            label_file = (gt_dir + image_file[len(prefix):-len(suffix)] +
                          "gtFine_labelTrainIds.png")
            assert os.path.isfile(
                label_file
            ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"  # noqa

            json_file = gt_dir + image_file[
                len(prefix):-len(suffix)] + "gtFine_polygons.json"

            with PathManager.open(json_file, "r") as f:
                jsonobj = json.load(f)
            ret.append({
                "file_name": image_file,
                "sem_seg_file_name": label_file,
                "height": jsonobj["imgHeight"],
                "width": jsonobj["imgWidth"],
            })
        return ret
Example #13
def filter_by_iters(file_list, start_iter, end_iter):
    # sort file_list by modified time
    if file_list[0].startswith("s3://"):
        file_list.sort(key=lambda x: PathManager.stat(x).m_date)
    else:
        file_list.sort(key=os.path.getmtime)

    if start_iter is None:
        if end_iter is None:
            # use latest ckpt if start_iter and end_iter are not given
            return [file_list[-1]]
        else:
            start_iter = 0
    elif end_iter is None:
        end_iter = float("inf")

    iter_infos = [re.split(r"model_|\.pth", f)[-2] for f in file_list]
    keep_list = [0] * len(iter_infos)
    start_index = 0
    if "final" in iter_infos and iter_infos[-1] != "final":
        start_index = iter_infos.index("final")

    for i in range(len(iter_infos) - 1, start_index, -1):
        if iter_infos[i] == "final":
            if end_iter == float("inf"):
                keep_list[i] = 1
        elif float(start_iter) < float(iter_infos[i]) < float(end_iter):
            keep_list[i] = 1
            if float(iter_infos[i - 1]) > float(iter_infos[i]):
                break

    return [
        filename for keep, filename in zip(keep_list, file_list) if keep == 1
    ]
Example #14
 def has_checkpoint(self):
     """
     Returns:
         bool: whether a checkpoint exists in the target directory.
     """
     save_file = os.path.join(self.save_dir, "last_checkpoint")
     return PathManager.exists(save_file)
Example #15
    def _eval_predictions(self, tasks):
        """
        Evaluate self._predictions on the given tasks.
        Fill self._results with the metrics of the tasks.
        """
        self._logger.info("Preparing results for CrowdHuman format ...")
        self._coco_results = self._predictions

        if self._output_dir:
            file_path = os.path.join(self._output_dir,
                                     "coco_instances_results.json")
            self._logger.info("Saving results to {}".format(file_path))

            with PathManager.open(file_path, "w") as f:
                for db in self._coco_results:
                    line = json.dumps(db) + '\n'
                    f.write(line)

        self._logger.info("Evaluating predictions ...")
        for task in sorted(tasks):
            coco_eval = (
                _evaluate_predictions_on_crowdhuman(self._metadata.json_file,
                                                    file_path)
                if len(self._coco_results) > 0 else
                None  # cocoapi does not handle empty results very well
            )
            res = self._derive_coco_results(coco_eval, task)
            self._results[task] = res
Example #16
    def __init__(self,
                 dataset_name,
                 meta,
                 cfg,
                 distributed,
                 output_dir=None,
                 dump=False):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have either the following corresponding metadata:

                    "json_file": the path to the COCO format annotation

                Or it must be in cvpods's standard dataset format
                so it can be converted to COCO format automatically.
            meta (SimpleNamespace): dataset metadata.
            cfg (config dict): cvpods Config instance.
            distributed (bool): if True, will collect results from all ranks for evaluation.
                Otherwise, will evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump all
                results predicted on the dataset. The dump contains two files:

                1. "instance_predictions.pth" a file in torch serialization
                   format that contains all the raw original predictions.
                2. "coco_instances_results.json" a json file in COCO's result
                   format.

            dump (bool): If True, after the evaluation is completed, a Markdown file
                that records the model evaluation metrics and corresponding scores
                will be generated in the working directory.
        """
        self._dump = dump
        self.cfg = cfg
        self._tasks = self._tasks_from_config(cfg)
        self._distributed = distributed
        self._output_dir = output_dir

        self._cpu_device = torch.device("cpu")
        self._logger = logging.getLogger(__name__)

        self._metadata = meta
        if not hasattr(self._metadata, "json_file"):
            self._logger.warning(
                f"json_file was not found in MetaDataCatalog for '{dataset_name}'."
                " Trying to convert it to COCO format ...")

            cache_path = os.path.join(output_dir,
                                      f"{dataset_name}_coco_format.json")
            self._metadata.json_file = cache_path
            convert_to_coco_json(dataset_name, cache_path)

        json_file = PathManager.get_local_path(self._metadata.json_file)
        with contextlib.redirect_stdout(io.StringIO()):
            self._coco_api = COCO(json_file)

        self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS
        # Test set json files do not contain annotations (evaluation must be
        # performed using the COCO evaluation server).
        self._do_evaluation = "annotations" in self._coco_api.dataset
Example #17
    def _load_annotations(self):
        """
        Load Pascal VOC detection annotations to cvpods format.

        Args:
            dirname: Contain "Annotations", "ImageSets", "JPEGImages"
            split (str): one of "train", "test", "val", "trainval"
        """

        dirname = self.image_root
        split = self.split

        with PathManager.open(
                os.path.join(dirname, "ImageSets", "Main",
                             split + ".txt")) as f:
            fileids = np.loadtxt(f, dtype=str)

        dicts = []
        for fileid in fileids:
            anno_file = os.path.join(dirname, "Annotations", fileid + ".xml")
            jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")

            tree = ET.parse(anno_file)

            r = {
                "file_name": jpeg_file,
                "image_id": fileid,
                "height": int(tree.findall("./size/height")[0].text),
                "width": int(tree.findall("./size/width")[0].text),
            }
            instances = []

            for obj in tree.findall("object"):
                cls = obj.find("name").text
                # We include "difficult" samples in training.
                # Based on limited experiments, they don't hurt accuracy.
                # difficult = int(obj.find("difficult").text)
                # if difficult == 1:
                # continue
                bbox = obj.find("bndbox")
                bbox = [
                    float(bbox.find(x).text)
                    for x in ["xmin", "ymin", "xmax", "ymax"]
                ]
                # Original annotations are integers in the range [1, W or H]
                # Assuming they mean 1-based pixel indices (inclusive),
                # a box with annotation (xmin=1, xmax=W) covers the whole image.
                # In coordinate space this is represented by (xmin=0, xmax=W)
                bbox[0] -= 1.0
                bbox[1] -= 1.0
                instances.append({
                    "category_id": CLASS_NAMES.index(cls),
                    "bbox": bbox,
                    "bbox_mode": BoxMode.XYXY_ABS
                })
            r["annotations"] = instances
            dicts.append(r)

        return dicts
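
A small self-contained check of the bndbox parsing and the 1-based-to-0-based shift discussed in the comments above. The XML snippet is a hand-written stand-in for a real Pascal VOC annotation file, trimmed to the fields the loader reads:

import xml.etree.ElementTree as ET

xml_snippet = """
<annotation>
  <size><width>100</width><height>80</height></size>
  <object>
    <name>dog</name>
    <bndbox><xmin>1</xmin><ymin>1</ymin><xmax>100</xmax><ymax>80</ymax></bndbox>
  </object>
</annotation>
"""

root = ET.fromstring(xml_snippet)
bndbox = root.find("object").find("bndbox")
bbox = [float(bndbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]

# shift 1-based inclusive pixel indices to 0-based coordinates
bbox[0] -= 1.0
bbox[1] -= 1.0
print(bbox)  # [0.0, 0.0, 100.0, 80.0] -> the box spans the full 100x80 image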
Example #18
    def __getitem__(self, index):
        """Load data, apply transforms, converto to Instances.
        """
        dataset_dict = copy.deepcopy(self.dataset_dicts[index])

        # read image
        image = read_image(dataset_dict["file_name"], format=self.data_format)
        check_image_size(dataset_dict, image)

        if "annotations" in dataset_dict:
            annotations = dataset_dict.pop("annotations")
            annotations = [
                ann for ann in annotations if ann.get("iscrowd", 0) == 0
            ]
        else:
            annotations = None

        if "sem_seg_file_name" in dataset_dict:
            assert annotations is None
            annotations = []
            with PathManager.open(dataset_dict.get("sem_seg_file_name"),
                                  "rb") as f:
                sem_seg_gt = Image.open(f)
                sem_seg_gt = np.asarray(sem_seg_gt, dtype="uint8")
            annotations.append({"sem_seg": sem_seg_gt})

        # apply transforms
        image, annotations = self._apply_transforms(image, annotations)

        if "sem_seg_file_name" in dataset_dict:
            dataset_dict.pop("sem_seg_file_name")
            sem_seg_gt = annotations[0].pop("sem_seg")
            sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
            dataset_dict["sem_seg"] = sem_seg_gt
            annotations = None

        if annotations is not None:
            image_shape = image.shape[:2]  # h, w

            instances = annotations_to_instances(annotations,
                                                 image_shape,
                                                 mask_format=self.mask_format)

            # # Create a tight bounding box from masks, useful when image is cropped
            # if self.crop_gen and instances.has("gt_masks"):
            #     instances.gt_boxes = instances.gt_masks.get_bounding_boxes()

            dataset_dict["instances"] = filter_empty_instances(instances)

        # convert to Instance type
        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        # h, w, c -> c, h, w
        dataset_dict["image"] = torch.as_tensor(
            np.ascontiguousarray(image.transpose(2, 0, 1)))

        return dataset_dict
Example #19
 def __init__(self, json_file, window_size=20):
     """
     Args:
         json_file (str): path to the json file. New data will be appended if the file exists.
         window_size (int): the window size of median smoothing for the scalars whose
             `smoothing_hint` are True.
     """
     self._file_handle = PathManager.open(json_file, "a")
     self._window_size = window_size
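
A minimal, local-only sketch of the append-one-JSON-object-per-line pattern this writer is built around; plain open() stands in for PathManager.open, and the field names ("iteration", "total_loss") are illustrative only.

import json
import os
import tempfile

json_file = os.path.join(tempfile.mkdtemp(), "metrics.json")

file_handle = open(json_file, "a")  # new data is appended if the file exists
for itr, loss in [(20, 0.91), (40, 0.73)]:
    file_handle.write(json.dumps({"iteration": itr, "total_loss": loss}) + "\n")
    file_handle.flush()
file_handle.close()

with open(json_file) as f:
    print([json.loads(line) for line in f])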
Example #20
def main(args):
    config.merge_from_list(args.opts)
    cfg, logger = default_setup(config, args)
    if args.debug:
        batches = int(cfg.SOLVER.IMS_PER_BATCH / 8 * args.num_gpus)
        if cfg.SOLVER.IMS_PER_BATCH != batches:
            cfg.SOLVER.IMS_PER_BATCH = batches
            logger.warning(
                "SOLVER.IMS_PER_BATCH is changed to {}".format(batches))

    if "MODEL.WEIGHTS" in args.opts:
        if cfg.MODEL.WEIGHTS.endswith(".pth") and not PathManager.exists(
                cfg.MODEL.WEIGHTS):
            ckpt_name = cfg.MODEL.WEIGHTS.split("/")[-1]
            model_prefix = cfg.OUTPUT_DIR.split("cvpods_playground")[1][1:]
            remote_file_path = os.path.join(cfg.OSS.DUMP_PREFIX, model_prefix,
                                            ckpt_name)
            logger.warning(
                f"The specified ckpt file ({cfg.MODEL.WEIGHTS}) was not found locally,"
                f" try to load the corresponding dump file on OSS ({remote_file_path})."
            )
            cfg.MODEL.WEIGHTS = remote_file_path
        valid_files = [cfg.MODEL.WEIGHTS]
    else:
        list_of_files = glob.glob(os.path.join(cfg.OUTPUT_DIR, '*.pth'))

        assert list_of_files, "No checkpoint file found in {}.".format(
            cfg.OUTPUT_DIR)
        list_of_files.sort(key=os.path.getctime)
        latest_file = list_of_files[-1]
        if not args.end_iter:
            valid_files = [latest_file]
        else:
            files = [f for f in list_of_files if str(f) <= str(latest_file)]
            valid_files = []
            for f in files:
                try:
                    model_iter = int(re.split(r'(model_|\.pth)', f)[-3])
                except Exception:
                    logger.warning("remove {}".format(f))
                    continue
                if args.start_iter <= model_iter <= args.end_iter:
                    valid_files.append(f)
            assert valid_files, "No .pth files satisfy your requirement"

    # test each matched checkpoint file in turn
    for current_file in valid_files:
        cfg.MODEL.WEIGHTS = current_file
        model = build_model(cfg)

        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
Example #21
def load_proposals_into_dataset(dataset_dicts, proposal_file):
    r"""
    Load precomputed object proposals into the dataset.

    The proposal file should be a pickled dict with the following keys:

    - "ids": list[int] or list[str], the image ids
    - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
    - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
      corresponding to the boxes.
    - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.

    Args:
        dataset_dicts (list[dict]): annotations in cvpods Dataset format.
        proposal_file (str): file path of pre-computed proposals, in pkl format.

    Returns:
        list[dict]: the same format as dataset_dicts, but added proposal field.
    """
    logger = logging.getLogger(__name__)
    logger.info("Loading proposals from: {}".format(proposal_file))

    with PathManager.open(proposal_file, "rb") as f:
        proposals = pickle.load(f, encoding="latin1")

    # Rename the key names in D1 proposal files
    rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
    for key in rename_keys:
        if key in proposals:
            proposals[rename_keys[key]] = proposals.pop(key)

    # Fetch the indexes of all proposals that are in the dataset
    # Convert image_id to str since they could be int.
    img_ids = set({str(record["image_id"]) for record in dataset_dicts})
    id_to_index = {
        str(id): i
        for i, id in enumerate(proposals["ids"]) if str(id) in img_ids
    }

    # Assume the default bbox_mode of precomputed proposals is 'XYXY_ABS'
    bbox_mode = BoxMode(proposals["bbox_mode"]
                        ) if "bbox_mode" in proposals else BoxMode.XYXY_ABS

    for record in dataset_dicts:
        # Get the index of the proposal
        i = id_to_index[str(record["image_id"])]

        boxes = proposals["boxes"][i]
        objectness_logits = proposals["objectness_logits"][i]
        # Sort the proposals in descending order of the scores
        inds = objectness_logits.argsort()[::-1]
        record["proposal_boxes"] = boxes[inds]
        record["proposal_objectness_logits"] = objectness_logits[inds]
        record["proposal_bbox_mode"] = bbox_mode

    return dataset_dicts
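
A toy proposal file matching the documented pickle layout, plus the descending-objectness sort used above; the image id, boxes, and logits are made up.

import pickle

import numpy as np

# Build a proposal dict with the documented keys and round-trip it via pickle.
proposals = {
    "ids": [42],
    "boxes": [np.array([[0, 0, 10, 10], [5, 5, 20, 20]], dtype=np.float32)],
    "objectness_logits": [np.array([0.2, 1.5], dtype=np.float32)],
}
proposals = pickle.loads(pickle.dumps(proposals))

# Sort the proposals of image 42 in descending order of objectness.
i = 0
inds = proposals["objectness_logits"][i].argsort()[::-1]
print(proposals["boxes"][i][inds])              # the [5, 5, 20, 20] box comes first
print(proposals["objectness_logits"][i][inds])  # [1.5 0.2]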
Example #22
 def after_step(self):
     if self._profiler is None:
         return
     self._profiler.__exit__(None, None, None)
     PathManager.mkdirs(self._output_dir)
     out_file = os.path.join(
         self._output_dir,
         "profiler-trace-iter{}.json".format(self.trainer.iter))
     if "://" not in out_file:
         self._profiler.export_chrome_trace(out_file)
     else:
         # Support non-posix filesystems
         with tempfile.TemporaryDirectory(prefix="cvpods_profiler") as d:
             tmp_file = os.path.join(d, "tmp.json")
             self._profiler.export_chrome_trace(tmp_file)
             with open(tmp_file) as f:
                 content = f.read()
         with PathManager.open(out_file, "w") as f:
             f.write(content)
Example #23
    def tag_last_checkpoint(self, last_filename_basename: str):
        """
        Tag the last checkpoint.

        Args:
            last_filename_basename (str): the basename of the last filename.
        """
        save_file = os.path.join(self.save_dir, "last_checkpoint")
        with PathManager.open(save_file, "w") as f:
            f.write(last_filename_basename)
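
tag_last_checkpoint and get_checkpoint_file (Example #27 below) are two halves of the same round trip; here is a local-only sketch with plain open() standing in for PathManager and a hypothetical checkpoint basename.

import os
import tempfile

save_dir = tempfile.mkdtemp()

# tag_last_checkpoint: record the basename of the newest checkpoint
with open(os.path.join(save_dir, "last_checkpoint"), "w") as f:
    f.write("model_0009999.pth")

# get_checkpoint_file: resolve the tag back to a full path
with open(os.path.join(save_dir, "last_checkpoint"), "r") as f:
    last_saved = f.read().strip()
print(os.path.join(save_dir, last_saved))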
Example #24
    def _eval_predictions(self, tasks):
        """
        Evaluate self._predictions on the given tasks.
        Fill self._results with the metrics of the tasks.
        """
        self._logger.info("Preparing results for COCO format ...")
        self._coco_results = list(
            itertools.chain(*[x["instances"] for x in self._predictions]))

        # unmap the category ids for COCO
        if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
            reverse_id_mapping = {
                v: k
                for k, v in
                self._metadata.thing_dataset_id_to_contiguous_id.items()
            }
            for result in self._coco_results:
                category_id = result["category_id"]
                assert (
                    category_id in reverse_id_mapping
                ), "A prediction has category_id={}, which is not available in the dataset.".format(
                    category_id)
                result["category_id"] = reverse_id_mapping[category_id]

        if self._output_dir:
            file_path = os.path.join(self._output_dir,
                                     "coco_instances_results.json")
            self._logger.info("Saving results to {}".format(file_path))
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(self._coco_results))
                f.flush()

        if not self._do_evaluation:
            self._logger.info("Annotations are not available for evaluation.")
            return

        self._logger.info("Evaluating predictions ...")
        for task in sorted(tasks):
            if len(self._coco_results) > 0:
                coco_eval, summary = _evaluate_predictions_on_coco(
                    self._coco_api,
                    self._coco_results,
                    task,
                    kpt_oks_sigmas=self._kpt_oks_sigmas)
                self._logger.info("\n" + summary.getvalue())
            else:
                # cocoapi does not handle empty results very well
                coco_eval, summary = None, None
            res = self._derive_coco_results(
                coco_eval,
                task,
                summary,
                class_names=self._metadata.thing_classes)
            self._results[task] = res
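
The category unmapping step above inverts the dataset-id-to-contiguous-id mapping that datasets register. A tiny sketch with a hypothetical three-class mapping and one dummy result:

# dataset id -> contiguous training id (hypothetical subset)
thing_dataset_id_to_contiguous_id = {1: 0, 3: 1, 90: 2}

reverse_id_mapping = {
    v: k for k, v in thing_dataset_id_to_contiguous_id.items()
}

results = [{"image_id": 7, "category_id": 2, "score": 0.9}]
for result in results:
    result["category_id"] = reverse_id_mapping[result["category_id"]]
print(results)  # category_id 2 is mapped back to the dataset id 90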
Example #25
def build_darknet_backbone(cfg, input_shape):
    depth = cfg.MODEL.DARKNET.DEPTH
    stem_channels = cfg.MODEL.DARKNET.STEM_OUT_CHANNELS
    output_features = cfg.MODEL.DARKNET.OUT_FEATURES

    model = Darknet(depth, input_shape.channels, stem_channels,
                    output_features)
    filename = cfg.MODEL.DARKNET.WEIGHTS
    with PathManager.open(filename, "rb") as f:
        state_dict = torch.load(f, map_location='cpu')
    model.load_state_dict(state_dict)

    return model
Example #26
    def _eval_predictions(self, tasks):
        """
        Evaluate self._predictions on the given tasks.
        Fill self._results with the metrics of the tasks.
        """
        self._logger.info("Preparing results in the LVIS format ...")
        self._lvis_results = list(
            itertools.chain(*[x["instances"] for x in self._predictions]))

        # LVIS evaluator can be used to evaluate results for COCO dataset categories.
        # In this case `_metadata` variable will have a field with COCO-specific category mapping.
        if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
            reverse_id_mapping = {
                v: k
                for k, v in
                self._metadata.thing_dataset_id_to_contiguous_id.items()
            }
            for result in self._lvis_results:
                result["category_id"] = reverse_id_mapping[
                    result["category_id"]]
        else:
            # unmap the category ids for LVIS (from 0-indexed to 1-indexed)
            for result in self._lvis_results:
                result["category_id"] += 1

        if self._output_dir:
            file_path = os.path.join(self._output_dir,
                                     "lvis_instances_results.json")
            self._logger.info("Saving results to {}".format(file_path))
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(self._lvis_results))
                f.flush()

        if not self._do_evaluation:
            self._logger.info("Annotations are not available for evaluation.")
            return

        self._logger.info(
            "Evaluating predictions with use_fast_impl={} ...".format(
                self._use_fast_impl))
        for task in sorted(tasks):
            if len(self._lvis_results) > 0:
                lvis_eval, summary = _evaluate_predictions_on_lvis(
                    self._lvis_api,
                    self._lvis_results,
                    task,
                    use_fast_impl=self._use_fast_impl,
                    max_dets=self._max_dets)
                self._logger.info("\n" + summary.getvalue())
            else:
                # lvis api does not handle empty results very well
                lvis_eval, summary = None, None
            res = self._derive_lvis_results(lvis_eval, task, summary)
            self._results[task] = res
Example #27
 def get_checkpoint_file(self):
     """
     Returns:
         str: The latest checkpoint file in target directory.
     """
     save_file = os.path.join(self.save_dir, "last_checkpoint")
     try:
         with PathManager.open(save_file, "r") as f:
             last_saved = f.read().strip()
     except IOError:
         # if file doesn't exist, maybe because it has just been
         # deleted by a separate process
         return ""
     return os.path.join(self.save_dir, last_saved)
Example #28
    def load(self, path: str):
        """
        Load from the given checkpoint. When path points to a network file, this
        function has to be called on all ranks.

        Args:
            path (str): path or url to the checkpoint. If empty, will not load
                anything.
        Returns:
            dict:
                extra data loaded from the checkpoint that has not been
                processed. For example, those saved with
                :meth:`.save(**extra_data)`.
        """
        if not path:
            # no checkpoint provided
            self.logger.info(
                "No checkpoint found. Initializing model from scratch")
            return {}
        self.logger.info("Loading checkpoint from {}".format(path))
        if not os.path.isfile(path):
            path = PathManager.get_local_path(path)
            assert PathManager.isfile(path), "Checkpoint {} not found!".format(
                path)

        checkpoint = self._load_file(path)
        self._load_model(checkpoint)
        if self.resume:
            for key, obj in self.checkpointables.items():
                if key in checkpoint:
                    self.logger.info("Loading {} from {}".format(key, path))
                    obj.load_state_dict(checkpoint.pop(key))
            # return any further checkpoint data
            return checkpoint
        else:
            return {}
Example #29
    def evaluate(self):
        if self._distributed:
            comm.synchronize()
            self._predictions = comm.gather(self._predictions, dst=0)
            self._predictions = list(itertools.chain(*self._predictions))

            if not comm.is_main_process():
                return {}

        if len(self._predictions) == 0:
            self._logger.warning(
                "[COCOEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(self._predictions, f)

        self._results = OrderedDict()
        if "proposals" in self._predictions[0]:
            self._eval_box_proposals()
        if "instances" in self._predictions[0]:
            self._eval_predictions(set(self._tasks))

        if self._dump:
            extra_infos = {
                "title": os.path.basename(os.getcwd()),
                "seed": self.cfg.SEED,
            }
            _dump_to_markdown(extra_infos, self._dump_infos)

        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)
Example #30
def setup_cfg(args):
    # load config from file and command-line arguments
    cfg = get_config(args.config, None)
    cfg.merge_from_list(args.opts)

    if cfg.MODEL.WEIGHTS == "":
        oss_prefix = os.path.join(cfg.OSS.MODEL_PREFIX, "model_zoo")
        file_path = os.path.join(oss_prefix, args.config, "model_final.pth")
        logger.warning(
            f"No checkpoint file specified, "
            f"trying to get it from Model Zoo (OSS URI: {file_path}).")
        assert PathManager.isfile(
            file_path), f"No checkpoint file found in {file_path}."
        cfg.MODEL.WEIGHTS = file_path

    return cfg