def get_valid_files(args, cfg, logger):
    """Collect the checkpoint files to be evaluated, in order.

    If ``MODEL.WEIGHTS`` was overridden on the command line, that single file
    is used. Otherwise every ``model_*.pth`` under ``cfg.OUTPUT_DIR`` is
    gathered — falling back to the corresponding OSS dump location when the
    local directory holds none — and filtered to the requested iteration
    range.

    Args:
        args: parsed CLI arguments; ``opts``, ``start_iter`` and ``end_iter``
            are consulted.
        cfg: config node providing ``MODEL.WEIGHTS``, ``OUTPUT_DIR`` and
            ``OSS.DUMP_PREFIX``.
        logger: logger used for the fallback warning and the final listing.

    Returns:
        list[str]: checkpoint file paths to test.
    """
    # An explicit weights override on the command line wins over scanning.
    if "MODEL.WEIGHTS" in args.opts:
        model_weights = cfg.MODEL.WEIGHTS
        assert PathManager.exists(model_weights), "{} not exist!!!".format(
            model_weights)
        return [model_weights]

    candidates = glob.glob(os.path.join(cfg.OUTPUT_DIR, "model_*.pth"))
    if not candidates:
        # Nothing on disk: derive the matching OSS dump path and list it.
        model_prefix = cfg.OUTPUT_DIR.split("cvpods_playground")[-1][1:]
        remote_file_path = os.path.join(cfg.OSS.DUMP_PREFIX, model_prefix)
        logger.warning(
            f"No checkpoint file was found locally, try to "
            f"load the corresponding dump file on OSS site: {remote_file_path}."
        )
        candidates = []
        for filename in PathManager.ls(remote_file_path):
            if re.match(r"model_.+\.pth", filename.name) is not None:
                candidates.append(str(filename))
        assert len(candidates) != 0, "No valid file found on OSS"

    candidates = filter_by_iters(candidates, args.start_iter, args.end_iter)
    assert candidates, "No checkpoint valid in {}.".format(cfg.OUTPUT_DIR)
    logger.info("All files below will be tested in order:\n{}".format(
        pformat(candidates)))
    return candidates
def get_all_checkpoint_files(self):
    """
    Returns:
        list: All available checkpoint files (.pth files) in target directory.
    """
    checkpoints = []
    for entry in PathManager.ls(self.save_dir):
        candidate = os.path.join(self.save_dir, entry)
        # Keep regular files with the checkpoint extension only.
        if PathManager.isfile(candidate) and entry.endswith(".pth"):
            checkpoints.append(candidate)
    return checkpoints
def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"):
    """
    Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are
    treated as ground truth annotations and all files under "image_root" with "image_ext" extension
    as input images. Ground truth and input images are matched using file paths relative to
    "gt_root" and "image_root" respectively without taking into account file extensions.
    This works for COCO as well as some other datasets.

    Args:
        gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation
            annotations are stored as images with integer values in pixels that represent
            corresponding semantic labels.
        image_root (str): the directory where the input images are.
        gt_ext (str): file extension for ground truth annotations.
        image_ext (str): file extension for input images.

    Returns:
        list[dict]: a list of dicts in cvpods standard format without instance-level
            annotation, each holding "file_name" and "sem_seg_file_name".

    Notes:
        1. This function does not read the image and ground truth files.
           The results do not have the "image" and "sem_seg" fields.
    """
    # We match input images with ground truth based on their relative filepaths (without file
    # extensions) starting from 'image_root' and 'gt_root' respectively.
    def file2id(folder_path, file_path):
        # extract relative path starting from `folder_path`
        image_id = os.path.normpath(
            os.path.relpath(file_path, start=folder_path))
        # remove file extension
        image_id = os.path.splitext(image_id)[0]
        return image_id

    input_files = sorted(
        (os.path.join(image_root, f)
         for f in PathManager.ls(image_root) if f.endswith(image_ext)),
        key=lambda file_path: file2id(image_root, file_path),
    )
    gt_files = sorted(
        (os.path.join(gt_root, f)
         for f in PathManager.ls(gt_root) if f.endswith(gt_ext)),
        key=lambda file_path: file2id(gt_root, file_path),
    )
    assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root)

    # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images
    if len(input_files) != len(gt_files):
        # FIX: `Logger.warn` is a deprecated alias of `Logger.warning`
        # (emits DeprecationWarning); use the canonical method.
        logger.warning(
            "Directory {} and {} has {} and {} files, respectively.".format(
                image_root, gt_root, len(input_files), len(gt_files)))
        # NOTE(review): matching below uses *basenames* minus the extension,
        # not the relative paths used for sorting above — identical basenames
        # in different sub-directories would collide. Behavior preserved;
        # confirm datasets are flat before relying on this branch.
        input_basenames = [
            os.path.basename(f)[:-len(image_ext)] for f in input_files
        ]
        gt_basenames = [os.path.basename(f)[:-len(gt_ext)] for f in gt_files]
        intersect = list(set(input_basenames) & set(gt_basenames))
        # sort, otherwise each worker may obtain a list[dict] in different order
        intersect = sorted(intersect)
        logger.warning("Will use their intersection of {} files.".format(
            len(intersect)))
        input_files = [
            os.path.join(image_root, f + image_ext) for f in intersect
        ]
        gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]

    logger.info("Loaded {} images with semantic segmentation from {}".format(
        len(input_files), image_root))

    dataset_dicts = []
    for (img_path, gt_path) in zip(input_files, gt_files):
        record = {}
        record["file_name"] = img_path
        record["sem_seg_file_name"] = gt_path
        dataset_dicts.append(record)
    return dataset_dicts