def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info):
    """
    Pair every panoptic annotation in json_info with its image file on disk.

    Returns:
        list[tuple]: (image_file, label_file, segments_info) per annotation.
    """
    suffix = "_leftImg8bit.png"
    cities = PathManager.ls(image_dir)
    logger.info(f"{len(cities)} cities found in '{image_dir}'.")

    # Index every image by its basename with the leftImg8bit suffix stripped,
    # so annotations can be joined on "image_id".
    image_dict = {}
    for city in cities:
        city_img_dir = os.path.join(image_dir, city)
        for basename in PathManager.ls(city_img_dir):
            image_file = os.path.join(city_img_dir, basename)
            assert basename.endswith(suffix), basename
            key = os.path.basename(basename)[: -len(suffix)]
            image_dict[key] = image_file

    files = []
    for ann in json_info["annotations"]:
        image_file = image_dict.get(ann["image_id"], None)
        assert image_file is not None, "No image {} found for annotation {}".format(
            ann["image_id"], ann["file_name"]
        )
        label_file = os.path.join(gt_dir, ann["file_name"])
        files.append((image_file, label_file, ann["segments_info"]))

    assert len(files), "No images found in {}".format(image_dir)
    # Spot-check the first pair actually exists on disk.
    assert PathManager.isfile(files[0][0]), files[0][0]
    assert PathManager.isfile(files[0][1]), files[0][1]
    return files
def test_bad_args(self) -> None:
    """With strict kwargs checking on, unknown kwargs raise; when disabled, they pass."""
    uri = self._remote_uri
    with self.assertRaises(NotImplementedError):
        PathManager.copy(uri, uri, foo="foo")  # type: ignore
    # (method, expected exception) pairs, in the original check order.
    cases = [
        (PathManager.exists, NotImplementedError),
        (PathManager.get_local_path, ValueError),
        (PathManager.isdir, NotImplementedError),
        (PathManager.isfile, NotImplementedError),
        (PathManager.ls, NotImplementedError),
        (PathManager.mkdirs, NotImplementedError),
        (PathManager.open, ValueError),
        (PathManager.rm, NotImplementedError),
    ]
    for fn, expected in cases:
        with self.assertRaises(expected):
            fn(uri, foo="foo")  # type: ignore

    # Relaxed mode tolerates the stray kwarg.
    PathManager.set_strict_kwargs_checking(False)
    PathManager.get_local_path(uri, foo="foo")  # type: ignore
    f = PathManager.open(uri, foo="foo")  # type: ignore
    f.close()
    PathManager.set_strict_kwargs_checking(True)
def get_local_path(input_file, dest_dir):
    """
    Resolve the local path where data was copied, if any.

    - If input_file is a regular file, the candidate is dest_dir/<filename>.
    - If input_file is a directory, check (under the SLURM-specific directory
      when running under SLURM, otherwise dest_dir) whether a "copy_complete"
      flag file exists; if so, the copied directory is the candidate.
    - If the candidate does not exist, fall back to input_file unchanged.
    """
    candidate = ""
    if PathManager.isfile(input_file):
        candidate = os.path.join(dest_dir, os.path.basename(input_file))
    elif PathManager.isdir(input_file):
        data_name = input_file.strip("/").split("/")[-1]
        if "SLURM_JOBID" in os.environ:
            dest_dir = get_slurm_dir(dest_dir)
        dest_dir = os.path.join(dest_dir, data_name)
        # The copy is only trusted when the completion marker is present.
        if PathManager.isfile(os.path.join(dest_dir, "copy_complete")):
            candidate = dest_dir
    return candidate if PathManager.exists(candidate) else input_file
def test_isfile(self) -> None:
    """isfile is True only for an existing regular file."""
    self.assertTrue(PathManager.isfile(self._tmpfile))  # pyre-ignore
    # A directory must not be reported as a file.
    self.assertFalse(PathManager.isfile(self._tmpdir))  # pyre-ignore
    # Neither should a path that does not exist at all.
    missing = os.path.join(self._tmpdir, uuid.uuid4().hex)  # pyre-ignore
    self.assertFalse(PathManager.isfile(missing))
def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta):
    """
    Args:
        image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
        gt_dir (str): path to the raw annotations. e.g.,
            "~/cityscapes/gtFine/cityscapes_panoptic_train".
        gt_json (str): path to the json file. e.g.,
            "~/cityscapes/gtFine/cityscapes_panoptic_train.json".
        meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id"
            and "stuff_dataset_id_to_contiguous_id" to map category ids to
            contiguous ids for training.

    Returns:
        list[dict]: a list of dicts in Detectron2 standard format. (See
        `Using Custom Datasets </tutorials/datasets.html>`_ )
    """

    def _convert_category_id(segment_info, meta):
        # Thing ids take precedence; any other id is looked up as stuff.
        cid = segment_info["category_id"]
        if cid in meta["thing_dataset_id_to_contiguous_id"]:
            segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][cid]
        else:
            segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][cid]
        return segment_info

    assert os.path.exists(
        gt_json
    ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files."  # noqa

    with open(gt_json) as f:
        json_info = json.load(f)

    ret = []
    for image_file, label_file, segments_info in get_cityscapes_panoptic_files(
        image_dir, gt_dir, json_info
    ):
        # Semantic labels live next to the image, in the gtFine tree.
        sem_label_file = (
            image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png"
        )
        # image_id is the first three "_"-separated tokens of the basename.
        image_id = "_".join(
            os.path.splitext(os.path.basename(image_file))[0].split("_")[:3]
        )
        ret.append(
            {
                "file_name": image_file,
                "image_id": image_id,
                "sem_seg_file_name": sem_label_file,
                "pan_seg_file_name": label_file,
                "segments_info": [_convert_category_id(x, meta) for x in segments_info],
            }
        )

    assert len(ret), f"No images found in {image_dir}!"
    assert PathManager.isfile(
        ret[0]["sem_seg_file_name"]
    ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"  # noqa
    assert PathManager.isfile(
        ret[0]["pan_seg_file_name"]
    ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py"  # noqa
    return ret
def test_rm(self):
    """rm removes a freshly written file; existence checks flip to False."""
    with open(os.path.join(self._tmpdir, "test_rm.txt"), "w") as f:
        rm_file = f.name
        f.write(self._tmpfile_contents)
        f.flush()
        self.assertTrue(PathManager.exists(rm_file))
        self.assertTrue(PathManager.isfile(rm_file))
        PathManager.rm(rm_file)
        self.assertFalse(PathManager.exists(rm_file))
        self.assertFalse(PathManager.isfile(rm_file))
def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta):
    """
    Args:
        image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
        gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
        json_file (str): path to the json file. e.g.,
            "~/coco/annotations/panoptic_train2017.json".

    Returns:
        list[dict]: a list of dicts in Detectron2 standard format. (See
        `Using Custom Datasets </tutorials/datasets.html>`_ )
    """

    def _convert_category_id(segment_info, meta):
        # Map dataset category ids to contiguous training ids and mark
        # whether each segment is a "thing" or "stuff" category.
        cid = segment_info["category_id"]
        if cid in meta["thing_dataset_id_to_contiguous_id"]:
            segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][cid]
            segment_info["isthing"] = True
        else:
            segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][cid]
            segment_info["isthing"] = False
        return segment_info

    with PathManager.open(json_file) as f:
        json_info = json.load(f)

    ret = []
    for ann in json_info["annotations"]:
        image_id = int(ann["image_id"])
        # TODO: currently we assume image and label has the same filename but
        # different extension, and images have extension ".jpg" for COCO. Need
        # to make image extension a user-provided argument if we extend this
        # function to support other COCO-like datasets.
        image_file = os.path.join(
            image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg"
        )
        assert PathManager.isfile(image_file), image_file
        label_file = os.path.join(gt_dir, ann["file_name"])
        assert PathManager.isfile(label_file), label_file
        ret.append(
            {
                "file_name": image_file,
                "image_id": image_id,
                "pan_seg_file_name": label_file,
                "segments_info": [
                    _convert_category_id(x, meta) for x in ann["segments_info"]
                ],
            }
        )

    assert len(ret), f"No images found in {image_dir}!"
    assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
    return ret
def setUp(self):
    """Skip the test entirely when the COCO minival annotations are absent."""
    json_file = MetadataCatalog.get("coco_2017_val_100").json_file
    if not PathManager.isfile(json_file):
        raise unittest.SkipTest("{} not found".format(json_file))
    # COCO() prints progress to stdout; silence it during setup.
    with contextlib.redirect_stdout(io.StringIO()):
        self.coco = COCO(PathManager.get_local_path(json_file))
def get_cityscapes_files(image_dir, gt_dir):
    """
    Scan the per-city Cityscapes layout and return, for every image,
    a tuple of (image_file, instance_file, label_file, json_file).
    """
    suffix = "leftImg8bit.png"
    cities = PathManager.ls(image_dir)
    logger.info(f"{len(cities)} cities found in '{image_dir}'.")

    files = []
    for city in cities:
        city_img_dir = os.path.join(image_dir, city)
        city_gt_dir = os.path.join(gt_dir, city)
        for basename in PathManager.ls(city_img_dir):
            image_file = os.path.join(city_img_dir, basename)
            assert basename.endswith(suffix)
            # The suffix deliberately has no leading underscore, so the stem
            # keeps its trailing "_" and the gtFine names concatenate directly.
            stem = basename[: -len(suffix)]
            files.append(
                (
                    image_file,
                    os.path.join(city_gt_dir, stem + "gtFine_instanceIds.png"),
                    os.path.join(city_gt_dir, stem + "gtFine_labelIds.png"),
                    os.path.join(city_gt_dir, stem + "gtFine_polygons.json"),
                )
            )

    assert len(files), "No images found in {}".format(image_dir)
    # Sanity-check that the first tuple's files all exist.
    for f in files[0]:
        assert PathManager.isfile(f), f
    return files
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
    """Merge a YAML config file into this config; only same-version merges are allowed."""
    assert PathManager.isfile(
        cfg_filename
    ), f"Config file '{cfg_filename}' does not exist!"
    loaded_cfg = type(self)(
        self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
    )
    # defaults.py needs to import CfgNode
    latest_ver = _C.VERSION
    assert (
        latest_ver == self.VERSION
    ), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
    logger = setup_logger(name='person_track')
    loaded_ver = loaded_cfg.get("VERSION", None)
    logger.debug(f"loaded_ver is: {loaded_ver}")
    # Unlike detectron2's upstream merge_from_file, no auto-upgrade is
    # attempted here: mismatched versions fail outright.
    assert loaded_ver == self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
        loaded_ver, self.VERSION
    )
    if loaded_ver == self.VERSION:
        self.merge_from_other_cfg(loaded_cfg)
def copy_dir(input_dir, destination_dir, num_threads):
    """
    Copy the contents of input_dir to destination_dir using num_threads
    parallel rsync workers.

    When the copy finishes we create a `copy_complete` marker file inside
    the destination folder; if the marker is already there, the copy is
    skipped. Useful for copying datasets like ImageNet to speed up the
    dataloader. Using 20 threads for ImageNet takes about 20 minutes.

    Returns:
        (str, str): the directory where the contents were copied, repeated
        twice so callers can unpack it as (output_file, destination_dir) —
        see copy_data().
    """
    # remove the backslash if user added it
    data_name = input_dir.strip("/").split("/")[-1]
    if "SLURM_JOBID" in os.environ:
        destination_dir = get_slurm_dir(destination_dir)
    destination_dir = f"{destination_dir}/{data_name}"
    makedir(destination_dir)
    complete_flag = f"{destination_dir}/copy_complete"
    if PathManager.isfile(complete_flag):
        logging.info(f"Found Data already copied: {destination_dir}...")
        # BUG FIX: return the same 2-tuple shape as the full-copy path below.
        # Callers (copy_data) unpack two values, so returning a bare string
        # here raised ValueError whenever the cached copy was found.
        return destination_dir, destination_dir
    logging.info(
        f"Copying {input_dir} to dir {destination_dir} using {num_threads} threads"
    )
    # We have to do multi-threaded rsync to speed up copy.
    cmd = (f"ls -d {input_dir}/* | parallel -j {num_threads} --will-cite "
           f"rsync -ruW --inplace {{}} {destination_dir}")
    os.system(cmd)
    # Touch the marker so subsequent runs skip the copy.
    PathManager.open(complete_flag, "a").close()
    logging.info("Copied to local directory")
    return destination_dir, destination_dir
def load_cityscapes_semantic(image_dir, gt_dir):
    """
    Args:
        image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
        gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".

    Returns:
        list[dict]: a list of dict, each has "file_name" and "sem_seg_file_name".
    """
    # gt_dir is small and contain many small files. make sense to fetch to local first
    gt_dir = PathManager.get_local_path(gt_dir)

    ret = []
    for image_file, _, label_file, json_file in get_cityscapes_files(image_dir, gt_dir):
        with PathManager.open(json_file, "r") as f:
            jsonobj = json.load(f)
        ret.append(
            {
                "file_name": image_file,
                "sem_seg_file_name": label_file.replace("labelIds", "labelTrainIds"),
                "height": jsonobj["imgHeight"],
                "width": jsonobj["imgWidth"],
            }
        )

    assert len(ret), f"No images found in {image_dir}!"
    assert PathManager.isfile(
        ret[0]["sem_seg_file_name"]
    ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"  # noqa
    return ret
def load_single_label_file(self, path: str):
    """
    Load the single data file. Only numpy (.npy) label files are supported
    when labels come from a data_filelist source.

    To save memory, if MMAP_MODE is enabled for this split we first try to
    memory-map the labels; on failure we progressively fall back, finally
    loading without mmap at all.
    """
    assert PathManager.isfile(path), f"Path to labels {path} is not a file"
    assert path.endswith("npy"), "Please specify a numpy file for labels"

    if not self.cfg["DATA"][self.split].MMAP_MODE:
        # mmap disabled: plain load through PathManager.
        with PathManager.open(path, "rb") as fopen:
            return np.load(fopen, allow_pickle=True)

    try:
        with PathManager.open(path, "rb") as fopen:
            return np.load(fopen, allow_pickle=True, mmap_mode="r")
    except ValueError as e:
        # mmap through a file object failed; retry on the raw path.
        logging.info(f"Could not mmap {path}: {e}. Trying without PathManager")
        labels = np.load(path, allow_pickle=True, mmap_mode="r")
        logging.info("Successfully loaded without PathManager")
        return labels
    except Exception:
        # Give up on mmap entirely.
        logging.info("Could not mmap without PathManager. Trying without mmap")
        with PathManager.open(path, "rb") as fopen:
            return np.load(fopen, allow_pickle=True)
def __init__(self, cfg, data_source, path, split, dataset_name):
    """Disk-backed image dataset fed by either a filelist or a folder tree."""
    super(DiskImageDataset, self).__init__(
        queue_size=cfg["DATA"][split]["BATCHSIZE_PER_REPLICA"]
    )
    # Validate the source kind and that the given path matches it.
    validators = {
        "disk_filelist": (PathManager.isfile, f"File {path} does not exist"),
        "disk_folder": (PathManager.isdir, f"Directory {path} does not exist"),
    }
    assert data_source in validators, (
        "data_source must be either disk_filelist or disk_folder"
    )
    check, message = validators[data_source]
    assert check(path), message

    self.cfg = cfg
    self.split = split
    self.dataset_name = dataset_name
    self.data_source = data_source
    self._path = path
    self.image_dataset = []
    self.is_initialized = False
    self._load_data(path)
    self._num_samples = len(self.image_dataset)
    if self.data_source == "disk_filelist":
        # Set dataset to null so that workers dont need to pickle this file.
        # This saves memory when disk_filelist is large, especially when memory mapping.
        self.image_dataset = []
    # whether to use QueueDataset class to handle invalid images or not
    self.enable_queue_dataset = cfg["DATA"][self.split]["ENABLE_QUEUE_DATASET"]
def copy_data(input_file, destination_dir, num_threads, tmp_destination_dir):
    """
    Copy data from one source to the other using num_threads. The data to
    copy can be a single file or a directory; we dispatch to the relevant
    helper after checking which it is.

    Returns:
        output_file (str): the new path of the data (could be file or dir)
        destination_dir (str): the destination dir that was actually used
    """
    logging.info(f"Creating directory: {destination_dir}")
    if destination_dir is None or destination_dir == "":
        # Normalize "" to None so downstream helpers see a single sentinel.
        destination_dir = None
    else:
        makedir(destination_dir)

    if PathManager.isfile(input_file):
        return copy_file(input_file, destination_dir, tmp_destination_dir)
    if PathManager.isdir(input_file):
        return copy_dir(input_file, destination_dir, num_threads)
    raise RuntimeError("The input_file is neither a file nor a directory")
def test_bad_args(self) -> None:
    """Strict kwargs checking rejects unknown kwargs; relaxed mode accepts them."""
    # TODO (T58240718): Replace with dynamic checks
    strict_calls = [
        lambda: PathManager.copy(self._tmpfile, self._tmpfile, foo="foo"),  # type: ignore
        lambda: PathManager.exists(self._tmpfile, foo="foo"),  # type: ignore
        lambda: PathManager.get_local_path(self._tmpfile, foo="foo"),  # type: ignore
        lambda: PathManager.isdir(self._tmpfile, foo="foo"),  # type: ignore
        lambda: PathManager.isfile(self._tmpfile, foo="foo"),  # type: ignore
        lambda: PathManager.ls(self._tmpfile, foo="foo"),  # type: ignore
        lambda: PathManager.mkdirs(self._tmpfile, foo="foo"),  # type: ignore
        lambda: PathManager.open(self._tmpfile, foo="foo"),  # type: ignore
        lambda: PathManager.rm(self._tmpfile, foo="foo"),  # type: ignore
    ]
    for call in strict_calls:
        with self.assertRaises(ValueError):
            call()

    # With strict checking off, every call tolerates the stray kwarg.
    PathManager.set_strict_kwargs_checking(False)
    PathManager.copy(self._tmpfile, self._tmpfile, foo="foo")  # type: ignore
    PathManager.exists(self._tmpfile, foo="foo")  # type: ignore
    PathManager.get_local_path(self._tmpfile, foo="foo")  # type: ignore
    PathManager.isdir(self._tmpfile, foo="foo")  # type: ignore
    PathManager.isfile(self._tmpfile, foo="foo")  # type: ignore
    PathManager.ls(self._tmpdir, foo="foo")  # type: ignore
    PathManager.mkdirs(self._tmpdir, foo="foo")  # type: ignore
    f = PathManager.open(self._tmpfile, foo="foo")  # type: ignore
    f.close()
    # pyre-ignore
    with open(os.path.join(self._tmpdir, "test_rm.txt"), "w") as f:
        rm_file = f.name
        f.write(self._tmpfile_contents)
        f.flush()
    PathManager.rm(rm_file, foo="foo")  # type: ignore
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
    """Load a YAML config file and merge its values into this config node."""
    assert PathManager.isfile(
        cfg_filename
    ), f"Config file '{cfg_filename}' does not exist!"
    raw = _CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
    self.merge_from_other_cfg(type(self)(raw))
def load(self, load_path: str, overwrite_config=None):
    """
    Loads a checkpoint from disk.

    Args:
        load_path (str): the file path to load for checkpoint
        overwrite_config: optional config used instead of the one stored in
            the snapshot (presumably a PyTextConfig — TODO confirm with callers)

    Returns:
        task (Task), config (PyTextConfig) and training_state (TrainingState)

    Raises:
        ValueError: if load_path is empty or does not point to a file.
    """
    if not (load_path and PathManager.isfile(load_path)):
        # Fixed message: the original f-string lacked a separator between the
        # text and the path ("...path/tmp/x"), producing a garbled error.
        raise ValueError(f"Invalid snapshot path: {load_path}")
    print(f"Loading model from {load_path}")
    with PathManager.open(load_path, "rb") as checkpoint_f:
        return load_checkpoint(checkpoint_f, overwrite_config)
def get_all_checkpoint_files(self) -> List[str]:
    """
    Returns:
        list: All available checkpoint files (.pth files) in target directory.
    """
    checkpoints = []
    for file in PathManager.ls(self.save_dir):
        candidate = os.path.join(self.save_dir, file)
        # Keep only regular files with the .pth extension.
        if file.endswith(".pth") and PathManager.isfile(candidate):
            checkpoints.append(candidate)
    return checkpoints
def get_image(img_path, resize=256, replace_prefix="", new_prefix=""):
    """
    Read an RGB image from img_path (optionally rewriting its prefix).

    Returns:
        (PIL.Image, bool): the image and a success flag. On any failure —
        unreadable file, missing path — a uniform gray placeholder of shape
        (resize, resize, 3) is returned with is_success=False.
    """
    is_success = False
    img = None
    try:
        if PathManager.isfile(img_path) and PathManager.exists(img_path):
            img_path = _replace_img_path_prefix(img_path, replace_prefix, new_prefix)
            with PathManager.open(img_path, "rb") as fopen:
                img = Image.open(fopen).convert("RGB")
            is_success = True
    except Exception as e:
        print(e)
    if not is_success:
        # BUG FIX: previously the placeholder was only built inside the
        # except-block, so a missing file (isfile False, no exception) left
        # `img` unbound and the return raised UnboundLocalError. Now every
        # unsuccessful path yields the gray placeholder.
        img = Image.fromarray(128 * np.ones((resize, resize, 3), dtype=np.uint8))
    return img, is_success
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
    """Merge a YAML config file into self, auto-upgrading older versions."""
    assert PathManager.isfile(
        cfg_filename
    ), f"Config file '{cfg_filename}' does not exist!"
    loaded_cfg = type(self)(
        _CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
    )

    # defaults.py needs to import CfgNode
    from .defaults import _C

    assert (
        _C.VERSION == self.VERSION
    ), "CfgNode.merge_from_file is only allowed on a config object of latest version!"

    logger = logging.getLogger(__name__)

    loaded_ver = loaded_cfg.get("VERSION", None)
    if loaded_ver is None:
        # Old files carry no VERSION key; infer one from their structure.
        from .compat import guess_version

        loaded_ver = guess_version(loaded_cfg, cfg_filename)
    assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
        loaded_ver, self.VERSION
    )

    if loaded_ver == self.VERSION:
        self.merge_from_other_cfg(loaded_cfg)
    else:
        # compat.py needs to import CfgNode
        from .compat import downgrade_config, upgrade_config

        logger.warning(
            "Loading an old v{} config file '{}' by automatically upgrading to v{}. "
            "See docs/CHANGELOG.md for instructions to update your files.".format(
                loaded_ver, cfg_filename, self.VERSION
            )
        )
        # To convert, first obtain a full config at an old version
        old_self = downgrade_config(self, to_version=loaded_ver)
        old_self.merge_from_other_cfg(loaded_cfg)
        new_config = upgrade_config(old_self)
        self.clear()
        self.update(new_config)
def load_cityscapes_semantic(image_dir, gt_dir):
    """Build Detectron2-style records pairing each image with its labelTrainIds map."""
    # Annotations are many small files; fetching them to local disk first is cheaper.
    local_gt_dir = PathManager.get_local_path(gt_dir)

    records = []
    for image_file, _, label_file, json_file in get_cityscapes_files(
        image_dir, local_gt_dir
    ):
        train_id_file = label_file.replace("labelIds", "labelTrainIds")
        with PathManager.open(json_file, "r") as f:
            polygons = json.load(f)
        records.append(
            {
                "file_name": image_file,
                "sem_seg_file_name": train_id_file,
                "height": polygons["imgHeight"],
                "width": polygons["imgWidth"],
            }
        )

    assert len(records), f"No images found in {image_dir}!"
    assert PathManager.isfile(records[0]["sem_seg_file_name"]), \
        "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"
    return records
def prepare_task(
    config: PyTextConfig,
    dist_init_url: str = None,
    device_id: int = 0,
    rank: int = 0,
    world_size: int = 1,
    metric_channels: Optional[List[Channel]] = None,
    metadata: CommonMetadata = None,
) -> Tuple[Task_Deprecated, TrainingState]:
    """Configure the runtime (CUDA/fp16/distributed) and build or restore the task."""
    if rank == 0:
        print("\nParameters: {}\n".format(config), flush=True)
    _set_cuda(config.use_cuda_if_available, device_id, world_size)
    _set_fp16(config.use_fp16, rank)
    _set_distributed(rank, world_size, dist_init_url, device_id)

    if config.random_seed is not None:
        set_random_seeds(config.random_seed, config.use_deterministic_cudnn)

    training_state = None
    snapshot = config.load_snapshot_path
    if snapshot and PathManager.isfile(snapshot):
        # Restore from an existing snapshot, optionally overriding its config.
        if config.use_config_from_snapshot:
            task, _, training_state = load(snapshot)
        else:
            task, _, training_state = load(snapshot, overwrite_config=config)
        if training_state:
            training_state.rank = rank
    else:
        task = create_task(
            config.task, metadata=metadata, rank=rank, world_size=world_size
        )

    for mc in metric_channels or []:
        task.metric_reporter.add_channel(mc)

    return task, training_state
def test_isfile(self):
    """A regular file is reported as a file; a directory is not."""
    file_path, dir_path = self._tmpfile, self._tmpdir
    self.assertTrue(PathManager.isfile(file_path))
    # Directories must be rejected.
    self.assertFalse(PathManager.isfile(dir_path))
def isfile(path: str) -> bool:
    """Check whether path is a regular file, delegating to fvcore's PathManager when available."""
    if not FVCorePathManager:
        # fvcore is absent; fall back to the plain filesystem check.
        return os.path.isfile(path)
    return FVCorePathManager.isfile(path)
def __getitem__(self, idx):
    """
    Load one sample: the rendered image plus (optionally) mesh, sampled
    points/normals, and voxels, all transformed into the camera frame.

    Returns:
        (img, verts, faces, points, normals, voxels, P, id_str) — absent
        optional pieces are None. Exact tensor shapes depend on the stored
        .pt files — not verifiable from this code alone.
    """
    # Identify the (synset, model, image) triple for this sample.
    sid = self.synset_ids[idx]
    mid = self.model_ids[idx]
    iid = self.image_ids[idx]

    # Always read metadata for this model; TODO cache in __init__?
    metadata_path = os.path.join(self.data_dir, sid, mid, "metadata.pt")
    with PathManager.open(metadata_path, "rb") as f:
        metadata = torch.load(f)
    K = metadata["intrinsic"]  # camera intrinsics
    RT = metadata["extrinsics"][iid]  # extrinsics for this particular view
    img_path = metadata["image_list"][iid]
    img_path = os.path.join(self.data_dir, sid, mid, "images", img_path)

    # Load the image
    with PathManager.open(img_path, "rb") as f:
        img = Image.open(f).convert("RGB")
    img = self.transform(img)

    # Maybe read mesh
    verts, faces = None, None
    if self.return_mesh:
        mesh_path = os.path.join(self.data_dir, sid, mid, "mesh.pt")
        with PathManager.open(mesh_path, "rb") as f:
            mesh_data = torch.load(f)
        verts, faces = mesh_data["verts"], mesh_data["faces"]
        # Move mesh vertices into the camera coordinate frame.
        verts = project_verts(verts, RT)

    # Maybe use cached samples
    points, normals = None, None
    if not self.sample_online:
        samples = self.mid_to_samples.get(mid, None)
        if samples is None:
            # They were not cached in memory, so read off disk
            samples_path = os.path.join(self.data_dir, sid, mid, "samples.pt")
            with PathManager.open(samples_path, "rb") as f:
                samples = torch.load(f)
        points = samples["points_sampled"]
        normals = samples["normals_sampled"]
        # Random subset of num_samples points; NOTE this rebinds `idx`,
        # shadowing the dataset index argument from here on.
        idx = torch.randperm(points.shape[0])[: self.num_samples]
        points, normals = points[idx], normals[idx]
        points = project_verts(points, RT)
        normals = normals.mm(RT[:3, :3].t())  # Only rotate, don't translate

    voxels, P = None, None
    if self.voxel_size > 0:
        # Use precomputed voxels if we have them, otherwise return voxel_coords
        # and we will compute voxels in postprocess
        voxel_file = "vox%d/%03d.pt" % (self.voxel_size, iid)
        voxel_file = os.path.join(self.data_dir, sid, mid, voxel_file)
        if PathManager.isfile(voxel_file):
            with PathManager.open(voxel_file, "rb") as f:
                voxels = torch.load(f)
        else:
            voxel_path = os.path.join(self.data_dir, sid, mid, "voxels.pt")
            with PathManager.open(voxel_path, "rb") as f:
                voxel_data = torch.load(f)
            voxels = voxel_data["voxel_coords"]
            # Projection matrix so voxels can be computed in postprocess.
            # NOTE(review): indentation was lost in this source; placement of
            # P = K.mm(RT) inside this else-branch follows the upstream
            # Mesh R-CNN dataset code — confirm against the original file.
            P = K.mm(RT)

    id_str = "%s-%s-%02d" % (sid, mid, iid)
    return img, verts, faces, points, normals, voxels, P, id_str