def _make_datapipe(
    self,
    resource_dps: List[IterDataPipe],
    *,
    config: DatasetConfig,
    decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
    archive_dp = resource_dps[0]
    splits_dp, joint_categories_dp, images_dp = Demultiplexer(
        archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
    )

    splits_dp = Filter(splits_dp, path_comparator("name", f"{config.split}{config.fold}.txt"))
    splits_dp = LineReader(splits_dp, decode=True, return_path=False)
    splits_dp = Shuffler(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)
    splits_dp = hint_sharding(splits_dp)

    joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")

    dp = IterKeyZipper(
        splits_dp,
        joint_categories_dp,
        key_fn=getitem(),
        ref_key_fn=getitem(0),
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    dp = IterKeyZipper(
        dp,
        images_dp,
        key_fn=getitem(0),
        ref_key_fn=self._image_key_fn,
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    return Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
def _make_datapipe(
    self,
    resource_dps: List[IterDataPipe],
    *,
    config: DatasetConfig,
) -> IterDataPipe[Dict[str, Any]]:
    archive_dp, extra_split_dp = resource_dps

    archive_dp = resource_dps[0]
    split_dp, images_dp, anns_dp = Demultiplexer(
        archive_dp,
        3,
        self._classify_archive,
        buffer_size=INFINITE_BUFFER_SIZE,
        drop_none=True,
    )

    if config.split == "train_noval":
        split_dp = extra_split_dp

    split_dp = Filter(split_dp, path_comparator("name", f"{config.split}.txt"))
    split_dp = LineReader(split_dp, decode=True)
    split_dp = hint_sharding(split_dp)
    split_dp = hint_shuffling(split_dp)

    # Zip the images and annotations onto the split entries one after the other. Each zip wraps the
    # previous sample in another tuple, so the split key has to be read one level deeper every time,
    # hence key_fn=getitem(*[0] * level, 1).
    dp = split_dp
    for level, data_dp in enumerate((images_dp, anns_dp)):
        dp = IterKeyZipper(
            dp,
            data_dp,
            key_fn=getitem(*[0] * level, 1),
            ref_key_fn=path_accessor("stem"),
            buffer_size=INFINITE_BUFFER_SIZE,
        )
    return Mapper(dp, self._prepare_sample)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
    archive_dp = resource_dps[0]
    images_dp, scenes_dp = Demultiplexer(
        archive_dp,
        2,
        self._classify_archive,
        drop_none=True,
        buffer_size=INFINITE_BUFFER_SIZE,
    )

    images_dp = Filter(images_dp, path_comparator("parent.name", self._split))
    images_dp = hint_shuffling(images_dp)
    images_dp = hint_sharding(images_dp)

    if self._split != "test":
        scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{self._split}_scenes.json"))
        scenes_dp = JsonParser(scenes_dp)
        scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
        scenes_dp = UnBatcher(scenes_dp)

        dp = IterKeyZipper(
            images_dp,
            scenes_dp,
            key_fn=path_accessor("name"),
            ref_key_fn=getitem("image_filename"),
            buffer_size=INFINITE_BUFFER_SIZE,
        )
    else:
        dp = Mapper(images_dp, self._add_empty_anns)

    return Mapper(dp, self._prepare_sample)
def _make_datapipe(
    self,
    resource_dps: List[IterDataPipe],
    *,
    config: DatasetConfig,
    decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
    if config.split == "train":
        images_dp, ann_dp = Demultiplexer(
            resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
        )
    else:
        images_dp, ann_dp = resource_dps
        images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))

    # The order of the image files in the .zip archives perfectly matches the order of the entries in
    # the (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
    ann_dp = CSVDictParser(ann_dp, delimiter=";")
    dp = Zipper(images_dp, ann_dp)

    dp = hint_sharding(dp)
    dp = hint_shuffling(dp)

    dp = Mapper(dp, partial(self._collate_and_decode, decoder=decoder))
    return dp
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
    archive_dp = resource_dps[0]
    split_dp, images_dp, anns_dp = Demultiplexer(
        archive_dp,
        3,
        self._classify_archive,
        drop_none=True,
        buffer_size=INFINITE_BUFFER_SIZE,
    )

    split_dp = Filter(split_dp, functools.partial(self._is_in_folder, name=self._split_folder))
    split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
    split_dp = LineReader(split_dp, decode=True)
    split_dp = hint_shuffling(split_dp)
    split_dp = hint_sharding(split_dp)

    dp = split_dp
    for level, data_dp in enumerate((images_dp, anns_dp)):
        dp = IterKeyZipper(
            dp,
            data_dp,
            key_fn=getitem(*[0] * level, 1),
            ref_key_fn=path_accessor("stem"),
            buffer_size=INFINITE_BUFFER_SIZE,
        )
    return Mapper(dp, self._prepare_sample)
def _make_datapipe(self, resource_dps: List[IterDataPipe], *, config: DatasetConfig) -> IterDataPipe[Dict[str, Any]]:
    archive_dp = resource_dps[0]
    images_dp, labels_dp = Demultiplexer(
        archive_dp,
        2,
        functools.partial(self._classify_archive, config=config),
        drop_none=True,
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    return super()._make_datapipe([images_dp, labels_dp], config=config)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
    archive_dp = resource_dps[0]
    images_dp, labels_dp = Demultiplexer(
        archive_dp,
        2,
        self._classify_archive,
        drop_none=True,
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    return super()._datapipe([images_dp, labels_dp])
def _make_datapipe(
    self,
    resource_dps: List[IterDataPipe],
    *,
    config: DatasetConfig,
    decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
    images_dp, anns_dp = resource_dps

    images_dp = Filter(images_dp, self._filter_images)

    split_and_classification_dp, segmentations_dp = Demultiplexer(
        anns_dp,
        2,
        self._classify_anns,
        drop_none=True,
        buffer_size=INFINITE_BUFFER_SIZE,
    )

    split_and_classification_dp = Filter(split_and_classification_dp, path_comparator("name", f"{config.split}.txt"))
    split_and_classification_dp = CSVDictParser(
        split_and_classification_dp, fieldnames=("image_id", "label", "species"), delimiter=" "
    )
    split_and_classification_dp = hint_sharding(split_and_classification_dp)
    split_and_classification_dp = hint_shuffling(split_and_classification_dp)

    segmentations_dp = Filter(segmentations_dp, self._filter_segmentations)

    anns_dp = IterKeyZipper(
        split_and_classification_dp,
        segmentations_dp,
        key_fn=getitem("image_id"),
        ref_key_fn=path_accessor("stem"),
        buffer_size=INFINITE_BUFFER_SIZE,
    )

    dp = IterKeyZipper(
        anns_dp,
        images_dp,
        key_fn=getitem(0, "image_id"),
        ref_key_fn=path_accessor("stem"),
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    return Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
    images_dp, meta_dp = resource_dps

    if self._annotations is None:
        dp = hint_shuffling(images_dp)
        dp = hint_sharding(dp)
        return Mapper(dp, self._prepare_image)

    meta_dp = Filter(meta_dp, self._filter_meta_files)
    meta_dp = JsonParser(meta_dp)
    meta_dp = Mapper(meta_dp, getitem(1))
    meta_dp: IterDataPipe[Dict[str, Dict[str, Any]]] = MappingIterator(meta_dp)
    images_meta_dp, anns_meta_dp = Demultiplexer(
        meta_dp,
        2,
        self._classify_meta,
        drop_none=True,
        buffer_size=INFINITE_BUFFER_SIZE,
    )

    images_meta_dp = Mapper(images_meta_dp, getitem(1))
    images_meta_dp = UnBatcher(images_meta_dp)

    anns_meta_dp = Mapper(anns_meta_dp, getitem(1))
    anns_meta_dp = UnBatcher(anns_meta_dp)
    anns_meta_dp = Grouper(anns_meta_dp, group_key_fn=getitem("image_id"), buffer_size=INFINITE_BUFFER_SIZE)
    anns_meta_dp = hint_shuffling(anns_meta_dp)
    anns_meta_dp = hint_sharding(anns_meta_dp)

    anns_dp = IterKeyZipper(
        anns_meta_dp,
        images_meta_dp,
        key_fn=getitem(0, "image_id"),
        ref_key_fn=getitem("id"),
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    dp = IterKeyZipper(
        anns_dp,
        images_dp,
        key_fn=getitem(1, "file_name"),
        ref_key_fn=path_accessor("name"),
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    return Mapper(dp, self._prepare_sample)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
    images_dp, anns_dp = resource_dps

    images_dp = Filter(images_dp, self._filter_images)

    split_and_classification_dp, segmentations_dp = Demultiplexer(
        anns_dp,
        2,
        self._classify_anns,
        drop_none=True,
        buffer_size=INFINITE_BUFFER_SIZE,
    )

    split_and_classification_dp = Filter(split_and_classification_dp, path_comparator("name", f"{self._split}.txt"))
    split_and_classification_dp = CSVDictParser(
        split_and_classification_dp, fieldnames=("image_id", "label", "species"), delimiter=" "
    )
    split_and_classification_dp = hint_sharding(split_and_classification_dp)
    split_and_classification_dp = hint_shuffling(split_and_classification_dp)

    segmentations_dp = Filter(segmentations_dp, self._filter_segmentations)

    anns_dp = IterKeyZipper(
        split_and_classification_dp,
        segmentations_dp,
        key_fn=getitem("image_id"),
        ref_key_fn=path_accessor("stem"),
        buffer_size=INFINITE_BUFFER_SIZE,
    )

    dp = IterKeyZipper(
        anns_dp,
        images_dp,
        key_fn=getitem(0, "image_id"),
        ref_key_fn=path_accessor("stem"),
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    return Mapper(dp, self._prepare_sample)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
    if self._split == "train":
        images_dp, ann_dp = Demultiplexer(
            resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
        )
    else:
        images_dp, ann_dp = resource_dps
        images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))

    # The order of the image files in the .zip archives perfectly matches the order of the entries in the
    # (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
    ann_dp = CSVDictParser(ann_dp, delimiter=";")
    dp = Zipper(images_dp, ann_dp)

    dp = hint_shuffling(dp)
    dp = hint_sharding(dp)

    return Mapper(dp, self._prepare_sample)
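# A minimal, standalone sketch (not part of any dataset class above) illustrating the comment in the
# previous function: Zipper pairs two datapipes purely by position, so it only works when both streams
# are already in matching order, whereas IterKeyZipper buffers the reference pipe and matches elements
# by key. The file names and buffer size below are made up for illustration.
from torchdata.datapipes.iter import IterableWrapper, IterKeyZipper, Zipper

images = IterableWrapper(["00000.ppm", "00001.ppm"])
rows = IterableWrapper([{"Filename": "00000.ppm"}, {"Filename": "00001.ppm"}])

# Positional pairing: cheap, but relies on both pipes yielding items in identical order.
by_position = Zipper(images, rows)

# Key-based pairing: order-independent, at the cost of buffering the reference pipe.
by_key = IterKeyZipper(
    images,
    rows,
    key_fn=lambda name: name,
    ref_key_fn=lambda row: row["Filename"],
    buffer_size=100,
)

# Both pipes yield (image, row) pairs here; they only agree because the inputs are already aligned.
for sample in by_key:
    print(sample)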
def _make_datapipe(self, resource_dps: List[IterDataPipe], *, config: DatasetConfig) -> IterDataPipe[Dict[str, Any]]:
    if config.split in {"train", "test"}:
        dp = resource_dps[0]

        # the train archive is a tar of tars
        if config.split == "train":
            dp = TarArchiveReader(dp)

        dp = hint_sharding(dp)
        dp = hint_shuffling(dp)
        dp = Mapper(dp, self._prepare_train_data if config.split == "train" else self._prepare_test_data)
    else:  # config.split == "val"
        images_dp, devkit_dp = resource_dps

        meta_dp, label_dp = Demultiplexer(
            devkit_dp, 2, self._classifiy_devkit, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
        )

        meta_dp = Mapper(meta_dp, self._extract_categories_and_wnids)
        _, wnids = zip(*next(iter(meta_dp)))

        label_dp = LineReader(label_dp, decode=True, return_path=False)
        label_dp = Mapper(label_dp, functools.partial(self._imagenet_label_to_wnid, wnids=wnids))
        label_dp: IterDataPipe[Tuple[int, str]] = Enumerator(label_dp, 1)
        label_dp = hint_sharding(label_dp)
        label_dp = hint_shuffling(label_dp)

        dp = IterKeyZipper(
            label_dp,
            images_dp,
            key_fn=getitem(0),
            ref_key_fn=self._val_test_image_key,
            buffer_size=INFINITE_BUFFER_SIZE,
        )
        dp = Mapper(dp, self._prepare_val_data)

    return Mapper(dp, self._prepare_sample)
def _make_datapipe(
    self,
    resource_dps: List[IterDataPipe],
    *,
    config: DatasetConfig,
) -> IterDataPipe[Dict[str, Any]]:
    archive_dp = resource_dps[0]
    split_dp, images_dp, anns_dp = Demultiplexer(
        archive_dp,
        3,
        functools.partial(self._classify_archive, config=config),
        drop_none=True,
        buffer_size=INFINITE_BUFFER_SIZE,
    )

    split_dp = Filter(split_dp, functools.partial(self._is_in_folder, name=self._SPLIT_FOLDER[config.task]))
    split_dp = Filter(split_dp, path_comparator("name", f"{config.split}.txt"))
    split_dp = LineReader(split_dp, decode=True)
    split_dp = hint_sharding(split_dp)
    split_dp = hint_shuffling(split_dp)

    dp = split_dp
    for level, data_dp in enumerate((images_dp, anns_dp)):
        dp = IterKeyZipper(
            dp,
            data_dp,
            key_fn=getitem(*[0] * level, 1),
            ref_key_fn=path_accessor("stem"),
            buffer_size=INFINITE_BUFFER_SIZE,
        )
    return Mapper(
        dp,
        functools.partial(
            self._prepare_sample,
            prepare_ann_fn=self._prepare_detection_ann
            if config.task == "detection"
            else self._prepare_segmentation_ann,
        ),
    )
def _make_datapipe(
    self,
    resource_dps: List[IterDataPipe],
    *,
    config: DatasetConfig,
    decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
    archive_dp, extra_split_dp = resource_dps

    archive_dp = resource_dps[0]
    split_dp, images_dp, anns_dp = Demultiplexer(
        archive_dp,
        3,
        self._classify_archive,
        buffer_size=INFINITE_BUFFER_SIZE,
        drop_none=True,
    )

    if config.split == "train_noval":
        split_dp = extra_split_dp

    split_dp = Filter(split_dp, path_comparator("stem", config.split))
    split_dp = LineReader(split_dp, decode=True)
    split_dp = hint_sharding(split_dp)
    split_dp = hint_shuffling(split_dp)

    dp = split_dp
    for level, data_dp in enumerate((images_dp, anns_dp)):
        dp = IterKeyZipper(
            dp,
            data_dp,
            key_fn=getitem(*[0] * level, 1),
            ref_key_fn=path_accessor("stem"),
            buffer_size=INFINITE_BUFFER_SIZE,
        )
    return Mapper(dp, functools.partial(self._collate_and_decode_sample, config=config, decoder=decoder))
def _make_datapipe(
    self,
    resource_dps: List[IterDataPipe],
    *,
    config: DatasetConfig,
    decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
    archive_dp = resource_dps[0]
    images_dp, scenes_dp = Demultiplexer(
        archive_dp,
        2,
        self._classify_archive,
        drop_none=True,
        buffer_size=INFINITE_BUFFER_SIZE,
    )

    images_dp = Filter(images_dp, path_comparator("parent.name", config.split))
    images_dp = hint_sharding(images_dp)
    images_dp = hint_shuffling(images_dp)

    if config.split != "test":
        scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{config.split}_scenes.json"))
        scenes_dp = JsonParser(scenes_dp)
        scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
        scenes_dp = UnBatcher(scenes_dp)

        dp = IterKeyZipper(
            images_dp,
            scenes_dp,
            key_fn=path_accessor("name"),
            ref_key_fn=getitem("image_filename"),
            buffer_size=INFINITE_BUFFER_SIZE,
        )
    else:
        dp = Mapper(images_dp, self._add_empty_anns)

    return Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
    archive_dp = resource_dps[0]
    images_dp, split_dp = Demultiplexer(
        archive_dp, 2, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
    )

    split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
    split_dp = LineReader(split_dp, decode=True, return_path=False)
    split_dp = hint_sharding(split_dp)
    split_dp = hint_shuffling(split_dp)

    dp = IterKeyZipper(
        split_dp,
        images_dp,
        key_fn=getitem(),
        ref_key_fn=self._image_key,
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    return Mapper(dp, self._prepare_sample)
def _make_datapipe(
    self,
    resource_dps: List[IterDataPipe],
    *,
    config: DatasetConfig,
    decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
    if config.year == "2011":
        archive_dp, segmentations_dp = resource_dps
        images_dp, split_dp, image_files_dp, bounding_boxes_dp = Demultiplexer(
            archive_dp, 4, self._2011_classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
        )

        image_files_dp = CSVParser(image_files_dp, dialect="cub200")
        image_files_map = dict(
            (image_id, rel_posix_path.rsplit("/", maxsplit=1)[1]) for image_id, rel_posix_path in image_files_dp
        )

        split_dp = CSVParser(split_dp, dialect="cub200")
        split_dp = Filter(split_dp, functools.partial(self._2011_filter_split, split=config.split))
        split_dp = Mapper(split_dp, getitem(0))
        split_dp = Mapper(split_dp, image_files_map.get)

        bounding_boxes_dp = CSVParser(bounding_boxes_dp, dialect="cub200")
        bounding_boxes_dp = Mapper(bounding_boxes_dp, image_files_map.get, input_col=0)

        anns_dp = IterKeyZipper(
            bounding_boxes_dp,
            segmentations_dp,
            key_fn=getitem(0),
            ref_key_fn=self._2011_segmentation_key,
            keep_key=True,
            buffer_size=INFINITE_BUFFER_SIZE,
        )
    else:  # config.year == "2010"
        split_dp, images_dp, anns_dp = resource_dps

        split_dp = Filter(split_dp, path_comparator("name", f"{config.split}.txt"))
        split_dp = LineReader(split_dp, decode=True, return_path=False)
        split_dp = Mapper(split_dp, self._2010_split_key)

        anns_dp = Mapper(anns_dp, self._2010_anns_key)

    split_dp = hint_sharding(split_dp)
    split_dp = hint_shuffling(split_dp)

    dp = IterKeyZipper(
        split_dp,
        images_dp,
        getitem(),
        path_accessor("name"),
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    dp = IterKeyZipper(
        dp,
        anns_dp,
        getitem(0),
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    return Mapper(dp, functools.partial(self._collate_and_decode_sample, year=config.year, decoder=decoder))
def _make_datapipe(
    self,
    resource_dps: List[IterDataPipe],
    *,
    config: DatasetConfig,
    decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
    images_dp, meta_dp = resource_dps

    if config.annotations is None:
        dp = hint_sharding(images_dp)
        dp = hint_shuffling(dp)
        return Mapper(dp, functools.partial(self._collate_and_decode_image, decoder=decoder))

    meta_dp = Filter(
        meta_dp,
        functools.partial(
            self._filter_meta_files,
            split=config.split,
            year=config.year,
            annotations=config.annotations,
        ),
    )
    meta_dp = JsonParser(meta_dp)
    meta_dp = Mapper(meta_dp, getitem(1))
    meta_dp: IterDataPipe[Dict[str, Dict[str, Any]]] = MappingIterator(meta_dp)
    images_meta_dp, anns_meta_dp = Demultiplexer(
        meta_dp,
        2,
        self._classify_meta,
        drop_none=True,
        buffer_size=INFINITE_BUFFER_SIZE,
    )

    images_meta_dp = Mapper(images_meta_dp, getitem(1))
    images_meta_dp = UnBatcher(images_meta_dp)

    anns_meta_dp = Mapper(anns_meta_dp, getitem(1))
    anns_meta_dp = UnBatcher(anns_meta_dp)
    anns_meta_dp = Grouper(anns_meta_dp, group_key_fn=getitem("image_id"), buffer_size=INFINITE_BUFFER_SIZE)
    anns_meta_dp = hint_sharding(anns_meta_dp)
    anns_meta_dp = hint_shuffling(anns_meta_dp)

    anns_dp = IterKeyZipper(
        anns_meta_dp,
        images_meta_dp,
        key_fn=getitem(0, "image_id"),
        ref_key_fn=getitem("id"),
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    dp = IterKeyZipper(
        anns_dp,
        images_dp,
        key_fn=getitem(1, "file_name"),
        ref_key_fn=path_accessor("name"),
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    return Mapper(
        dp, functools.partial(self._collate_and_decode_sample, annotations=config.annotations, decoder=decoder)
    )
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
    prepare_ann_fn: Callable
    if self._year == "2011":
        archive_dp, segmentations_dp = resource_dps
        images_dp, split_dp, image_files_dp, bounding_boxes_dp = Demultiplexer(
            archive_dp, 4, self._2011_classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
        )

        image_files_dp = CSVParser(image_files_dp, dialect="cub200")
        image_files_map = dict(
            (image_id, rel_posix_path.rsplit("/", maxsplit=1)[1]) for image_id, rel_posix_path in image_files_dp
        )

        split_dp = CSVParser(split_dp, dialect="cub200")
        split_dp = Filter(split_dp, self._2011_filter_split)
        split_dp = Mapper(split_dp, getitem(0))
        split_dp = Mapper(split_dp, image_files_map.get)

        bounding_boxes_dp = CSVParser(bounding_boxes_dp, dialect="cub200")
        bounding_boxes_dp = Mapper(bounding_boxes_dp, image_files_map.get, input_col=0)

        anns_dp = IterKeyZipper(
            bounding_boxes_dp,
            segmentations_dp,
            key_fn=getitem(0),
            ref_key_fn=self._2011_segmentation_key,
            keep_key=True,
            buffer_size=INFINITE_BUFFER_SIZE,
        )

        prepare_ann_fn = self._2011_prepare_ann
    else:  # self._year == "2010"
        split_dp, images_dp, anns_dp = resource_dps

        split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
        split_dp = LineReader(split_dp, decode=True, return_path=False)
        split_dp = Mapper(split_dp, self._2010_split_key)

        anns_dp = Mapper(anns_dp, self._2010_anns_key)

        prepare_ann_fn = self._2010_prepare_ann

    split_dp = hint_shuffling(split_dp)
    split_dp = hint_sharding(split_dp)

    dp = IterKeyZipper(
        split_dp,
        images_dp,
        getitem(),
        path_accessor("name"),
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    dp = IterKeyZipper(
        dp,
        anns_dp,
        getitem(0),
        buffer_size=INFINITE_BUFFER_SIZE,
    )
    return Mapper(dp, functools.partial(self._prepare_sample, prepare_ann_fn=prepare_ann_fn))