def __init__(self, root, transform=None, target_transform=None, download=False):
    """Assemble the full Omniglot dataset from its two torchvision splits.

    Args:
        root: Dataset root directory; a leading '~' is expanded.
        transform: Optional transform applied to each image.
        target_transform: Optional transform applied to each target label.
        download: If True, download the data when it is not already present.
    """
    self.root = os.path.expanduser(root)
    self.transform = transform
    self.target_transform = target_transform

    # Load both the background and the evaluation split.
    background_split = Omniglot(self.root, background=True, download=download)
    # Evaluation labels also start from 0, so shift them past the background
    # classes (964 of them) to keep the two label spaces from colliding.
    evaluation_split = Omniglot(
        self.root,
        background=False,
        download=download,
        target_transform=lambda label: label + len(background_split._characters),
    )
    self.dataset = ConcatDataset((background_split, evaluation_split))
    self._bookkeeping_path = os.path.join(self.root, 'omniglot-bookkeeping.pkl')
# Trigger a download of both Omniglot splits into ./data:
# first the "background" split (the constructor's default), then the
# evaluation split.
from torchvision.datasets.omniglot import Omniglot

Omniglot(root='./data', download=True)
Omniglot(root='./data', background=False, download=True)
def omniglot(train=True):
    """Return the flat image-name / class-index listing for one Omniglot split.

    Args:
        train: If True, load the background split; otherwise the evaluation
            split.

    Returns:
        Two parallel tuples: image file names and their character-class
        indices.

    NOTE(review): relies on a module-level ``root`` defined elsewhere in the
    file, and on torchvision's private ``_flat_character_images`` attribute —
    confirm both still hold for the installed torchvision version.
    """
    split = Omniglot(root=root, background=train)
    names, classes = zip(*split._flat_character_images)
    return names, classes