def initialize(self, opt):
    """Index per-class scribble and image folders and build the transforms.

    Expects <dataroot>/scribbles/<class>/ and <dataroot>/images/<class>/
    with matching contents per class.
    """
    self.opt = opt
    self.root = opt.dataroot
    self.dir_scribbles = os.path.join(opt.dataroot, 'scribbles')
    self.dir_images = os.path.join(opt.dataroot, 'images')

    # Sorted so the class ordering is deterministic across runs and models.
    self.classes = sorted(os.listdir(self.dir_images))
    self.num_classes = len(self.classes)

    self.scribble_paths = [
        sorted(make_dataset(os.path.join(self.dir_scribbles, cls)))
        for cls in self.classes
    ]
    self.images_paths = [
        sorted(make_dataset(os.path.join(self.dir_images, cls)))
        for cls in self.classes
    ]

    # NOTE(review): both lists receive the *running* total, so self.sizes
    # ends up identical to self.cum_sizes — confirm whether per-class
    # counts were intended for self.sizes before changing anything.
    self.cum_sizes = []
    self.sizes = []
    running_total = 0
    for class_paths in self.scribble_paths:
        running_total += len(class_paths)
        self.cum_sizes.append(running_total)
        self.sizes.append(running_total)

    self.transform = get_transform(opt)
    self.sparse_transform = get_sparse_transform(opt)
    self.mask_transform = get_mask_transform(opt)
def get_paths(self, opt):
    """Collect label and image paths from <root>/<phase>_label and
    <root>/<phase>_img; the 'test' phase reads the 'val' split.
    Returns (label_paths, image_paths, instance_paths) with no instances.
    """
    phase = 'val' if opt.phase == 'test' else opt.phase
    root = opt.dataroot
    label_paths = make_dataset(
        os.path.join(root, '%s_label' % phase), recursive=False, read_cache=True)
    image_paths = make_dataset(
        os.path.join(root, '%s_img' % phase), recursive=False, read_cache=True)
    return label_paths, image_paths, []
def initialize(self, opt):
    """Locate the phase-specific A/B image folders and build sorted path lists."""
    self.opt = opt
    self.root = opt.dataroot
    self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')
    self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')
    # Sorted so that A/B ordering is stable across runs.
    self.A_paths = sorted(make_dataset(self.dir_A))
    self.B_paths = sorted(make_dataset(self.dir_B))
    self.A_size = len(self.A_paths)
    self.B_size = len(self.B_paths)
    self.transform = get_transform(opt)
def get_paths(self, opt):
    """Cityscapes layout: labels under gtFine/<phase>, photos under
    leftImg8bit/<phase>. Any phase other than 'test' maps to 'train'.
    """
    phase = 'val' if opt.phase == 'test' else 'train'
    root = opt.dataroot

    # gtFine mixes several annotation products; keep only the ones we use.
    all_annotations = make_dataset(os.path.join(root, 'gtFine', phase),
                                   recursive=True)
    label_paths = [p for p in all_annotations if p.endswith('_labelIds.png')]

    image_paths = make_dataset(os.path.join(root, 'leftImg8bit', phase),
                               recursive=True)

    instance_paths = []
    if not opt.no_instance:
        instance_paths = [p for p in all_annotations
                          if p.endswith('_instanceIds.png')]
    return label_paths, image_paths, instance_paths
def __init__(self, opt):
    """Initialize this dataset class.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
    """
    BaseDataset.__init__(self, opt)

    self.dir_A = os.path.join(opt.dataroot, 'trainA')  # create a path '/path/to/data/trainA'
    self.dir_B = os.path.join(opt.dataroot, 'trainB')  # create a path '/path/to/data/trainB'

    # NOTE(review): if either directory is missing, self.A_paths/self.B_paths are
    # never assigned and the len() calls below raise AttributeError — confirm
    # whether the guard was meant to cover the whole setup.
    if os.path.exists(self.dir_A) and os.path.exists(self.dir_B):
        self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))  # load images from '/path/to/data/trainA'
        self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))  # load images from '/path/to/data/trainB'
    self.A_size = len(self.A_paths)  # get the size of dataset A
    self.B_size = len(self.B_paths)  # get the size of dataset B
    # Single-image translation: exactly one image per domain.
    assert len(self.A_paths) == 1 and len(self.B_paths) == 1,\
        "SingleImageDataset class should be used with one image in each domain"
    A_img = Image.open(self.A_paths[0]).convert('RGB')
    B_img = Image.open(self.B_paths[0]).convert('RGB')
    print("Image sizes %s and %s" % (str(A_img.size), str(B_img.size)))
    self.A_img = A_img
    self.B_img = B_img

    # In single-image translation, we augment the data loader by applying
    # random scaling. Still, we design the data loader such that the
    # amount of scaling is the same within a minibatch. To do this,
    # we precompute the random scaling values, and repeat them by |batch_size|.
    A_zoom = 1 / self.opt.random_scale_max
    zoom_levels_A = np.random.uniform(A_zoom, 1.0, size=(len(self) // opt.batch_size + 1, 1, 2))
    # Tile each per-batch zoom across the batch, then flatten to (N, 2).
    self.zoom_levels_A = np.reshape(np.tile(zoom_levels_A, (1, opt.batch_size, 1)), [-1, 2])
    B_zoom = 1 / self.opt.random_scale_max
    zoom_levels_B = np.random.uniform(B_zoom, 1.0, size=(len(self) // opt.batch_size + 1, 1, 2))
    self.zoom_levels_B = np.reshape(np.tile(zoom_levels_B, (1, opt.batch_size, 1)), [-1, 2])

    # While the crop locations are randomized, the negative samples should
    # not come from the same location. To do this, we precompute the
    # crop locations with no repetition.
    self.patch_indices_A = list(range(len(self)))
    random.shuffle(self.patch_indices_A)
    self.patch_indices_B = list(range(len(self)))
    random.shuffle(self.patch_indices_B)
def __init__(self, opt):
    """Initialize this dataset class.

    Builds sorted A/B path lists for the current phase. At test time,
    if <dataroot>/testA is absent but <dataroot>/valA exists, the val
    folders are used instead.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
    """
    BaseDataset.__init__(self, opt)
    self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')
    self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')

    # Fall back to the validation split when no dedicated test split exists.
    use_val_fallback = (opt.phase == "test"
                        and not os.path.exists(self.dir_A)
                        and os.path.exists(os.path.join(opt.dataroot, "valA")))
    if use_val_fallback:
        self.dir_A = os.path.join(opt.dataroot, "valA")
        self.dir_B = os.path.join(opt.dataroot, "valB")

    self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))
    self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))
    self.A_size = len(self.A_paths)
    self.B_size = len(self.B_paths)
def initialize(self, opt):
    """Single-folder dataset: every image sits directly under opt.dataroot."""
    self.opt = opt
    self.root = opt.dataroot
    self.dir_A = os.path.join(opt.dataroot)
    self.A_paths = sorted(make_dataset(self.dir_A))
    self.transform = get_transform(opt)
def __init__(self, opt):
    """Initialize this dataset class.

    Collects images from opt.dataroot and builds a transform that is
    grayscale when the effective input channel count is 1 (which depends
    on the translation direction).

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
    """
    BaseDataset.__init__(self, opt)
    self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
    # For BtoA the "input" of the generator is the B domain.
    if self.opt.direction == 'BtoA':
        input_nc = self.opt.output_nc
    else:
        input_nc = self.opt.input_nc
    self.transform = get_transform(opt, grayscale=(input_nc == 1))
def __init__(self, opt, mode='train'):
    """Initialize this dataset class.

    Builds sorted path lists from <dataroot>/<mode>A and <dataroot>/<mode>B
    and one transform per domain, grayscale when that domain has 1 channel.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
    """
    super(UnalignedDataset, self).__init__()
    self.opt = opt
    self.dir_A = os.path.join(opt.dataroot, mode + 'A')
    self.dir_B = os.path.join(opt.dataroot, mode + 'B')
    self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))
    self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))
    self.A_size = len(self.A_paths)
    self.B_size = len(self.B_paths)

    # BtoA swaps which option supplies the input/output channel counts.
    swapped = self.opt.direction == 'BtoA'
    input_nc = self.opt.output_nc if swapped else self.opt.input_nc
    output_nc = self.opt.input_nc if swapped else self.opt.output_nc
    self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
    self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
def get_paths(self, opt):
    """Return (label_paths, image_paths, instance_paths) read from the
    explicitly configured opt.label_dir / opt.image_dir / opt.instance_dir.

    Instance paths are empty when opt.instance_dir is the empty string.
    Raises AssertionError when the label and image counts differ.
    """
    label_paths = make_dataset(opt.label_dir, recursive=False, read_cache=True)
    image_paths = make_dataset(opt.image_dir, recursive=False, read_cache=True)
    if len(opt.instance_dir) > 0:
        instance_paths = make_dataset(opt.instance_dir,
                                      recursive=False, read_cache=True)
    else:
        instance_paths = []
    # Bug fix: the message previously contained bare %s placeholders that
    # were never interpolated, so the failing directories were not shown.
    assert len(label_paths) == len(image_paths), (
        "The #images in %s and %s do not match. Is there something wrong?"
        % (opt.label_dir, opt.image_dir))
    return label_paths, image_paths, instance_paths
def initialize(self, opt):
    """Aligned AB dataset: side-by-side pairs under <dataroot>/<phase>,
    converted to tensors and normalized to [-1, 1]."""
    self.opt = opt
    self.root = opt.dataroot
    self.dir_AB = os.path.join(opt.dataroot, opt.phase)
    self.AB_paths = sorted(make_dataset(self.dir_AB))
    # This dataset only supports the resize_and_crop preprocessing mode.
    assert (opt.resize_or_crop == 'resize_and_crop')
    self.transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
def get_paths(self, opt):
    """ADE20K layout: one recursive walk over opt.dataroot, where .jpg files
    are photos and .png files are segmentation labels; only files whose path
    contains the phase tag (e.g. '_train_') are kept.
    """
    phase = 'val' if opt.phase == 'test' else 'train'
    everything = make_dataset(opt.dataroot, recursive=True,
                              read_cache=False, write_cache=False)
    phase_tag = '_%s_' % phase
    image_paths = [p for p in everything
                   if phase_tag in p and p.endswith('.jpg')]
    label_paths = [p for p in everything
                   if phase_tag in p and p.endswith('.png')]
    # don't use instance map for ade20k
    return label_paths, image_paths, []
def get_paths(self, opt):
    """COCO-Stuff layout: <phase>_label, <phase>_img and <phase>_inst folders,
    each with an optional <phase>_*_portrait companion folder that is merged
    in during training unless opt.coco_no_portraits is set.

    Returns (label_paths, image_paths, instance_paths); instance_paths is
    empty when opt.no_instance is set. The 'test' phase reads the 'val' split.
    """
    root = opt.dataroot
    phase = 'val' if opt.phase == 'test' else opt.phase
    use_portraits = not opt.coco_no_portraits and opt.isTrain

    def _collect(suffix, portrait_suffix):
        # Gather one modality's paths, appending the portrait extras
        # when enabled and the companion folder actually exists.
        paths = make_dataset(os.path.join(root, '%s_%s' % (phase, suffix)),
                             recursive=False, read_cache=True)
        if use_portraits:
            portrait_dir = os.path.join(root,
                                        '%s_%s' % (phase, portrait_suffix))
            if os.path.isdir(portrait_dir):
                paths += make_dataset(portrait_dir,
                                      recursive=False, read_cache=True)
        return paths

    label_paths = _collect('label', 'label_portrait')
    image_paths = _collect('img', 'img_portrait')
    if not opt.no_instance:
        instance_paths = _collect('inst', 'inst_portrait')
    else:
        instance_paths = []
    return label_paths, image_paths, instance_paths