def initialize(self, opt):
    self.opt = opt
    self.root = opt.dataroot
    self.dir_scribbles = os.path.join(opt.dataroot, 'scribbles')
    self.dir_images = os.path.join(opt.dataroot, 'images')
    # Sorted so the class order is the same in all cases; check whether this needs
    # to change when used with other models.
    self.classes = sorted(os.listdir(self.dir_images))
    self.num_classes = len(self.classes)

    # Collect per-class scribble and image paths.
    self.scribble_paths = []
    self.images_paths = []
    for cl in self.classes:
        self.scribble_paths.append(sorted(make_dataset(os.path.join(self.dir_scribbles, cl))))
        self.images_paths.append(sorted(make_dataset(os.path.join(self.dir_images, cl))))

    # Running totals of scribbles per class (note: both lists store the cumulative count).
    self.cum_sizes = []
    self.sizes = []
    size = 0
    for i in range(self.num_classes):
        size += len(self.scribble_paths[i])
        self.cum_sizes.append(size)
        self.sizes.append(size)

    self.transform = get_transform(opt)
    self.sparse_transform = get_sparse_transform(opt)
    self.mask_transform = get_mask_transform(opt)
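# A minimal sketch (not part of the original class) of how the cumulative sizes built
# above could be used to map a flat dataset index to a (class, within-class) index.
# The helper name `flat_index_to_class` and the use of bisect are assumptions for
# illustration only; the original repository may resolve indices differently.
import bisect

def flat_index_to_class(cum_sizes, index):
    """Return (class_id, local_index) for a flat index, given cumulative class sizes."""
    class_id = bisect.bisect_right(cum_sizes, index)  # first class whose cumulative size exceeds index
    local_index = index if class_id == 0 else index - cum_sizes[class_id - 1]
    return class_id, local_index

# Example: classes with 3, 5, and 2 scribbles -> cum_sizes == [3, 8, 10]
# flat_index_to_class([3, 8, 10], 4) == (1, 1)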
def __getitem__(self, index):
    """Return a data point and its metadata information.

    Parameters:
        index (int) -- a random integer for data indexing

    Returns a dictionary that contains A, B, A_paths and B_paths
        A (tensor)      -- an image in the input domain
        B (tensor)      -- its corresponding image in the target domain
        A_paths (str)   -- image paths
        B_paths (str)   -- image paths
    """
    A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
    if self.opt.serial_batches:   # keep A and B paired in order
        index_B = index % self.B_size
    else:   # randomize the index for domain B to avoid fixed pairs
        index_B = random.randint(0, self.B_size - 1)
    B_path = self.B_paths[index_B]
    A_img = Image.open(A_path).convert('RGB')
    B_img = Image.open(B_path).convert('RGB')

    # Apply image transformation.
    # For FastCUT mode, if in the finetuning phase (learning rate is decaying),
    # do not perform the resize-crop data augmentation of CycleGAN.
    is_finetuning = self.opt.isTrain and self.current_epoch > self.opt.n_epochs
    modified_opt = util.copyconf(self.opt, load_size=self.opt.crop_size if is_finetuning else self.opt.load_size)
    transform = get_transform(modified_opt)
    A = transform(A_img)
    B = transform(B_img)

    return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
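# A minimal, self-contained sketch of the load-size decision used above: during the
# FastCUT finetuning phase (current_epoch > n_epochs, i.e. the LR-decay epochs), the
# resize-crop augmentation is skipped by loading at crop_size directly. The helper
# name `effective_load_size` is an assumption for illustration; the original code
# inlines this logic via util.copyconf.
def effective_load_size(is_train, current_epoch, n_epochs, load_size, crop_size):
    is_finetuning = is_train and current_epoch > n_epochs
    return crop_size if is_finetuning else load_size

# Example: with n_epochs=200, load_size=286, crop_size=256,
# epoch 150 -> 286 (resize-crop augmentation), epoch 250 -> 256 (no augmentation).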
def initialize(self, opt):
    self.opt = opt
    self.root = opt.dataroot
    self.dir_A = os.path.join(opt.dataroot)
    self.A_paths = sorted(make_dataset(self.dir_A))
    self.transform = get_transform(opt)
def __init__(self, opt):
    """Initialize this dataset class.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
    """
    BaseDataset.__init__(self, opt)
    self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
    input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
    self.transform = get_transform(opt, grayscale=(input_nc == 1))
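# A hedged sketch of the __getitem__/__len__ pair that would accompany the __init__
# above, returning one transformed image and its path. The dictionary keys
# ('A', 'A_paths') are an assumption based on the conventions of the other datasets
# in this section, not confirmed by the snippet itself.
def __getitem__(self, index):
    A_path = self.A_paths[index]
    A_img = Image.open(A_path).convert('RGB')
    A = self.transform(A_img)
    return {'A': A, 'A_paths': A_path}

def __len__(self):
    return len(self.A_paths)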
def __getitem__(self, index):
    # Label image
    label_path = self.label_paths[index]
    label = Image.open(label_path)
    params = get_params(self.opt, label.size)
    transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
    label_tensor = transform_label(label) * 255.0
    label_tensor[label_tensor == 255] = self.opt.label_nc  # 'unknown' is opt.label_nc

    # Input image (real image)
    image_path = self.image_paths[index]
    assert self.paths_match(label_path, image_path), \
        "The label_path %s and image_path %s don't match." % \
        (label_path, image_path)
    image = Image.open(image_path).convert('RGB')
    transform_image = get_transform(self.opt, params)
    image_tensor = transform_image(image)

    # Instance maps (if used)
    if self.opt.no_instance:
        instance_tensor = 0
    else:
        instance_path = self.instance_paths[index]
        instance = Image.open(instance_path)
        if instance.mode == 'L':
            instance_tensor = transform_label(instance) * 255
            instance_tensor = instance_tensor.long()
        else:
            instance_tensor = transform_label(instance)

    input_dict = {'label': label_tensor,
                  'instance': instance_tensor,
                  'image': image_tensor,
                  'path': image_path,
                  }

    # Give subclasses a chance to modify the final output
    self.postprocess(input_dict)

    return input_dict
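# A minimal sketch of the kind of check `paths_match` above might perform; the real
# implementation is not shown in this section, so the basename-without-extension
# comparison here is an assumption for illustration only.
def paths_match(self, path1, path2):
    name1 = os.path.splitext(os.path.basename(path1))[0]
    name2 = os.path.splitext(os.path.basename(path2))[0]
    return name1 == name2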
def initialize(self, opt):
    self.opt = opt
    self.root = opt.dataroot
    self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')
    self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')
    self.A_paths = sorted(make_dataset(self.dir_A))
    self.B_paths = sorted(make_dataset(self.dir_B))
    self.A_size = len(self.A_paths)
    self.B_size = len(self.B_paths)
    self.transform = get_transform(opt)
def __getitem__(self, index):
    """Return a data point and its metadata information.

    Parameters:
        index (int) -- a random integer for data indexing

    Returns a dictionary that contains A, B, A_paths and B_paths
        A (tensor)      -- an image in the input domain
        B (tensor)      -- its corresponding image in the target domain
        A_paths (str)   -- image paths
        B_paths (str)   -- image paths
    """
    A_path = self.A_paths[0]
    B_path = self.B_paths[0]
    A_img = self.A_img
    B_img = self.B_img

    # Apply image transformation.
    if self.opt.phase == "train":
        param = {'scale_factor': self.zoom_levels_A[index],
                 'patch_index': self.patch_indices_A[index],
                 'flip': random.random() > 0.5}
        transform_A = get_transform(self.opt, params=param, method=Image.BILINEAR)
        A = transform_A(A_img)

        param = {'scale_factor': self.zoom_levels_B[index],
                 'patch_index': self.patch_indices_B[index],
                 'flip': random.random() > 0.5}
        transform_B = get_transform(self.opt, params=param, method=Image.BILINEAR)
        B = transform_B(B_img)
    else:
        transform = get_transform(self.opt, method=Image.BILINEAR)
        A = transform(A_img)
        B = transform(B_img)

    return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __init__(self, opt):
    """Initialize this dataset class.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions

    A few things can be done here.
    - save the options (has been done in BaseDataset)
    - get the image paths and meta information of the dataset.
    - define the image transformation.
    """
    # save the options and the dataset root
    BaseDataset.__init__(self, opt)
    # get the image paths of your dataset; you can call
    # sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
    self.image_paths = []
    # define the default transform function. You can use <base_dataset.get_transform>;
    # you can also define your own custom transform function.
    self.transform = get_transform(opt)
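# A hedged sketch of the __getitem__/__len__ pair that would complete this template;
# the key names ('data', 'path') and the single-image return format are assumptions
# for illustration only, following the conventions of the datasets above.
def __getitem__(self, index):
    """Return a data point and its metadata information."""
    path = self.image_paths[index]          # needs to be a string
    image = Image.open(path).convert('RGB')
    data = self.transform(image)            # needs to be a tensor
    return {'data': data, 'path': path}

def __len__(self):
    """Return the total number of images in the dataset."""
    return len(self.image_paths)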