def __getitem__(self, index):
    """Return one training/test sample as a dict of tensors.

    Keys: "A_input" (input image tensor), "A_exptC" (expert-retouched
    target tensor), "input_name" (file name), and "mask" when
    ``self.use_mask`` is true.

    Fix: use the public PIL ``Image.size`` attribute instead of the
    private ``_size`` (same (W, H) tuple, but ``_size`` is an internal
    detail that is not part of Pillow's API).
    """
    if self.mode == "train":
        idx = index % len(self.train_input_files)
        img_name = os.path.split(self.train_input_files[idx])[-1]
        # cv2.imread(..., -1) keeps the original bit depth unchanged
        # (the inputs are presumably 16-bit tif — TODO confirm).
        img_input = cv2.imread(self.train_input_files[idx], -1)
        if len(self.train_input_files) == len(self.train_target_files):
            # One-to-one pairing between inputs and targets.
            img_exptC = Image.open(self.train_target_files[index % len(self.train_target_files)])
            if self.use_mask:
                img_mask = Image.open(os.path.join(self.root, "train/masks/" + img_name[:-4] + ".png"))
        else:
            # Counts differ: resolve the target file from the input name.
            split_name = img_name.split('_')
            if len(split_name) == 2:
                img_exptC = Image.open(os.path.join(self.root, "train/target_" + self.retoucher + '/' + img_name))
                if self.use_mask:
                    img_mask = Image.open(os.path.join(self.root, "train/masks/" + img_name[:-4] + ".png"))
            else:
                # More than one underscore: the target keeps only the
                # first two name components.
                img_exptC = Image.open(
                    os.path.join(self.root, "train/target_" + self.retoucher + '/' + split_name[0] + "_" + split_name[1] + ".tif"))
                if self.use_mask:
                    img_mask = Image.open(
                        os.path.join(self.root, "train/masks/" + split_name[0] + "_" + split_name[1] + ".png"))
    elif self.mode == "test":
        idx = index % len(self.test_input_files)
        img_name = os.path.split(self.test_input_files[idx])[-1]
        img_input = cv2.imread(self.test_input_files[idx], -1)
        img_exptC = Image.open(self.test_target_files[index % len(self.test_target_files)])
        if self.use_mask:
            img_mask = Image.open(self.test_mask_files[index % len(self.test_mask_files)])

    img_input = np.array(img_input)
    # OpenCV loads channels as BGR; reorder to RGB (fancy indexing copies,
    # which also guarantees a contiguous array for to_tensor).
    img_input = img_input[:, :, [2, 1, 0]]

    if self.mode == "train":
        # Random crop of 60-100% of each dimension, applied identically to
        # input / target / mask, then resized to the fixed training size.
        ratio_H = np.random.uniform(0.6, 1.0)
        ratio_W = np.random.uniform(0.6, 1.0)
        W, H = img_exptC.size  # public PIL API (was the private `_size`)
        crop_h = round(H * ratio_H)
        crop_w = round(W * ratio_W)
        i, j, h, w = transforms.RandomCrop.get_params(img_exptC, output_size=(crop_h, crop_w))
        img_input = TF_x.resized_crop(img_input, i, j, h, w, (448, 448))
        img_exptC = TF.resized_crop(img_exptC, i, j, h, w, (448, 448))
        if self.use_mask:
            img_mask = TF.resized_crop(img_mask, i, j, h, w, (448, 448))

        # Random horizontal flip, again applied jointly.
        if np.random.random() > 0.5:
            img_input = TF_x.hflip(img_input)
            img_exptC = TF.hflip(img_exptC)
            if self.use_mask:
                img_mask = TF.hflip(img_mask)

    img_input = TF_x.to_tensor(img_input)
    img_exptC = TF.to_tensor(img_exptC)
    if self.use_mask:
        img_mask = TF.to_tensor(img_mask)

    if self.use_mask:
        return {"A_input": img_input, "A_exptC": img_exptC, "input_name": img_name, "mask": img_mask}
    return {"A_input": img_input, "A_exptC": img_exptC, "input_name": img_name}
def __getitem__(self, index):
    """Return one paired sample: {"A_input", "A_exptC", "input_name"}.

    Fixes over the original:
    - ``img_input.shape()`` called the ndarray ``shape`` attribute as a
      method, so the debug print itself raised TypeError and masked the
      real crop failure; use ``img_input.shape``.
    - Bare ``except:`` narrowed to ``except Exception`` so that
      KeyboardInterrupt/SystemExit are not swallowed.
    - Public PIL ``Image.size`` used instead of the private ``_size``.
    """
    if self.mode == "train":
        idx = index % len(self.set1_input_files)
        img_name = os.path.split(self.set1_input_files[idx])[-1]
        # -1 flag preserves the source bit depth (e.g. 16-bit input).
        img_input = cv2.imread(self.set1_input_files[idx], -1)
        img_exptC = Image.open(self.set1_expert_files[index % len(self.set1_expert_files)])
    elif self.mode == "test":
        idx = index % len(self.test_input_files)
        img_name = os.path.split(self.test_input_files[idx])[-1]
        img_input = cv2.imread(self.test_input_files[idx], -1)
        img_exptC = Image.open(self.test_expert_files[index % len(self.test_expert_files)])

    img_input = np.array(img_input)
    #img_input = np.array(cv2.cvtColor(img_input,cv2.COLOR_BGR2RGB))

    if self.mode == "train":
        # Joint random crop of 60-100% of the image, same window for
        # input and target.
        ratio = np.random.uniform(0.6, 1.0)
        W, H = img_exptC.size  # public PIL API (was the private `_size`)
        crop_h = round(H * ratio)
        crop_w = round(W * ratio)
        i, j, h, w = transforms.RandomCrop.get_params(img_exptC, output_size=(crop_h, crop_w))
        try:
            img_input = TF_x.crop(img_input, i, j, h, w)
        except Exception:
            # Best-effort debug output; intentionally continues with the
            # uncropped input, as the original code did.
            print(crop_h, crop_w, img_input.shape)
        img_exptC = TF.crop(img_exptC, i, j, h, w)

        # Joint random horizontal flip.
        if np.random.random() > 0.5:
            img_input = TF_x.hflip(img_input)
            img_exptC = TF.hflip(img_exptC)

        # Brightness jitter on the input only (augmentation).
        a = np.random.uniform(0.6, 1.4)
        img_input = TF_x.adjust_brightness(img_input, a)
        #a = np.random.uniform(0.8,1.2)
        #img_input = TF_x.adjust_saturation(img_input,a)

    img_input = TF_x.to_tensor(img_input)
    img_exptC = TF.to_tensor(img_exptC)

    return {
        "A_input": img_input,
        "A_exptC": img_exptC,
        "input_name": img_name
    }
def __getitem__(self, index):
    """Return one inference sample: {"A_input": tensor, "input_name": str}.

    Loads the test input at ``index`` (wrapping around the file list),
    converts it from OpenCV's BGR channel order to RGB, and tensorizes it.
    """
    file_count = len(self.test_input_files)
    path = self.test_input_files[index % file_count]
    img_name = os.path.split(path)[-1]

    # -1 keeps the image's native bit depth when decoding.
    raw = cv2.imread(path, -1)
    raw = np.array(raw)

    # Reorder BGR -> RGB via fancy indexing (produces a fresh copy).
    rgb = raw[:, :, [2, 1, 0]]
    tensor = TF_x.to_tensor(rgb)

    return {"A_input": tensor, "input_name": img_name}
def __getitem__(self, index):
    """Return one sample for unpaired training.

    Keys: "A_input" (input tensor), "A_exptC" (paired expert target),
    "B_exptC" (a randomly chosen, independently augmented expert image
    from set 2), "input_name".

    Fix: use the public PIL ``Image.size`` attribute instead of the
    private ``_size`` (same value, but ``_size`` is not part of
    Pillow's public API).
    """
    if self.mode == "train":
        idx = index % len(self.set1_input_files)
        img_name = os.path.split(self.set1_input_files[idx])[-1]
        img_input = cv2.imread(self.set1_input_files[idx], -1)
        img_exptC = Image.open(self.set1_expert_files[index % len(self.set1_expert_files)])
        # Random offset so the set-2 image is decoupled from `index`.
        seed = random.randint(1, len(self.set2_expert_files))
        img2 = Image.open(self.set2_expert_files[(index + seed) % len(self.set2_expert_files)])
    elif self.mode == "test":
        idx = index % len(self.test_input_files)
        img_name = os.path.split(self.test_input_files[idx])[-1]
        img_input = cv2.imread(self.test_input_files[idx], -1)
        img_exptC = Image.open(self.test_expert_files[index % len(self.test_expert_files)])
        img2 = img_exptC

    img_input = np.array(img_input)
    #img_input = np.array(cv2.cvtColor(img_input,cv2.COLOR_BGR2RGB))

    if self.mode == "train":
        # Crop size derived from the paired target, then clamped so the
        # same window size also fits inside the set-2 image.
        ratio = np.random.uniform(0.6, 1.0)
        W, H = img_exptC.size  # public PIL API (was the private `_size`)
        crop_h = round(H * ratio)
        crop_w = round(W * ratio)
        W2, H2 = img2.size  # public PIL API (was the private `_size`)
        crop_h = min(crop_h, H2)
        crop_w = min(crop_w, W2)

        # Joint crop for the input/target pair.
        i, j, h, w = transforms.RandomCrop.get_params(img_exptC, output_size=(crop_h, crop_w))
        img_input = TF_x.crop(img_input, i, j, h, w)
        img_exptC = TF.crop(img_exptC, i, j, h, w)

        # Independent crop for the unpaired set-2 image.
        i, j, h, w = transforms.RandomCrop.get_params(img2, output_size=(crop_h, crop_w))
        img2 = TF.crop(img2, i, j, h, w)

        # Joint flip for the pair; independent flip for img2.
        if np.random.random() > 0.5:
            img_input = TF_x.hflip(img_input)
            img_exptC = TF.hflip(img_exptC)
        if np.random.random() > 0.5:
            img2 = TF.hflip(img2)

        # Brightness jitter on the input only.
        a = np.random.uniform(0.8, 1.2)
        img_input = TF_x.adjust_brightness(img_input, a)

    img_input = TF_x.to_tensor(img_input)
    img_exptC = TF.to_tensor(img_exptC)
    img2 = TF.to_tensor(img2)

    return {
        "A_input": img_input,
        "A_exptC": img_exptC,
        "B_exptC": img2,
        "input_name": img_name
    }
LUT = pred[0] * LUT0.LUT + pred[1] * LUT1.LUT + pred[ 2] * LUT2.LUT #+ pred[3] * LUT3.LUT + pred[4] * LUT4.LUT return LUT # ---------- # test # ---------- # read image and transform to tensor if opt.input_color_space == 'sRGB': img = Image.open(opt.image_path) img = TF.to_tensor(img).type(Tensor) elif opt.input_color_space == 'XYZ': img = cv2.imread(opt.image_path, -1) img = np.array(img) img = TF_x.to_tensor(img).type(Tensor) img = img.unsqueeze(0) LUT = generate_LUT(img) # generate image result = trilinear_(LUT, img) # save image ndarr = result.squeeze().mul_(255).add_(0.5).clamp_(0, 255).permute( 1, 2, 0).to('cpu', torch.uint8).numpy() im = Image.fromarray(ndarr) im.save('%s/result.jpg' % opt.output_dir, quality=95)