def b_Bicubic(variable, scale):
    """Bicubically downscale a batched image tensor.

    Args:
        variable: 4-D tensor of shape (B, C, H, W).
        scale: downscale factor; the output is resized by ``1 / scale``.

    Returns:
        The tensor produced by ``imresize(variable, 1 / scale)``.
    """
    # The original body computed H_new/W_new and a same-shape .view(),
    # all of which were dead code; imresize does the work directly.
    return imresize(variable, 1 / scale)
def b_CPUVar_Bicubic(variable, scale):
    """Bicubically downscale each sample of a (B, C, H, W) CPU tensor.

    Unlike the batched variant, every image in the batch is resized
    individually and written into a pre-allocated output tensor.
    """
    B, C, H, W = variable.size()
    out_h = int(H / scale)
    out_w = int(W / scale)
    resized = torch.zeros((B, C, out_h, out_w))
    # Iterating a 4-D tensor yields its (C, H, W) slices along dim 0.
    for idx, sample in enumerate(variable.view((B, C, H, W))):
        resized[idx] = imresize(sample, 1 / scale)
    return resized
# Script chunk: iterate over all .TIF images under `test_img_folder`, build a
# mean-subtracted input at the target resolution, and run the segmentation
# model to obtain per-pixel probabilities.
# Relies on globals defined elsewhere: test_img_folder, args, seg_model,
# modcrop, imresize.
idx = 0
for path in glob.glob(test_img_folder + '**/*.TIF'):
    idx += 1
    basename = os.path.basename(path)
    base = os.path.splitext(basename)[0]
    print(idx, base)
    # read image
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    # crop so both dimensions are multiples of 8
    img = modcrop(img, 8)
    if img.ndim == 2:
        # grayscale -> add a trailing channel axis so HWC transpose works
        img = np.expand_dims(img, axis=2)
    # HWC -> CHW; note: channel order is left as read (BGR from cv2)
    img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
    # matlab imresize
    # the implementation is slower than matlab, can use matlab to generate first
    img_LR = imresize(img / 255, 1 / 4, antialiasing=True)
    if args.thirty:
        img = imresize(img_LR, 8, antialiasing=True) * 255  # from 256 to 512
    else:
        img = imresize(img_LR, 4, antialiasing=True) * 255
    # per-channel mean subtraction; values match the Caffe VGG BGR means
    # (103.939, 116.779, 123.68) — NOTE(review): confirm seg_model expects them
    img[0] -= 103.939
    img[1] -= 116.779
    img[2] -= 123.68
    img = img.unsqueeze(0)
    img = img.cuda()
    output = seg_model(img).detach().float().cpu().squeeze_()  # prob
def __getitem__(self, index):
    """Return one {LR, HR} training/validation pair.

    Reads the HR image, obtains the LR image either from disk or by
    downsampling on-the-fly, optionally crops/augments during training,
    and converts both to RGB CHW float tensors in [0, 1].
    """
    HR_path, LR_path = None, None
    scale = self.opt['scale']
    HR_size = self.opt['HR_size']

    # get HR image
    HR_path = self.paths_HR[index]
    img_HR = util.read_img(self.HR_env, HR_path)
    # modcrop in the validation / test phase
    if self.opt['phase'] != 'train':
        img_HR = util.modcrop(img_HR, scale)
    # change color space if necessary
    if self.opt['color']:
        img_HR = util.channel_convert(img_HR.shape[2], self.opt['color'], [img_HR])[0]
    # optional global pre-shrink of the HR image
    if self.opt['resize'] < 1:
        print('Resize by %.2f' % self.opt['resize'])
        img_HR = cv2.resize(img_HR, None, fx=self.opt['resize'], fy=self.opt['resize'],
                            interpolation=cv2.INTER_LINEAR)
    # print(img_HR.shape)

    # get LR image
    if self.paths_LR:
        LR_path = self.paths_LR[index]
        img_LR = util.read_img(self.LR_env, LR_path)
    else:  # down-sampling on-the-fly
        # randomly scale during training
        if self.opt['phase'] == 'train':
            random_scale = random.choice(self.random_scale_list)
            H_s, W_s, _ = img_HR.shape

            def _mod(n, random_scale, scale, thres):
                # scaled size snapped down to a multiple of `scale`,
                # but never below `thres` (the HR crop size)
                rlt = int(n * random_scale)
                rlt = (rlt // scale) * scale
                return thres if rlt < thres else rlt

            H_s = _mod(H_s, random_scale, scale, HR_size)
            W_s = _mod(W_s, random_scale, scale, HR_size)
            img_HR = cv2.resize(np.copy(img_HR), (W_s, H_s),
                                interpolation=cv2.INTER_LINEAR)
            # force to 3 channels
            if img_HR.ndim == 2:
                img_HR = cv2.cvtColor(img_HR, cv2.COLOR_GRAY2BGR)

        H, W, _ = img_HR.shape
        # downsample method selected via opt['downsample']
        if self.opt['downsample'] == 'cubic':
            img_LR = cv2.resize(img_HR, dsize=None, fx=1.0/scale, fy=1.0/scale,
                                interpolation=cv2.INTER_CUBIC)
        elif self.opt['downsample'] == 'numpy':
            img_LR = util.imresize_np(img_HR, 1 / scale, True)
        elif self.opt['downsample'] == 'linear':
            img_LR = cv2.resize(img_HR, dsize=None, fx=1.0 / scale, fy=1.0 / scale,
                                interpolation=cv2.INTER_LINEAR)
        if img_LR.ndim == 2:
            img_LR = np.expand_dims(img_LR, axis=2)
    # print(img_HR.shape)
    # print(img_LR.shape)

    if self.opt['phase'] == 'train':
        # if the image size is too small (or cropping disabled),
        # force-resize HR to the crop size and regenerate LR from it
        H, W, _ = img_HR.shape
        if H < HR_size or W < HR_size or not self.opt['crop']:
            img_HR = cv2.resize(np.copy(img_HR), (HR_size, HR_size),
                                interpolation=cv2.INTER_LINEAR)
            # using matlab imresize
            if self.opt['downsample'] == 'cubic':
                img_LR = cv2.resize(img_HR, dsize=None, fx=1.0 / scale, fy=1.0 / scale,
                                    interpolation=cv2.INTER_CUBIC)
            elif self.opt['downsample'] == 'numpy':
                img_LR = util.imresize_np(img_HR, 1 / scale, True)
            elif self.opt['downsample'] == 'linear':
                img_LR = cv2.resize(img_HR, dsize=None, fx=1.0 / scale, fy=1.0 / scale,
                                    interpolation=cv2.INTER_LINEAR)
            # # img_LR = util.imresize_np(img_HR, 1 / scale, True)
            if img_LR.ndim == 2:
                img_LR = np.expand_dims(img_LR, axis=2)

        H, W, C = img_LR.shape
        LR_size = HR_size // scale

        # randomly crop paired LR/HR patches at matching positions
        if self.opt['crop']:
            rnd_h = random.randint(0, max(0, H - LR_size))
            rnd_w = random.randint(0, max(0, W - LR_size))
            img_LR = img_LR[rnd_h:rnd_h + LR_size, rnd_w:rnd_w + LR_size, :]
            rnd_h_HR, rnd_w_HR = int(rnd_h * scale), int(rnd_w * scale)
            img_HR = img_HR[rnd_h_HR:rnd_h_HR + HR_size, rnd_w_HR:rnd_w_HR + HR_size, :]

        # augmentation - flip, rotate
        img_LR, img_HR = util.augment([img_LR, img_HR], self.opt['use_flip'], \
            self.opt['use_rot'])

        # "fuzzy" augmentation: resample LR up then back down by a random
        # factor to slightly blur it; reverted if the shape drifts
        if self.LR_random_fuzzy is not None:
            random_fuzzy = random.choice(self.LR_random_fuzzy)
            assert self.opt['downsample'] == 'numpy'
            init_LR = np.copy(img_LR)
            img_LR = util.imresize(img_LR, random_fuzzy, True)
            img_LR = util.imresize(img_LR, 1 / random_fuzzy, True)
            if img_LR.shape[0] != LR_size or img_LR.shape[1] != LR_size:
                print('Warning: LR shape changed after random fuzzy. Using initial one.',
                      img_LR.shape[0], img_LR.shape[1], LR_size)
                img_LR = init_LR

    # change color space if necessary
    if self.opt['color']:
        img_LR = util.channel_convert(C, self.opt['color'], [img_LR])[0]  # TODO during val no definition

    # BGR to RGB, HWC to CHW, numpy to tensor
    if img_HR.shape[2] == 3:
        img_HR = img_HR[:, :, [2, 1, 0]]
        img_LR = img_LR[:, :, [2, 1, 0]]
    img_HR = torch.from_numpy(np.ascontiguousarray(np.transpose(img_HR, (2, 0, 1)))).float()
    img_LR = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LR, (2, 0, 1)))).float()

    # return: 0-1
    if LR_path is None:
        LR_path = HR_path
    return {'LR': img_LR, 'HR': img_HR, 'LR_path': LR_path, 'HR_path': HR_path}
# Script chunk: run segmentation-conditioned super-resolution on every image
# under `test_img_folder`. For each image: modcrop, normalize, BGR->RGB,
# bicubic-downscale x4, load the precomputed segmentation probability map,
# run the model on (LR image, seg map), and save the result as PNG.
# Relies on globals defined elsewhere: test_img_folder, test_prob_path,
# save_result_path, model, modcrop, imresize, util.
idx = 0
for path in glob.glob(test_img_folder + '/*'):
    idx += 1
    basename = os.path.basename(path)
    base = os.path.splitext(basename)[0]
    print(idx, base)
    # read image
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    img = modcrop(img, 8)
    # normalize to [0, 1]
    img = img * 1.0 / 255
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    # BGR -> RGB, HWC -> CHW
    img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
    # matlab imresize
    img_LR = imresize(img, 1 / 4, antialiasing=True)
    img_LR = img_LR.unsqueeze(0)
    img_LR = img_LR.cuda()
    # read seg (precomputed probability map saved as <base>_bic.pth)
    seg = torch.load(os.path.join(test_prob_path, base + '_bic.pth'))
    seg = seg.unsqueeze(0)
    # change probability
    # seg.fill_(0)
    # seg[:,5].fill_(1)
    seg = seg.cuda()
    output = model((img_LR, seg)).data
    output = util.tensor2img_np(output.squeeze())
    util.save_img_np(output, os.path.join(save_result_path, base + '_rlt.png'))
def __getitem__(self, index):
    """Return one {LQ, GT} pair as RGB CHW float tensors in [0, 1].

    Supports lmdb and image-folder backends; when no LR set is given, the
    LR image is generated on-the-fly by downsampling the GT image.
    """
    # lazily open the lmdb environments on first access
    if self.opt["data_type"] == "lmdb":
        if (self.GT_env is None) or (self.LR_env is None):
            self._init_lmdb()
    GT_path, LR_path = None, None
    scale = self.opt["scale"]
    GT_size = self.opt["GT_size"]
    LR_size = self.opt["LR_size"]

    # get GT image
    GT_path = self.GT_paths[index]
    if self.opt["data_type"] == "lmdb":
        # stored size string "C_H_W" -> [C, H, W]
        resolution = [int(s) for s in self.GT_sizes[index].split("_")]
    else:
        resolution = None
    img_GT = util.read_img(
        self.GT_env, GT_path, resolution)  # return: Numpy float32, HWC, BGR, [0,1]

    # modcrop in the validation / test phase
    if self.opt["phase"] != "train":
        img_GT = util.modcrop(img_GT, scale)

    # get LR image
    if self.LR_paths:  # LR exist
        LR_path = self.LR_paths[index]
        if self.opt["data_type"] == "lmdb":
            resolution = [int(s) for s in self.LR_sizes[index].split("_")]
        else:
            resolution = None
        img_LR = util.read_img(self.LR_env, LR_path, resolution)
    else:  # down-sampling on-the-fly
        # randomly scale during training
        if self.opt["phase"] == "train":
            random_scale = random.choice(self.random_scale_list)
            H_s, W_s, _ = img_GT.shape

            def _mod(n, random_scale, scale, thres):
                # scaled size snapped down to a multiple of `scale`,
                # but never below `thres` (the GT crop size)
                rlt = int(n * random_scale)
                rlt = (rlt // scale) * scale
                return thres if rlt < thres else rlt

            H_s = _mod(H_s, random_scale, scale, GT_size)
            W_s = _mod(W_s, random_scale, scale, GT_size)
            img_GT = cv2.resize(np.copy(img_GT), (W_s, H_s),
                                interpolation=cv2.INTER_LINEAR)
            # force to 3 channels
            if img_GT.ndim == 2:
                img_GT = cv2.cvtColor(img_GT, cv2.COLOR_GRAY2BGR)

        H, W, _ = img_GT.shape
        # using matlab imresize
        img_LR = util.imresize(img_GT, 1 / scale, True)
        if img_LR.ndim == 2:
            img_LR = np.expand_dims(img_LR, axis=2)

    if self.opt["phase"] == "train":
        H, W, C = img_LR.shape
        assert LR_size == GT_size // scale, "GT size does not match LR size"

        # randomly crop paired LR/GT patches at matching positions
        rnd_h = random.randint(0, max(0, H - LR_size))
        rnd_w = random.randint(0, max(0, W - LR_size))
        img_LR = img_LR[rnd_h:rnd_h + LR_size, rnd_w:rnd_w + LR_size, :]
        rnd_h_GT, rnd_w_GT = int(rnd_h * scale), int(rnd_w * scale)
        img_GT = img_GT[rnd_h_GT:rnd_h_GT + GT_size, rnd_w_GT:rnd_w_GT + GT_size, :]

        # augmentation - flip, rotate
        img_LR, img_GT = util.augment(
            [img_LR, img_GT],
            self.opt["use_flip"],
            self.opt["use_rot"],
            self.opt["mode"],
        )

    # change color space if necessary
    if self.opt["color"]:
        img_LR = util.channel_convert(
            C, self.opt["color"], [img_LR])[0]  # TODO during val no definition
        img_GT = util.channel_convert(img_GT.shape[2], self.opt["color"], [img_GT])[0]

    # BGR to RGB, HWC to CHW, numpy to tensor
    if img_GT.shape[2] == 3:
        img_GT = img_GT[:, :, [2, 1, 0]]
        img_LR = img_LR[:, :, [2, 1, 0]]
    img_GT = torch.from_numpy(
        np.ascontiguousarray(np.transpose(img_GT, (2, 0, 1)))).float()
    img_LR = torch.from_numpy(
        np.ascontiguousarray(np.transpose(img_LR, (2, 0, 1)))).float()

    if LR_path is None:
        LR_path = GT_path
    return {
        "LQ": img_LR,
        "GT": img_GT,
        "LQ_path": LR_path,
        "GT_path": GT_path
    }
def generate_mod_LR_bic():
    """Generate modcropped HR, bicubic LR, and bicubic re-upscaled images.

    Reads every ``.png`` in ``sourcedir``, crops it so both dimensions are
    multiples of ``mod_scale``, then writes three variants under ``savedir``:
    HR (modcropped), LR (downscaled by ``up_scale``), and Bic (LR upscaled
    back by ``up_scale``).
    """
    # set parameters
    up_scale = 4
    mod_scale = 4
    # set data dir
    sourcedir = "/data/DIV2K_Flickr2K/source/"
    savedir = "/data/DIV2K_Flickr2K/"

    saveHRpath = os.path.join(savedir, "HR", "x" + str(mod_scale))
    saveLRpath = os.path.join(savedir, "LR", "x" + str(up_scale))
    saveBicpath = os.path.join(savedir, "Bic", "x" + str(up_scale))

    if not os.path.isdir(sourcedir):
        print("Error: No source data found")
        # NOTE(review): exits with status 0 on an error path — kept for
        # backward compatibility; consider exit(1).
        exit(0)

    def _make_dir(path):
        # create the directory if it does not exist (silent otherwise)
        if not os.path.isdir(path):
            os.mkdir(path)

    def _make_output_dir(path):
        # create the leaf output dir, warning when existing files may be overwritten
        if not os.path.isdir(path):
            os.mkdir(path)
        else:
            print("It will cover " + str(path))

    _make_dir(savedir)
    for sub in ("HR", "LR", "Bic"):
        _make_dir(os.path.join(savedir, sub))
    for leaf in (saveHRpath, saveLRpath, saveBicpath):
        _make_output_dir(leaf)

    filepaths = [f for f in os.listdir(sourcedir) if f.endswith(".png")]

    # prepare data with augementation
    for i, filename in enumerate(filepaths):
        print("No.{} -- Processing {}".format(i, filename))
        # read image
        image = cv2.imread(os.path.join(sourcedir, filename))

        width = int(np.floor(image.shape[1] / mod_scale))
        height = int(np.floor(image.shape[0] / mod_scale))
        # modcrop: trim so both dimensions are exact multiples of mod_scale
        if len(image.shape) == 3:
            image_HR = image[0:mod_scale * height, 0:mod_scale * width, :]
        else:
            image_HR = image[0:mod_scale * height, 0:mod_scale * width]
        # LR
        image_LR = imresize(image_HR, 1 / up_scale, True)
        # bic
        image_Bic = imresize(image_LR, up_scale, True)

        cv2.imwrite(os.path.join(saveHRpath, filename), image_HR)
        cv2.imwrite(os.path.join(saveLRpath, filename), image_LR)
        cv2.imwrite(os.path.join(saveBicpath, filename), image_Bic)
def generate_mod_LR_bic():
    """Generate HR / LR / Bic / LR-blurred variants for the Set5 images.

    For each ``.png`` in ``sourcedir`` the image is modcropped, then blurred
    with isotropic Gaussian kernels of several widths (SRMD-style
    degradation) to produce LRblur outputs, plus plain bicubic LR and Bic
    (re-upscaled) versions.
    """
    # set parameters
    up_scale = 4
    mod_scale = 4
    # set data dir
    sourcedir = "/data/Set5/source/"
    savedir = "/data/Set5/"

    # load PCA matrix of enough kernel
    print("load PCA matrix")
    pca_matrix = torch.load(
        "../../pca_matrix.pth", map_location=lambda storage, loc: storage
    )
    print("PCA matrix shape: {}".format(pca_matrix.shape))

    degradation_setting = {
        "random_kernel": False,
        "code_length": 10,
        "ksize": 21,
        "pca_matrix": pca_matrix,
        "scale": up_scale,
        "cuda": True,
        # BUG FIX: was `"rate_iso", 1.0` — a bare tuple inside the dict
        # literal, which is a syntax error; it must be a key: value pair.
        "rate_iso": 1.0,
    }

    # set random seed
    util.set_random_seed(0)

    saveHRpath = os.path.join(savedir, "HR", "x" + str(mod_scale))
    saveLRpath = os.path.join(savedir, "LR", "x" + str(up_scale))
    saveBicpath = os.path.join(savedir, "Bic", "x" + str(up_scale))
    saveLRblurpath = os.path.join(savedir, "LRblur", "x" + str(up_scale))

    if not os.path.isdir(sourcedir):
        print("Error: No source data found")
        # NOTE(review): exits with status 0 on an error path — kept for
        # backward compatibility; consider exit(1).
        exit(0)

    def _make_dir(path):
        # create the directory if it does not exist (silent otherwise)
        if not os.path.isdir(path):
            os.mkdir(path)

    def _make_output_dir(path):
        # create the leaf output dir, warning when existing files may be overwritten
        if not os.path.isdir(path):
            os.mkdir(path)
        else:
            print("It will cover " + str(path))

    _make_dir(savedir)
    for sub in ("HR", "LR", "Bic", "LRblur"):
        _make_dir(os.path.join(savedir, sub))
    for leaf in (saveHRpath, saveLRpath, saveBicpath, saveLRblurpath):
        _make_output_dir(leaf)

    filepaths = sorted([f for f in os.listdir(sourcedir) if f.endswith(".png")])
    print(filepaths)
    num_files = len(filepaths)  # kept for the commented-out kernel-map code below

    # kernel_map_tensor = torch.zeros((num_files, 1, 10))  # each kernel map: 1*10

    # prepare data with augementation
    for i, filename in enumerate(filepaths):
        print("No.{} -- Processing {}".format(i, filename))
        # read image
        image = cv2.imread(os.path.join(sourcedir, filename))

        width = int(np.floor(image.shape[1] / mod_scale))
        height = int(np.floor(image.shape[0] / mod_scale))
        # modcrop: trim so both dimensions are exact multiples of mod_scale
        if len(image.shape) == 3:
            image_HR = image[0 : mod_scale * height, 0 : mod_scale * width, :]
        else:
            image_HR = image[0 : mod_scale * height, 0 : mod_scale * width]

        # LR_blur, by random gaussian kernel: one output per sigma
        img_HR = util.img2tensor(image_HR)
        C, H, W = img_HR.size()
        for sig in np.linspace(1.8, 3.2, 8):
            prepro = util.SRMDPreprocessing(sig=sig, **degradation_setting)
            LR_img, ker_map = prepro(img_HR.view(1, C, H, W))
            image_LR_blur = util.tensor2img(LR_img)
            cv2.imwrite(os.path.join(saveLRblurpath, 'sig{}_{}'.format(sig, filename)), image_LR_blur)
            cv2.imwrite(os.path.join(saveHRpath, 'sig{}_{}'.format(sig, filename)), image_HR)

        # LR
        image_LR = imresize(image_HR, 1 / up_scale, True)
        # bic
        image_Bic = imresize(image_LR, up_scale, True)

        # cv2.imwrite(os.path.join(saveHRpath, filename), image_HR)
        cv2.imwrite(os.path.join(saveLRpath, filename), image_LR)
        cv2.imwrite(os.path.join(saveBicpath, filename), image_Bic)

        # kernel_map_tensor[i] = ker_map
    # save dataset corresponding kernel maps
    # torch.save(kernel_map_tensor, './Set5_sig2.6_kermap.pth')
    print("Image Blurring & Down smaple Done: X" + str(up_scale))