def divide(flags):
    """Divide every PNG in ``flags.input_dir`` into small overlapping patches.

    Challenge: can be used in all challenges.

    Args:
      flags: an object carrying the run configuration; the fields read here are
        - ``input_dir``: directory of images to be divided,
        - ``save_dir``: directory the patches are written to,
        - ``patch``: dividing patch size (pixels),
        - ``stride``: dividing stride (usually smaller than ``patch``).
    """

    def _divide(img: Image, stride: int, size: int) -> list:
        """Tile ``img`` into ``size``x``size`` patches sampled every ``stride`` px."""
        w = img.width
        h = img.height
        img = img_to_array(img, data_format='channels_last')
        patches = []
        # Reflect-pad the bottom/right edge so every window starting inside the
        # original image has a full size x size extent.  When the dimension is an
        # exact multiple of the stride, `size - dim % stride` degenerates and the
        # `or stride` fallback pads by one stride instead of zero.
        img = np.pad(img, [[0, size - h % stride or stride],
                           [0, size - w % stride or stride],
                           [0, 0]],
                     mode='reflect')
        # (a stray no-op expression `size - w % stride` was removed here)
        for i in np.arange(0, h, stride):
            for j in np.arange(0, w, stride):
                patches.append(img[i:i + size, j:j + size])
        return patches

    save_dir = Path(flags.save_dir).resolve()
    save_dir.mkdir(exist_ok=True, parents=True)
    files = sorted(Path(flags.input_dir).glob("*.png"))
    print(" [*] Dividing...\n")
    for f in tqdm.tqdm(files, ascii=True):
        pf = _divide(Image.open(f), flags.stride, flags.patch)
        for i, p in enumerate(pf):
            array_to_img(p, 'RGB', data_format='channels_last').save(
                f"{save_dir}/{f.stem}_{i:04d}.png")
def rsr():
    """Divide reference images into patches, or combine SR result patches back.

    With ``FLAGS.results`` set, previously generated sub-patches are stitched
    back onto each reference image; otherwise each reference image is divided
    into patches and saved individually.
    """
    out_dir = Path(FLAGS.save_dir)
    out_dir.mkdir(exist_ok=True, parents=True)
    # NOTE(review): `files` is only bound when FLAGS.ref_dir is truthy; the
    # loops below would raise NameError otherwise — presumably ref_dir is
    # always provided. Verify against the flag definition.
    if FLAGS.ref_dir:
        files = sorted(Path(FLAGS.ref_dir).glob("*.png"))
    if FLAGS.results:
        print(" [!] Combining...\n")
        result_dir = Path(FLAGS.results)
        for ref in tqdm.tqdm(files, ascii=True):
            # Patches are named <stem>_NNNN.png; order them by that index.
            pieces = sorted(result_dir.glob("{}_????.png".format(ref.stem)),
                            key=lambda x: int(x.stem[-4:]))
            pieces = [Image.open(s) for s in pieces]
            stitched = combine(Image.open(ref), pieces, FLAGS.stride)
            stitched.save("{}/{}_sr.png".format(out_dir, ref.stem))
    else:
        print(" [!] Dividing...\n")
        for ref in tqdm.tqdm(files, ascii=True):
            patches = divide(Image.open(ref), FLAGS.stride, FLAGS.patch_size)
            for idx, patch in enumerate(patches):
                array_to_img(patch, 'RGB').save("{}/{}_{:04d}.png".format(
                    out_dir, ref.stem, idx))
def combine(ref: Image, sub: list, stride) -> Image:
    """Stitch patch images back into one image the size of ``ref``.

    Patches must be ordered row-major with the same ``stride`` used when
    dividing.  Overlapping pixels are averaged via a per-pixel hit count.

    Args:
      ref: reference image supplying the output width/height.
      sub: list of PIL patch images, row-major order.
      stride: step (pixels) between consecutive patch origins.

    Returns:
      The recombined PIL image.
    """
    w = ref.width
    h = ref.height
    blank = np.zeros([h, w, 3], 'float32')
    count = np.zeros([h, w, 1])
    k = 0
    for i in np.arange(0, h, stride):
        for j in np.arange(0, w, stride):
            p = sub[k]
            k += 1
            # Clamp the patch to the image bounds.  Slicing past the end of a
            # numpy array is a no-op, so `[:h - i, :w - j]` handles both
            # interior patches (full extent) and edge patches (truncated)
            # uniformly — the original used try/except ValueError with a
            # duplicated add for the edge case.
            blank[i:i + p.height, j:j + p.width] += img_to_array(p)[:h - i, :w - j]
            count[i:i + p.height, j:j + p.width] += 1
    # Average overlapping contributions.  Assumes every pixel is covered by at
    # least one patch (stride <= patch size), otherwise this divides by zero.
    blank /= count
    return array_to_img(blank, 'RGB')
def main(*args):
    """Compute per-dataset channel statistics (mean, optional std) and optional
    FID Inception statistics, then save everything with np.savez_compressed.

    Reads FLAGS.dataset (names looked up in DATASETS), FLAGS.std, FLAGS.fid,
    FLAGS.model_path and FLAGS.output; accumulates results in the SAVE dict.
    """
    for name in FLAGS.dataset:
        name = name.upper()
        d = DATASETS.get(name)
        if not d:
            # Unknown dataset aborts the whole run, not just this entry.
            tf.logging.error('Could not find ' + name)
            return
        # calc mean [R G B]
        loader = QuickLoader(1, d, 'train', 1, convert_to='RGB')
        colors = []
        for img, _, _ in loader.make_one_shot_iterator(shard=8):
            # Flatten every image to an (N, 3) list of RGB samples.
            rgb = np.reshape(img, [-1, 3])
            colors.append(rgb)
        colors = np.concatenate(colors)
        mean_colors = colors.mean(axis=0, keepdims=True)
        SAVE[f'{name}_MEAN'] = mean_colors
        if FLAGS.std:
            std_colors = colors.std(axis=0, keepdims=True)
            SAVE[f'{name}_STD'] = std_colors
        if FLAGS.fid:
            # activation of pool 3
            inception_pb = FID.check_or_download_inception(FLAGS.model_path)
            FID.create_inception_graph(inception_pb)
            imgs = []
            for img, _, _ in loader.make_one_shot_iterator(shard=8):
                # Inception expects 299x299 inputs; img[0] drops the batch dim.
                imgs += [
                    imresize(array_to_img(img[0], 'RGB'), 0, size=[299, 299])
                ]
            imgs = np.stack(imgs)
            with tf.Session() as sess:
                acts = FID.get_activations(imgs, sess)
            # Gaussian fit of the activations: FID compares (mu, sigma) pairs.
            mu = acts.mean(axis=0)
            sigma = np.cov(acts, rowvar=False)
            SAVE[f'{name}_FID_MU'] = mu
            SAVE[f'{name}_FID_SIGMA'] = sigma
    # NOTE(review): placement reconstructed from collapsed source — saving once
    # after all datasets is the sensible reading; confirm against the original.
    np.savez_compressed(FLAGS.output, **SAVE)
def denoise():
    """NTIRE denoising data preparation.

    Three independent stages, each gated by a flag:
      1. FLAGS.train_dir  — sample random GT/noisy patch pairs and write them
         (plus a bicubic-downscaled LR copy) into a TFRecord file.
      2. FLAGS.validation — unpack the validation .mat blocks to PNG files,
         optionally naming them from FLAGS.metadata.
      3. FLAGS.results    — collect result PNGs back into a results.MAT file.
    """
    save_dir = Path(FLAGS.save_dir)
    save_dir.mkdir(exist_ok=True, parents=True)
    if FLAGS.train_dir:
        train_dir = Path(FLAGS.train_dir)
        train_gt = sorted(train_dir.rglob('*GT_SRGB_010.PNG'))
        train_noisy = sorted(train_dir.rglob('*NOISY_SRGB_010.PNG'))
        # Sorting both lists pairs each GT with its noisy counterpart by path.
        assert len(train_gt) == len(train_noisy)
        writer = tf.io.TFRecordWriter(
            "{}/ntire_denoise_x{}-train.tfrecords".format(FLAGS.save_dir,
                                                          FLAGS.scale))
        # Spread the requested total patch count evenly across image pairs.
        num_each = FLAGS.num // len(train_gt)
        for gt, noisy in zip(train_gt, train_noisy):
            print(gt.stem, noisy.stem)
            name = gt.stem[:4]
            image_gt = Image.open(gt)
            image_noisy = Image.open(noisy)
            _w, _h = image_gt.width, image_gt.height
            _pw = _ph = FLAGS.patch_size
            # Random top-left corners; identical boxes are cropped from both
            # images so GT/noisy patches stay aligned.
            x = np.random.randint(0, _w - _pw + 1, size=num_each)
            y = np.random.randint(0, _h - _ph + 1, size=num_each)
            box = [(_x, _y, _x + _pw, _y + _ph) for _x, _y in zip(x, y)]
            patches_gt = [np.asarray(image_gt.crop(b)) for b in box]
            patches_noisy = [np.asarray(image_noisy.crop(b)) for b in box]
            if FLAGS.augment:
                # Same random flip/rotate ops applied to both members of a pair.
                ops = np.random.randint(0, 2, size=[num_each, 3])
                patches_gt = [_augment(p, op) for p, op in zip(patches_gt, ops)]
                patches_noisy = [_augment(p, op)
                                 for p, op in zip(patches_noisy, ops)]
            for i, patches in tqdm.tqdm(enumerate(zip(patches_gt,
                                                      patches_noisy)),
                                        total=num_each, ascii=True):
                hr, noise = patches
                label = "{}_{}".format(name, i).encode()
                # Each tensor is serialized as an in-memory PNG byte string.
                with io.BytesIO() as fp:
                    hr = array_to_img(hr, 'RGB')
                    hr.save(fp, format='png')
                    fp.seek(0)
                    hr_png = fp.read()
                with io.BytesIO() as fp:
                    lr = hr.resize([hr.width // FLAGS.scale,
                                    hr.height // FLAGS.scale], Image.BICUBIC)
                    lr.save(fp, format='png')
                    fp.seek(0)
                    lr_png = fp.read()
                with io.BytesIO() as fp:
                    array_to_img(noise, 'RGB').save(fp, format='png')
                    fp.seek(0)
                    noisy_png = fp.read()
                make_tensor_label_records(
                    [hr_png, lr_png, label, noisy_png],
                    ["image/hr", "image/lr", "name", "image/post"], writer)
    val_mat = FLAGS.validation
    metadata = FLAGS.metadata
    if metadata:
        metadata = sorted(Path(FLAGS.metadata).rglob('*.MAT'))
        metadata = [loadmat(str(m))['metadata'] for m in metadata]
        metadata = [m[0, 0][0][0] for m in metadata]
        metadata = [Path(m).parent.parent.stem for m in metadata]
        # Hard-coded correction for a known-bad entry in the released metadata.
        metadata[33] = "0158_007_GP_03200_03200_5500_N"
        metadata = np.asarray([m.split('_') for m in metadata])
        assert metadata.shape[1] == 7
    if val_mat:
        val_mat = loadmat(val_mat)['ValidationNoisyBlocksSrgb']
        # 40 scenes x 32 blocks of 256x256 RGB.
        assert val_mat.shape == (40, 32, 256, 256, 3)
        assert val_mat.dtype == 'uint8'
        g = enumerate(val_mat.reshape([-1, 256, 256, 3]))
        for i, img in tqdm.tqdm(g, total=40 * 32, ascii=True):
            img = Image.fromarray(img, 'RGB')
            # NOTE(review): if metadata was never populated, `suffix` is unbound
            # here (and `metadata is not None` is True for an empty-string
            # flag) — looks like this path assumes FLAGS.metadata is always
            # set together with FLAGS.validation; confirm.
            if metadata is not None:
                # 32 blocks per scene -> i // 32 indexes the scene's metadata.
                suffix = "{}_{}_{}_{}_{}_{}".format(*metadata[i // 32][1:])
            img.save("{}/{:04d}_{}.png".format(save_dir, i, suffix))
    if FLAGS.results:
        results = []
        g = sorted(Path(FLAGS.results).glob('*.png'))
        assert len(g) == 40 * 32
        print("Appending results...")
        for img in tqdm.tqdm(g, ascii=True):
            img = Image.open(img)
            if img.width != 256 or img.height != 256:
                img = img.resize([256, 256], Image.BICUBIC)
            results.append(img_to_array(img))
        results = np.stack(results).reshape([40, 32, 256, 256, 3])
        savemat("{}/results.MAT".format(save_dir), {"results": results})
        print("Saved to {}/results.MAT".format(save_dir))
def test_rgb2yuv():
    """Visual smoke test: fetch the sample image, convert RGB->YUV, display."""
    rgb = imread(URL).astype('float32')
    yuv = rgb_to_yuv(rgb, 255, 'matlab')
    array_to_img(yuv).show()
def denoise():
    """NTIRE denoising data preparation (patch-folder variant).

    Stages, each gated by a flag:
      1. FLAGS.train_dir  — shave borders off every GT/noisy training image,
         tile it into patches, and save them under
         ``save_dir/train/{gt_patches,noisy_patches}/<image>/``.
      2. FLAGS.validation — unpack the validation .mat blocks to PNG files,
         optionally naming them from FLAGS.metadata.
      3. FLAGS.results    — collect result PNGs back into a results .mat file.

    NOTE(review): the GT and noisy branches originally repeated the same
    ~25-line shave/pad/tile/save sequence verbatim; it is now factored into
    the nested helper ``_dump_patches`` with identical behavior.
    """
    save_dir = Path(FLAGS.save_dir)
    save_dir.mkdir(exist_ok=True, parents=True)

    def _dump_patches(name, img, subdir):
        """Shave FLAGS.shave px borders off ``img``, tile into patches and save
        them as PNGs under save_dir/train/<subdir>/<name.stem>/."""
        folder = save_dir / 'train' / subdir
        folder /= name.stem
        folder.mkdir(exist_ok=True, parents=True)
        box = [
            FLAGS.shave, FLAGS.shave, img.width - FLAGS.shave,
            img.height - FLAGS.shave
        ]
        img = img.crop(box)
        w = img.width
        h = img.height
        img = img_to_array(img)
        patches = []
        size = FLAGS.patch_size
        stride = FLAGS.stride
        # Reflect-pad bottom/right so every stride-aligned window is full size;
        # `or stride` covers the exact-multiple case where the pad would be 0.
        img = np.pad(img, [[0, size - h % stride or stride],
                           [0, size - w % stride or stride], [0, 0]],
                     mode='reflect')
        for i in np.arange(0, h, stride):
            for j in np.arange(0, w, stride):
                patches.append(img[i:i + size, j:j + size])
        for i, p in enumerate(patches):
            array_to_img(p, 'RGB').save("{}/{}_{:04d}.png".format(
                str(folder), name.stem, i))

    if FLAGS.train_dir:
        # pre-processing training data
        train_dir = Path(FLAGS.train_dir)
        train_gt = sorted(train_dir.rglob('*GT_SRGB_010.PNG'))
        train_noisy = sorted(train_dir.rglob('*NOISY_SRGB_010.PNG'))
        assert len(train_gt) == len(train_noisy)
        # loading images
        train_gt_img = [Image.open(i) for i in train_gt]
        train_noisy_img = [Image.open(i) for i in train_noisy]
        # divide into patches and shave out boarders
        for name, img in tqdm.tqdm(zip(train_gt, train_gt_img),
                                   ascii=True,
                                   total=len(train_gt)):
            _dump_patches(name, img, 'gt_patches')
        for name, img in tqdm.tqdm(zip(train_noisy, train_noisy_img),
                                   ascii=True,
                                   total=len(train_noisy)):
            _dump_patches(name, img, 'noisy_patches')
    val_mat = FLAGS.validation
    metadata = FLAGS.metadata
    if metadata:
        metadata = sorted(Path(FLAGS.metadata).rglob('*.MAT'))
        metadata = [loadmat(str(m))['metadata'] for m in metadata]
        metadata = [m[0, 0][0][0] for m in metadata]
        metadata = [Path(m).parent.parent.stem for m in metadata]
        # Hard-coded correction for a known-bad entry in the released metadata.
        metadata[33] = "0158_007_GP_03200_03200_5500_N"
        metadata = np.asarray([m.split('_') for m in metadata])
        assert metadata.shape[1] == 7
    if val_mat:
        val_mat = loadmat(val_mat)['ValidationNoisyBlocksSrgb']
        # 40 scenes x 32 blocks of 256x256 RGB.
        assert val_mat.shape == (40, 32, 256, 256, 3)
        assert val_mat.dtype == 'uint8'
        g = enumerate(val_mat.reshape([-1, 256, 256, 3]))
        for i, img in tqdm.tqdm(g, total=40 * 32, ascii=True):
            img = Image.fromarray(img, 'RGB')
            # NOTE(review): `suffix` is unbound when metadata was never
            # populated — this path appears to assume FLAGS.metadata is always
            # set together with FLAGS.validation; confirm.
            if metadata is not None:
                suffix = "{}_{}_{}_{}_{}_{}".format(*metadata[i // 32][1:])
            img.save("{}/{:04d}_{}.png".format(save_dir, i, suffix))
    if FLAGS.results:
        results = []
        g = sorted(Path(FLAGS.results).glob('*.png'))
        assert len(g) == 40 * 32
        print(" [*] Appending results...")
        for img in tqdm.tqdm(g, ascii=True):
            img = Image.open(img)
            if img.width != 256 or img.height != 256:
                img = img.resize([256, 256], Image.BICUBIC)
            results.append(img_to_array(img))
        results = np.stack(results).reshape([40, 32, 256, 256, 3])
        # scipy.io.savemat appends ".mat" when the target has no extension.
        savemat("{}/results".format(save_dir), {"results": results})
        print(" [*] Saved to {}/results.mat".format(save_dir))