def group_mat(flags):
    """Group denoised images into the required ``.mat`` submission format.

    Challenge: Real Image Denoising.

    Args:
        flags: object with two attributes:
            input_dir: path of the result images (40 * 32 PNG blocks).
            save_dir: saving folder, or a file name ending in ``.mat``.
    """
    save_dir = Path(flags.save_dir).resolve()
    if save_dir.suffix.lower() == '.mat':
        # Explicit .mat file name: only the parent folder must exist.
        # (The old code mkdir'ed save_dir itself first, turning the file
        # name into a directory and breaking this documented option.)
        save_dir.parent.mkdir(exist_ok=True, parents=True)
    else:
        # A folder: save to <folder>/results.mat (savemat appends ".mat"
        # when the suffix is missing).
        save_dir.mkdir(exist_ok=True, parents=True)
        save_dir /= 'results'
    results = []
    g = sorted(Path(flags.input_dir).glob('*.png'))
    assert len(g) == 40 * 32, "Not enough image files!"
    print(" [*] Appending results...")
    for img in tqdm.tqdm(g, ascii=True):
        img = Image.open(img)
        # Submission blocks must be exactly 256x256.
        if img.width != 256 or img.height != 256:
            img = img.resize([256, 256], Image.BICUBIC)
        results.append(img_to_array(img))
    results = np.stack(results).reshape([40, 32, 256, 256, 3])
    savemat(str(save_dir), {"results": results})
    print(" [*] Saved to {}".format(save_dir.with_suffix('.mat')))
def combine(ref: Image, sub: list, stride) -> Image:
    """Stitch overlapping patches back into one image.

    Args:
        ref: reference image providing the output width/height.
        sub: patches in row-major grid order (as produced by `divide`).
        stride: step in pixels between neighboring patch origins.

    Returns:
        The recombined RGB image; overlapping pixels are averaged.
    """
    w = ref.width
    h = ref.height
    blank = np.zeros([h, w, 3], 'float32')
    count = np.zeros([h, w, 1])
    k = 0
    for i in np.arange(0, h, stride):
        for j in np.arange(0, w, stride):
            # Clip the patch to the image boundary up front. The old code
            # relied on a ValueError from a shape mismatch and duplicated
            # the accumulation in the except branch; direct clipping is
            # equivalent (a slice past the end is a no-op) and exception-free.
            p = img_to_array(sub[k])[:h - i, :w - j]
            k += 1
            ph, pw = p.shape[0], p.shape[1]
            blank[i:i + ph, j:j + pw] += p
            count[i:i + ph, j:j + pw] += 1
    blank /= count
    return array_to_img(blank, 'RGB')
def divide(img: Image, stride: int, size: int) -> list:
    """Divide an image into `size` x `size` patches on a `stride` grid.

    The image is reflect-padded on the bottom/right so every grid origin
    yields a full patch.

    Args:
        img: input image.
        stride: step in pixels between neighboring patch origins.
        size: side length of each square patch.

    Returns:
        A list of HxWxC arrays in row-major grid order.
    """
    w = img.width
    h = img.height
    arr = img_to_array(img)
    # Pad by `size - dim % stride`; when that happens to be zero (i.e.
    # dim % stride == size) fall back to a full stride of padding.
    # (A dead `size - w % stride` statement was removed here.)
    arr = np.pad(arr,
                 [[0, size - h % stride or stride],
                  [0, size - w % stride or stride],
                  [0, 0]],
                 mode='reflect')
    patches = []
    for i in np.arange(0, h, stride):
        for j in np.arange(0, w, stride):
            patches.append(arr[i:i + size, j:j + size])
    return patches
def test_vid_seek(self):
    """Seeking to the same frame via different modes yields identical data."""
    vf = ImageFile(VID, False)
    frames = [vf.read_frame(1)[0]]
    # Revisit positions through SET/CUR/END seeks, reading one frame each.
    for offset, whence in ((0, SEEK_SET), (-1, SEEK_CUR),
                           (-1, SEEK_END), (2, SEEK_SET)):
        vf.seek(offset, whence)
        frames.append(vf.read_frame(1)[0])
    arrays = [img_to_array(f) for f in frames]
    self.assertTrue(np.all(arrays[0] == arrays[1]))
    self.assertTrue(np.all(arrays[1] == arrays[2]))
    self.assertTrue(np.all(arrays[3] == arrays[4]))
def test_image_seek():
    """Seeking to the same frame via different modes yields identical data."""
    vf = ImageFile(IMG, False)
    frames = [vf.read_frame(1)[0]]
    # Revisit positions through SET/CUR/END seeks, reading one frame each.
    for offset, whence in ((0, SEEK_SET), (-1, SEEK_CUR),
                           (-1, SEEK_END), (-2, SEEK_END)):
        vf.seek(offset, whence)
        frames.append(vf.read_frame(1)[0])
    arrays = [img_to_array(f) for f in frames]
    assert np.all(arrays[0] == arrays[1])
    assert np.all(arrays[1] == arrays[2])
    assert np.all(arrays[3] == arrays[4])
def test_raw_seek(self):
    """Seeking to the same raw frame via different modes yields identical data."""
    vf = RawFile(RAW, 'YV12', [32, 32])
    frames = [vf.read_frame(1)[0]]
    # Each entry is a sequence of seeks applied before one read; the last
    # entry chains an END seek with a CUR seek.
    for seeks in (((0, SEEK_SET),),
                  ((-1, SEEK_CUR),),
                  ((-1, SEEK_END),),
                  ((-2, SEEK_END), (1, SEEK_CUR))):
        for offset, whence in seeks:
            vf.seek(offset, whence)
        frames.append(vf.read_frame(1)[0])
    arrays = [img_to_array(f) for f in frames]
    self.assertTrue(np.all(arrays[0] == arrays[1]))
    self.assertTrue(np.all(arrays[1] == arrays[2]))
    self.assertTrue(np.all(arrays[3] == arrays[4]))
def test_raw_seek():
    """Seeking to the same raw frame via different modes yields identical data."""
    vf = RawFile(RAW, 'YV12', [1920, 1080])
    frames = [vf.read_frame(1)[0]]
    # Each entry is a sequence of seeks applied before one read; the last
    # entry chains an END seek with a CUR seek.
    for seeks in (((0, SEEK_SET),),
                  ((-1, SEEK_CUR),),
                  ((-1, SEEK_END),),
                  ((-2, SEEK_END), (1, SEEK_CUR))):
        for offset, whence in seeks:
            vf.seek(offset, whence)
        frames.append(vf.read_frame(1)[0])
    arrays = [img_to_array(f) for f in frames]
    assert np.all(arrays[0] == arrays[1])
    assert np.all(arrays[1] == arrays[2])
    assert np.all(arrays[3] == arrays[4])
def _encode_png(img) -> bytes:
    """Serialize a PIL image to PNG-encoded bytes."""
    with io.BytesIO() as fp:
        img.save(fp, format='png')
        fp.seek(0)
        return fp.read()


def denoise():
    """Pre-process NTIRE real-image-denoising data, driven by FLAGS.

    - FLAGS.train_dir: crop random GT/noisy patch pairs and write them to a
      TFRecord file under FLAGS.save_dir.
    - FLAGS.validation (+ FLAGS.metadata): dump the official validation .mat
      blocks as PNG files named with camera metadata.
    - FLAGS.results: re-pack result PNGs into the 40x32 submission .mat.
    """
    save_dir = Path(FLAGS.save_dir)
    save_dir.mkdir(exist_ok=True, parents=True)
    if FLAGS.train_dir:
        train_dir = Path(FLAGS.train_dir)
        train_gt = sorted(train_dir.rglob('*GT_SRGB_010.PNG'))
        train_noisy = sorted(train_dir.rglob('*NOISY_SRGB_010.PNG'))
        assert len(train_gt) == len(train_noisy)
        # Use the writer as a context manager so the record file is flushed
        # and closed deterministically (the old code never closed it).
        with tf.io.TFRecordWriter(
                "{}/ntire_denoise_x{}-train.tfrecords".format(
                    FLAGS.save_dir, FLAGS.scale)) as writer:
            num_each = FLAGS.num // len(train_gt)
            for gt, noisy in zip(train_gt, train_noisy):
                print(gt.stem, noisy.stem)
                name = gt.stem[:4]
                image_gt = Image.open(gt)
                image_noisy = Image.open(noisy)
                _w, _h = image_gt.width, image_gt.height
                _pw = _ph = FLAGS.patch_size
                # Random top-left corners for `num_each` crops per image.
                x = np.random.randint(0, _w - _pw + 1, size=num_each)
                y = np.random.randint(0, _h - _ph + 1, size=num_each)
                box = [(_x, _y, _x + _pw, _y + _ph) for _x, _y in zip(x, y)]
                patches_gt = [np.asarray(image_gt.crop(b)) for b in box]
                patches_noisy = [np.asarray(image_noisy.crop(b)) for b in box]
                if FLAGS.augment:
                    # Same random ops applied to GT and noisy so pairs stay aligned.
                    ops = np.random.randint(0, 2, size=[num_each, 3])
                    patches_gt = [
                        _augment(p, op) for p, op in zip(patches_gt, ops)
                    ]
                    patches_noisy = [
                        _augment(p, op) for p, op in zip(patches_noisy, ops)
                    ]
                pairs = enumerate(zip(patches_gt, patches_noisy))
                for i, (hr, noise) in tqdm.tqdm(pairs, total=num_each,
                                                ascii=True):
                    label = "{}_{}".format(name, i).encode()
                    hr = array_to_img(hr, 'RGB')
                    lr = hr.resize(
                        [hr.width // FLAGS.scale, hr.height // FLAGS.scale],
                        Image.BICUBIC)
                    make_tensor_label_records(
                        [_encode_png(hr), _encode_png(lr), label,
                         _encode_png(array_to_img(noise, 'RGB'))],
                        ["image/hr", "image/lr", "name", "image/post"],
                        writer)
    val_mat = FLAGS.validation
    # `or None` keeps the `is not None` check below safe when the flag is an
    # empty string (the old code would then index into '' and crash).
    metadata = FLAGS.metadata or None
    if metadata:
        metadata = sorted(Path(FLAGS.metadata).rglob('*.MAT'))
        metadata = [loadmat(str(m))['metadata'] for m in metadata]
        metadata = [m[0, 0][0][0] for m in metadata]
        metadata = [Path(m).parent.parent.stem for m in metadata]
        # NOTE(review): entry 33 appears hard-patched to a corrected record
        # name — presumably malformed in the released data; verify upstream.
        metadata[33] = "0158_007_GP_03200_03200_5500_N"
        metadata = np.asarray([m.split('_') for m in metadata])
        assert metadata.shape[1] == 7
    if val_mat:
        val_mat = loadmat(val_mat)['ValidationNoisyBlocksSrgb']
        assert val_mat.shape == (40, 32, 256, 256, 3)
        assert val_mat.dtype == 'uint8'
        g = enumerate(val_mat.reshape([-1, 256, 256, 3]))
        for i, img in tqdm.tqdm(g, total=40 * 32, ascii=True):
            img = Image.fromarray(img, 'RGB')
            if metadata is not None:
                # 32 blocks per scene; drop the leading scene id column.
                suffix = "{}_{}_{}_{}_{}_{}".format(*metadata[i // 32][1:])
                img.save("{}/{:04d}_{}.png".format(save_dir, i, suffix))
    if FLAGS.results:
        results = []
        g = sorted(Path(FLAGS.results).glob('*.png'))
        assert len(g) == 40 * 32
        print("Appending results...")
        for img in tqdm.tqdm(g, ascii=True):
            img = Image.open(img)
            if img.width != 256 or img.height != 256:
                img = img.resize([256, 256], Image.BICUBIC)
            results.append(img_to_array(img))
        results = np.stack(results).reshape([40, 32, 256, 256, 3])
        savemat("{}/results.MAT".format(save_dir), {"results": results})
        print("Saved to {}/results.MAT".format(save_dir))
def _grid_patches(img, size, stride):
    """Split `img` into `size` x `size` patches on a `stride` grid.

    The array is reflect-padded on the bottom/right so every grid origin
    yields a full patch. Returns arrays in row-major grid order.
    """
    w = img.width
    h = img.height
    arr = img_to_array(img)
    # Pad by `size - dim % stride`, falling back to a full stride when that
    # expression evaluates to zero.
    arr = np.pad(arr,
                 [[0, size - h % stride or stride],
                  [0, size - w % stride or stride],
                  [0, 0]],
                 mode='reflect')
    return [arr[i:i + size, j:j + size]
            for i in np.arange(0, h, stride)
            for j in np.arange(0, w, stride)]


def _save_patches(names, images, root):
    """Shave FLAGS.shave borders, divide each image into patches and save
    them as PNGs under `root/<image stem>/`."""
    for name, img in tqdm.tqdm(zip(names, images), ascii=True,
                               total=len(names)):
        folder = root / name.stem
        folder.mkdir(exist_ok=True, parents=True)
        box = [FLAGS.shave, FLAGS.shave,
               img.width - FLAGS.shave, img.height - FLAGS.shave]
        img = img.crop(box)
        patches = _grid_patches(img, FLAGS.patch_size, FLAGS.stride)
        for i, p in enumerate(patches):
            array_to_img(p, 'RGB').save(
                "{}/{}_{:04d}.png".format(str(folder), name.stem, i))


def denoise():
    """Pre-process NTIRE real-image-denoising data, driven by FLAGS.

    - FLAGS.train_dir: shave borders, cut GT/noisy images into patches and
      save them as PNGs under FLAGS.save_dir/train/.
    - FLAGS.validation (+ FLAGS.metadata): dump the official validation .mat
      blocks as PNG files named with camera metadata.
    - FLAGS.results: re-pack result PNGs into the 40x32 submission .mat.
    """
    save_dir = Path(FLAGS.save_dir)
    save_dir.mkdir(exist_ok=True, parents=True)
    if FLAGS.train_dir:
        # pre-processing training data
        train_dir = Path(FLAGS.train_dir)
        train_gt = sorted(train_dir.rglob('*GT_SRGB_010.PNG'))
        train_noisy = sorted(train_dir.rglob('*NOISY_SRGB_010.PNG'))
        assert len(train_gt) == len(train_noisy)
        # loading images
        train_gt_img = [Image.open(i) for i in train_gt]
        train_noisy_img = [Image.open(i) for i in train_noisy]
        # The GT and noisy passes were duplicated verbatim; share one helper.
        _save_patches(train_gt, train_gt_img,
                      save_dir / 'train' / 'gt_patches')
        _save_patches(train_noisy, train_noisy_img,
                      save_dir / 'train' / 'noisy_patches')
    val_mat = FLAGS.validation
    # `or None` keeps the `is not None` check below safe when the flag is an
    # empty string (the old code would then index into '' and crash).
    metadata = FLAGS.metadata or None
    if metadata:
        metadata = sorted(Path(FLAGS.metadata).rglob('*.MAT'))
        metadata = [loadmat(str(m))['metadata'] for m in metadata]
        metadata = [m[0, 0][0][0] for m in metadata]
        metadata = [Path(m).parent.parent.stem for m in metadata]
        # NOTE(review): entry 33 appears hard-patched to a corrected record
        # name — presumably malformed in the released data; verify upstream.
        metadata[33] = "0158_007_GP_03200_03200_5500_N"
        metadata = np.asarray([m.split('_') for m in metadata])
        assert metadata.shape[1] == 7
    if val_mat:
        val_mat = loadmat(val_mat)['ValidationNoisyBlocksSrgb']
        assert val_mat.shape == (40, 32, 256, 256, 3)
        assert val_mat.dtype == 'uint8'
        g = enumerate(val_mat.reshape([-1, 256, 256, 3]))
        for i, img in tqdm.tqdm(g, total=40 * 32, ascii=True):
            img = Image.fromarray(img, 'RGB')
            if metadata is not None:
                # 32 blocks per scene; drop the leading scene id column.
                suffix = "{}_{}_{}_{}_{}_{}".format(*metadata[i // 32][1:])
                img.save("{}/{:04d}_{}.png".format(save_dir, i, suffix))
    if FLAGS.results:
        results = []
        g = sorted(Path(FLAGS.results).glob('*.png'))
        assert len(g) == 40 * 32
        print(" [*] Appending results...")
        for img in tqdm.tqdm(g, ascii=True):
            img = Image.open(img)
            if img.width != 256 or img.height != 256:
                img = img.resize([256, 256], Image.BICUBIC)
            results.append(img_to_array(img))
        results = np.stack(results).reshape([40, 32, 256, 256, 3])
        savemat("{}/results".format(save_dir), {"results": results})
        print(" [*] Saved to {}/results.mat".format(save_dir))