def valid_inner_epoch(model, data_queue, batch_size):
    # Score the model on one validation set pulled from the data queue and
    # return the PSNR averaged over all samples.
    sum_score = 0
    scale = 1. / 255.
    # Select the array module matching the model (numpy or cupy).
    xp = utils.get_model_module(model)
    valid_x, valid_y = data_queue.get()
    perm = np.random.permutation(len(valid_x))
    for i in six.moves.range(0, len(valid_x), batch_size):
        local_perm = perm[i:i + batch_size]
        # Normalize 8-bit pixel values to [0, 1].
        batch_x = xp.array(valid_x[local_perm], dtype=np.float32) * scale
        batch_y = xp.array(valid_y[local_perm], dtype=np.float32) * scale
        pred = model(batch_x)
        score = iproc.clipped_psnr(pred.data, batch_y)
        # Weight each batch score by its size so the final result is a per-sample mean.
        sum_score += score * len(batch_x)
    return sum_score / len(valid_x)
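# For reference, a minimal sketch of a clipped PSNR over data scaled to [0, 1].
# This is only an illustration in the spirit of iproc.clipped_psnr; the real
# helper's signature and clipping details may differ.
def _clipped_psnr_sketch(pred, true, a_max=1.0):
    # Clip predictions into the valid range before measuring the error.
    pred = np.clip(np.asarray(pred), 0.0, a_max)
    mse = np.mean((pred - np.asarray(true)) ** 2)
    # PSNR in decibels: 10 * log10(peak^2 / MSE).
    return 10 * np.log10(a_max ** 2 / mse)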
def valid_inner_epoch(model, data_queue, batch_size):
    # Variant of the validation loop above: uses model.xp for the array module
    # and disables backprop and train-mode behaviour while scoring.
    sum_score = 0
    xp = model.xp
    valid_x, valid_y = data_queue.get()
    perm = np.random.permutation(len(valid_x))
    with chainer.no_backprop_mode(), chainer.using_config('train', False):
        for i in six.moves.range(0, len(valid_x), batch_size):
            local_perm = perm[i:i + batch_size]
            # Normalize 8-bit pixel values to [0, 1].
            batch_x = xp.array(valid_x[local_perm], dtype=np.float32) / 255
            batch_y = xp.array(valid_y[local_perm], dtype=np.float32) / 255
            pred = model(batch_x)
            score = iproc.clipped_psnr(pred.data, batch_y)
            # Cast to a Python float so a device scalar does not leak into the running sum.
            sum_score += float(score) * len(batch_x)
    return sum_score / len(valid_x)
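# Hypothetical usage sketch: a training loop calls valid_inner_epoch once per
# epoch with a queue that yields (valid_x, valid_y) uint8 arrays. The function
# name, queue and batch size below are illustrative assumptions, not part of
# the original training script.
def run_validation(model, valid_queue):
    score = valid_inner_epoch(model, valid_queue, batch_size=16)
    print('validation PSNR: {:.3f} dB'.format(score))
    return score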
def benchmark(cfg, models, images, sampling_factor, quality):
    # Degrade each source image (downscale and, if requested, JPEG-compress),
    # restore it with the given models, and report the mean PSNR together with
    # its standard error across the image set.
    scores = []
    for src in images:
        dst = pairwise_transform.scale(
            np.array(src), [cfg.downsampling_filter], 1, 1, False)
        if quality != 100 or cfg.method != 'scale':
            # Apply JPEG compression so the denoising models see realistic artifacts.
            with iproc.array_to_wand(dst) as tmp:
                tmp = iproc.jpeg(tmp, sampling_factor, quality)
                dst = iproc.wand_to_array(tmp)
        dst = Image.fromarray(dst)
        if 'noise_scale' in models:
            dst = upscale_image(cfg, dst, models['noise_scale'])
        else:
            if 'noise' in models:
                dst = denoise_image(cfg, dst, models['noise'])
            if 'scale' in models:
                dst = upscale_image(cfg, dst, models['scale'])
        score = iproc.clipped_psnr(np.array(dst), np.array(src), a_max=255)
        scores.append(score)
    # Mean PSNR and its standard error (std / sqrt(n)) over the image set.
    return np.mean(scores), np.std(scores) / np.sqrt(len(scores))
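# Hypothetical driver sketch: load ground-truth images, run the benchmark, and
# print the mean PSNR with its standard error. The function name, file-list
# handling and default quality are illustrative assumptions; cfg, models and
# sampling_factor are passed through unchanged.
def run_benchmark(cfg, models, image_paths, sampling_factor, quality=85):
    images = [Image.open(path).convert('RGB') for path in image_paths]
    mean_psnr, stderr = benchmark(cfg, models, images, sampling_factor, quality)
    print('PSNR: {:.3f} +/- {:.3f} dB'.format(mean_psnr, stderr))
    return mean_psnr, stderr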