Code Example #1
def main(args):
    im = piio.read(args.im)

    # The first is always the reference image
    curr = im
    if args.sigma > 0:
        curr = curr + sample_noise(im.shape, args.sigma)
    if args.clip:
        curr = np.clip(curr, 0., 255.)
    curr_bayer = rgb2bayer(curr)
    piio.write(args.out % 0, curr_bayer)

    p_file = open(args.p, 'w')
    # The rest of the images are generated using warping according to a transform
    for i in range(1,args.len):
        p = sample_transform()
        p_file.write(str(p)+'\n')
        curr, _ = warp(im, p)
        if args.sigma > 0:
            curr = curr + sample_noise(im.shape, args.sigma)
        if args.clip:
            curr = np.clip(curr, 0., 255.)

        curr_bayer = rgb2bayer(curr)
        piio.write(args.out % i, curr_bayer)

    p_file.close()
Code Example #2
def cpu(x):
	# Quick-look helper: dump the array x to a temporary TIFF, open it with
	# the external `cpu` image viewer, then delete the file.
	import tempfile, iio, os

	f = f"{tempfile.NamedTemporaryFile().name}.tiff"
	c = f"cpu {f} 2>/dev/null ; rm {f}"

	iio.write(f, x)
	os.system(c)
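
# Usage sketch: the call below would pop up a viewer window, assuming the
# external `cpu` image viewer binary is installed and on PATH:
import numpy as np
cpu(np.random.rand(128, 128, 3))  # view a random RGB image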
Code Example #3
def render_shadows(d, s=(1,1,1)):
	# Run the external `shadowcast` tool on the image d with parameters s,
	# going through temporary files for the input and the output.
	import tempfile, iio, os
	fi = tempfile.NamedTemporaryFile(suffix=".tif").name
	fo = tempfile.NamedTemporaryFile(suffix=".png").name
	iio.write(fi, d)
	os.system(f"shadowcast -M {s[0]} {s[1]} {s[2]} {fi} {fo}")
	z = iio.read(fo).squeeze()
	os.system(f"rm -f {fi} {fo}")
	return z
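
# Usage sketch with a synthetic ramp as input (assumes the external
# `shadowcast` binary is on PATH; the parameters s are passed to its -M flag
# as in the function above):
import numpy as np
dem = np.tile(np.linspace(0., 100., 256, dtype=np.float32), (256, 1))
shadow = render_shadows(dem, s=(1, 1, 1))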
Code Example #4
def tonemap(images,
            outdir='.',
            append='',
            outext='.png',
            pctbot=.5,
            pcttop=.5,
            method='med-mad'):
    """
    Apply a basic tone-mapping and save the images in the specified format.

    Args:
        images: (list of str) list of images (paths) to tonemap.
        outdir: (str) output directory. Default is '.'.
        append: (str) text appended to the tone-mapped images.
                Default is nothing.
        outext: (str) the extension to use for the output images (including the
                leading dot). Default is '.png'.
        pctbot: (float) percentage of clipping on the bottom of the dynamic
                range. Default is .5.
        pcttop: (float) percentage of clipping on the top of the dynamic range.
                Default is .5.
        method: (str) tone-mapping method: 'med-mad' (median/MAD
                normalization) or any other value for percentile clipping.
                Default is 'med-mad'.

    Returns:
        (list of str) list of names of the output images.
    """

    images = [images] if not isinstance(images, list) else images

    out = [
        os.path.join(
            outdir,
            os.path.splitext(os.path.basename(i))[0] + append + outext)
        for i in images
    ]
    ims = [iio.read(i).astype(np.float64) for i in images]
    lum = [np.mean(i, axis=2) for i in ims]

    if method == 'med-mad':
        med = [np.median(l) for l in lum]
        mad = [np.median(np.abs(l - m)) for l, m in zip(lum, med)]
        ims = [
            np.clip(np.round(255 * ((i - m) / (n * 6) + 0.5)), 0,
                    255).astype(np.uint8) for i, m, n in zip(ims, med, mad)
        ]

    else:
        pct = [(np.percentile(i, pctbot), np.percentile(i, 100 - pcttop))
               for i in lum]
        ims = [
            np.clip(np.round(255 * (i - p[0]) / (p[1] - p[0])), 0,
                    255).astype(np.uint8) for p, i in zip(pct, ims)
        ]

    for i, o in zip(ims, out):
        iio.write(o, i)

    return out
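
# Usage sketch (hypothetical file name; assumes os, numpy as np and iio are
# imported by the surrounding module, as the tonemap function requires):
outs = tonemap('hdr_image.tif', append='_tm', method='med-mad')
print(outs)  # ['./hdr_image_tm.png']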
Code Example #5
def write_file(f, img):
    """
    Write the image img to the file f. TIFF outputs keep the raw (float)
    values; any other format is rounded and clipped to uint8 first.
    """
    img = np.squeeze(img)
    if f[-4:] == 'tiff' or f[-3:] == 'tif':
        piio.write(f, img)
    else:
        img = np.floor(img + 0.5)
        img[img < 0] = 0
        img[img > 255] = 255
        img = np.asarray(img, dtype=np.uint8)
        piio.write(f, img)
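
# Usage sketch (assumes numpy as np and piio are imported, as the function
# above requires):
img = 255 * np.random.rand(32, 32, 3)
write_file('out.tif', img)  # TIFF: float values written as-is
write_file('out.png', img)  # other formats: rounded and clipped to uint8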
Code Example #6
File: video_f2f_8sigmas.py Project: smmzhang/mf2f
def MF2F(**args):
    """
    Main function
    args: Parameters
    """

    ################
    # LOAD THE MODEL
    ################
    if args['network'] == "model/model.pth":
        print("Loading model a pre-trained gaussian FastDVDnet \n")

        model = FastDVDnet(num_input_frames=5)

        #Load saved weights
        state_temp_dict = torch.load(args['network'])

        if cuda:
            device = torch.device("cuda")
            device_ids = [0]
            model = nn.DataParallel(model, device_ids=device_ids).cuda()

        model.load_state_dict(state_temp_dict)

    else:
        model_fn = args['network']
        model_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                model_fn)

        model = torch.load(model_fn)[0]
        model.cuda()

        device = torch.device("cuda")

    ##Freeze all the parameters
    for param in model.parameters():
        param.requires_grad = False

    ut_moins_3 = iio.read(args['input'] % (args['first']))
    H, W, C = ut_moins_3.shape
    H = H if H < val else val
    W = W if W < val else val

    # One trainable noise parameter per brightness level (8 levels) for each
    # of the three central frames (t-1, t, t+1)
    sigma11 = create_parameter(args['noise_level'] / 255)
    sigma12 = create_parameter(args['noise_level'] / 255)
    sigma13 = create_parameter(args['noise_level'] / 255)
    sigma14 = create_parameter(args['noise_level'] / 255)
    sigma15 = create_parameter(args['noise_level'] / 255)
    sigma16 = create_parameter(args['noise_level'] / 255)
    sigma17 = create_parameter(args['noise_level'] / 255)
    sigma18 = create_parameter(args['noise_level'] / 255)
    sigma21 = create_parameter(args['noise_level'] / 255)
    sigma22 = create_parameter(args['noise_level'] / 255)
    sigma23 = create_parameter(args['noise_level'] / 255)
    sigma24 = create_parameter(args['noise_level'] / 255)
    sigma25 = create_parameter(args['noise_level'] / 255)
    sigma26 = create_parameter(args['noise_level'] / 255)
    sigma27 = create_parameter(args['noise_level'] / 255)
    sigma28 = create_parameter(args['noise_level'] / 255)
    sigma31 = create_parameter(args['noise_level'] / 255)
    sigma32 = create_parameter(args['noise_level'] / 255)
    sigma33 = create_parameter(args['noise_level'] / 255)
    sigma34 = create_parameter(args['noise_level'] / 255)
    sigma35 = create_parameter(args['noise_level'] / 255)
    sigma36 = create_parameter(args['noise_level'] / 255)
    sigma37 = create_parameter(args['noise_level'] / 255)
    sigma38 = create_parameter(args['noise_level'] / 255)

    #################
    # DEFINE THE LOSS
    #################

    # The loss needs to be changed when used with different networks
    lr = args['lr']

    weight_decay = 0.00001

    criterion = Loss()
    criterion.cuda()
    optimizer = optim.Adam([{
        'params': [sigma11]
    }, {
        'params': [sigma12]
    }, {
        'params': [sigma13]
    }, {
        'params': [sigma14]
    }, {
        'params': [sigma15]
    }, {
        'params': [sigma16]
    }, {
        'params': [sigma17]
    }, {
        'params': [sigma18]
    }, {
        'params': [sigma21]
    }, {
        'params': [sigma22]
    }, {
        'params': [sigma23]
    }, {
        'params': [sigma24]
    }, {
        'params': [sigma25]
    }, {
        'params': [sigma26]
    }, {
        'params': [sigma27]
    }, {
        'params': [sigma28]
    }, {
        'params': [sigma31]
    }, {
        'params': [sigma32]
    }, {
        'params': [sigma33]
    }, {
        'params': [sigma34]
    }, {
        'params': [sigma35]
    }, {
        'params': [sigma36]
    }, {
        'params': [sigma37]
    }, {
        'params': [sigma38]
    }],
                           lr=lr,
                           betas=(0.2, 0.2),
                           eps=1e-08,
                           weight_decay=weight_decay,
                           amsgrad=False)

    #####   Useful things   #####

    list_PSNR_training = []
    list_PSNR_eval = []

    #Initialisation

    frame = iio.read(args['input'] % (args['first']))

    H, W, C = frame.shape
    H = H if H < val else val
    W = W if W < val else val

    # Write the psnr per frame in this file
    output_path = os.path.dirname(args['output']) + "/"

    path_psnr = output_path + "PSNR.txt"
    path_ssim = output_path + "SSIM.txt"
    path_training = output_path + "PSNR_training.txt"
    path_ssim_training = output_path + "SSIM_training.txt"

    plot_psnr = open(path_psnr, 'w')
    plot_ssim = open(path_ssim, 'w')
    plot_psnr_training = open(path_training, 'w')
    plot_ssim_training = open(path_ssim_training, 'w')

    ###########
    # MAIN LOOP
    ###########
    for i in range(args['first'] + 4, args['last'] - 3):

        ut_moins_4 = reads_image(args['input'] % (i - 4), H, W)
        ut_moins_3 = reads_image(args['input'] % (i - 3), H, W)
        ut_moins_2 = reads_image(args['input'] % (i - 2), H, W)
        ut_moins_1 = reads_image(args['input'] % (i - 1), H, W)
        ut = reads_image(args['input'] % (i), H, W)
        ut_plus_1 = reads_image(args['input'] % (i + 1), H, W)
        ut_plus_2 = reads_image(args['input'] % (i + 2), H, W)
        ut_plus_3 = reads_image(args['input'] % (i + 3), H, W)
        ut_plus_4 = reads_image(args['input'] % (i + 4), H, W)

        #Creation of the stack
        if i % 2 == (args['first'] % 2):

            inframes = [ut_moins_4, ut_moins_2, ut, ut_plus_2, ut_plus_4]
            stack1 = torch.stack(inframes, dim=0).contiguous().view(
                (1, 5 * C, H, W)).cuda()
            stack1.requires_grad = False
            stack = stack1

            flow1 = gives_flow(args['flow'] % (i - 1), H, W)
            mask1, exclusive_mask1 = gives_masks(
                args['mask_collision'] % (i - 1),
                args['mask_warping_res'] % (i - 1), H, W)

        else:
            inframes = [ut_moins_4, ut_moins_2, ut, ut_plus_2, ut_plus_4]
            stack2 = torch.stack(inframes, dim=0).contiguous().view(
                (1, 5 * C, H, W)).cuda()
            stack2.requires_grad = False
            stack = stack2

            flow2 = gives_flow(args['flow'] % (i - 1), H, W)
            mask2, exclusive_mask2 = gives_masks(
                args['mask_collision'] % (i - 1),
                args['mask_warping_res'] % (i - 1), H, W)

            model.eval()
            optimizer.zero_grad()

            for it in range(args['iter']):
                ##Define noise_map depending on luminosity
                u1, u2, u3, u4, u5, u6, u7, u8 = find_brightness(ut_moins_1)
                noise_map_moins_1 = build_variance_map(
                    u1, u2, u3, u4, u5, u6, u7, u8, sigma11, sigma12, sigma13,
                    sigma14, sigma15, sigma16, sigma17, sigma18)

                u1, u2, u3, u4, u5, u6, u7, u8 = find_brightness(ut)
                noise_map = build_variance_map(u1, u2, u3, u4, u5, u6, u7, u8,
                                               sigma21, sigma22, sigma23,
                                               sigma24, sigma25, sigma26,
                                               sigma27, sigma28)

                u1, u2, u3, u4, u5, u6, u7, u8 = find_brightness(ut_plus_1)
                noise_map_plus_1 = build_variance_map(u1, u2, u3, u4, u5, u6,
                                                      u7, u8, sigma31, sigma32,
                                                      sigma33, sigma34,
                                                      sigma35, sigma36,
                                                      sigma37, sigma38)

                optimizer.zero_grad()
                out_train1 = temp_denoise_8_sigmas(model, stack1,
                                                   noise_map_moins_1,
                                                   noise_map, noise_map_plus_1)
                out_train2 = temp_denoise_8_sigmas(model, stack2,
                                                   noise_map_moins_1,
                                                   noise_map, noise_map_plus_1)
                loss = criterion(out_train1, ut_moins_2, flow1, mask1,
                                 exclusive_mask1, out_train2, ut_moins_1,
                                 flow2, mask2, exclusive_mask2, i)
                loss.backward()
                optimizer.step()
                del loss

        ##Define noise_map depending on luminosity
        u1, u2, u3, u4, u5, u6, u7, u8 = find_brightness(ut_moins_2)
        noise_map_moins_1 = build_variance_map(u1, u2, u3, u4, u5, u6, u7, u8,
                                               sigma11, sigma12, sigma13,
                                               sigma14, sigma15, sigma16,
                                               sigma17, sigma18)

        u1, u2, u3, u4, u5, u6, u7, u8 = find_brightness(ut)
        noise_map = build_variance_map(u1, u2, u3, u4, u5, u6, u7, u8, sigma21,
                                       sigma22, sigma23, sigma24, sigma25,
                                       sigma26, sigma27, sigma28)

        u1, u2, u3, u4, u5, u6, u7, u8 = find_brightness(ut_plus_2)
        noise_map_plus_1 = build_variance_map(u1, u2, u3, u4, u5, u6, u7, u8,
                                              sigma31, sigma32, sigma33,
                                              sigma34, sigma35, sigma36,
                                              sigma37, sigma38)

        #Compute and save the denoising
        model.eval()
        with torch.no_grad():
            #denoise with training stack :
            outimg = temp_denoise_8_sigmas(model, stack, noise_map_moins_1,
                                           noise_map, noise_map_plus_1)
            outimg = torch.clamp(outimg, 0, 1)
            outimg = np.array(outimg.cpu())
            outimg = np.squeeze(outimg)
            outimg = outimg.transpose(1, 2, 0)

            #denoise with the natural stack

            inframes = [ut_moins_2, ut_moins_1, ut, ut_plus_1, ut_plus_2]
            stack = torch.stack(inframes, dim=0).contiguous().view(
                (1, 5 * C, H, W)).cuda()
            outimg2 = temp_denoise_8_sigmas(model, stack, noise_map_moins_1,
                                            noise_map, noise_map_plus_1)
            outimg2 = torch.clamp(outimg2, 0, 1)
            outimg2 = np.array(outimg2.cpu())
            outimg2 = np.squeeze(outimg2)
            outimg2 = outimg2.transpose(1, 2, 0)

        #store the results
        iio.write(output_path + "training_{:03d}.png".format(i),
                  (outimg * 255))
        iio.write(args['output'] % i, (outimg2 * 255))

        # Load frame to compute the PSNR
        ref_frame = iio.read(args['ref'] % (i))[:val, :val, :]

        # Compute the PSNR according to the reference frame
        quant_training_stack = psnr(
            ref_frame.astype(outimg.dtype) / 255, outimg)
        quant_eval_stack = psnr(
            ref_frame.astype(outimg2.dtype) / 255., outimg2)
        if quant_eval_stack > quant_training_stack:
            value = 1
        else:
            value = 0

        ssim_training = ssim(outimg * 255, ref_frame)
        ssim_eval = ssim(outimg2 * 255, ref_frame)
        print(
            "Iteration = {:2d}, PSNR training stack = {:5.3f}, PSNR eval stack = {:5.3f}, SSIM training stack {:4.3f}, SSIM eval stack = {:4.3f} {:1d}"
            .format(i, quant_training_stack, quant_eval_stack, ssim_training,
                    ssim_eval, value))

        list_PSNR_training.append(quant_training_stack)
        list_PSNR_eval.append(quant_eval_stack)
        plot_psnr.write(str(quant_eval_stack) + '\n')
        plot_ssim.write(str(ssim_eval) + '\n')
        plot_psnr_training.write(str(quant_training_stack) + '\n')
        plot_ssim_training.write(str(ssim_training) + '\n')

    tab_PSNR_training = np.array(list_PSNR_training)
    tab_PSNR_eval = np.array(list_PSNR_eval)
    print(
        "Average PSNR: training stack = {:5.3f}, eval stack = {:5.3f}".format(
            np.mean(tab_PSNR_training), np.mean(tab_PSNR_eval)))

    torch.save([model, optimizer],
               output_path + args['output_network_after_training'])
    plot_psnr.close()
    plot_ssim.close()
    plot_psnr_training.close()
    plot_ssim_training.close()
Code Example #7
xyz_array = np.array([
    np.broadcast_to(np.arange(w, dtype='float32') / 2, (h, w)),
    np.broadcast_to(
        np.transpose(np.arange(h, dtype='float32')[np.newaxis], (1, 0)) / 2,
        (h, w)), 100 + image * 50
]).transpose((1, 2, 0))

r = 5.0  # filtering radius, in meters
n = 50  # number of points (under which a pixel is rejected)
img_gsd = 1

xyz_array2 = xyz_array.copy()

filter_xyz(xyz_array, r, n, img_gsd)
filter_xyz_bis(xyz_array2, r, n, img_gsd)

output = xyz_array[:, :, 2].squeeze()
output2 = xyz_array2[:, :, 2].squeeze()
easting = xyz_array[:, :, 0].squeeze()
northing = xyz_array[:, :, 1].squeeze()

iio.write('test_out.tif', (output - 100) / 50)
iio.write('test_out2.tif', (output2 - 100) / 50)
iio.write('test_easting.tif', easting)
iio.write('test_northing.tif', northing)

# profiling conclusion:
# filter_xyz_bis, where the NaN assignment is done with the C function, is
# not faster, maybe even slightly slower than filter_xyz (from s2p)
Code Example #8
def deblur(input,
           kernel_size,
           output,
           outputk=None,
           sigma=0,
           lr=0.01,
           reg_noise_std=0.001,
           num_iter=5000,
           normalization=1):
    INPUT = 'noise'
    pad = 'reflection'

    kernel_size = [kernel_size, kernel_size]

    import iio
    imgs = iio.read(input) / normalization
    imgs = imgs.transpose((2, 0, 1))
    y = np_to_torch(imgs).type(dtype)

    img_size = imgs.shape

    padh, padw = kernel_size[0] - 1, kernel_size[1] - 1
    # x_net: generator for the latent sharp image x
    input_depth = 8

    net_input = get_noise(
        input_depth, INPUT,
        (img_size[1] + padh, img_size[2] + padw)).type(dtype).detach()

    net = skip(input_depth,
               3,
               num_channels_down=[128, 128, 128, 128, 128],
               num_channels_up=[128, 128, 128, 128, 128],
               num_channels_skip=[16, 16, 16, 16, 16],
               upsample_mode='bilinear',
               need_sigmoid=True,
               need_bias=True,
               pad=pad,
               act_fun='LeakyReLU')
    net = net.type(dtype)
    # k_net: generator for the blur kernel k
    n_k = 200
    net_input_kernel = get_noise(n_k, INPUT, (1, 1)).type(dtype).detach()
    net_input_kernel = net_input_kernel.squeeze()

    net_kernel = fcn(n_k, kernel_size[0] * kernel_size[1])
    net_kernel = net_kernel.type(dtype)

    # Losses
    mse = torch.nn.MSELoss().type(dtype)
    L1 = torch.nn.L1Loss(reduction='sum').type(dtype)
    lambda_ = 0.1 * sigma / normalization
    tv_loss = TVLoss(tv_loss_weight=lambda_).type(dtype)

    # optimizer
    optimizer = torch.optim.Adam([{
        'params': net.parameters()
    }, {
        'params': net_kernel.parameters(),
        'lr': 1e-4
    }],
                                 lr=lr)
    ml = [
        int(num_iter * 2000 / 5000),
        int(num_iter * 3000 / 5000),
        int(num_iter * 4000 / 5000)
    ]
    scheduler = MultiStepLR(optimizer, milestones=ml,
                            gamma=0.5)  # learning rates

    # initialization inputs
    net_input_saved = net_input.detach().clone()
    net_input_kernel_saved = net_input_kernel.detach().clone()

    ### start SelfDeblur
    for step in tqdm(range(num_iter)):

        # input regularization
        net_input = net_input_saved + reg_noise_std * torch.zeros(
            net_input_saved.shape).type_as(net_input_saved.data).normal_()

        # change the learning rate
        scheduler.step(step)
        optimizer.zero_grad()

        # get the network output
        out_x = net(net_input)
        out_k = net_kernel(net_input_kernel)

        out_k_m = out_k.view(-1, 1, kernel_size[0], kernel_size[1])
        out_y = nn.functional.conv2d(out_x,
                                     out_k_m.expand((3, -1, -1, -1)),
                                     padding=0,
                                     bias=None,
                                     groups=3)

        total_loss = mse(out_y, y) + tv_loss(out_x)
        total_loss.backward()
        optimizer.step()

    out_x_np = torch_to_np(out_x)
    out_x_np = out_x_np.squeeze()
    out_x_np = out_x_np[:, padh // 2:padh // 2 + img_size[1],
                        padw // 2:padw // 2 + img_size[2]]
    out_x_np = out_x_np.transpose((1, 2, 0))
    iio.write(output, out_x_np * normalization)

    if outputk:
        out_k_np = torch_to_np(out_k_m)
        out_k_np = out_k_np.squeeze()
        iio.write(outputk, out_k_np)
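
# Usage sketch (hypothetical paths and parameters; assumes the surrounding
# module provides skip, fcn, get_noise, TVLoss, np_to_torch, torch_to_np,
# dtype, nn, torch and MultiStepLR, which the function above relies on):
deblur('blurry.png', kernel_size=31, output='deblurred.png',
       outputk='kernel.tif', sigma=5, num_iter=1000)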
Code Example #9
im1 = iio.read('data/carc1.jpg').astype(np.float32)
im2 = iio.read('data/carc2.jpg').astype(np.float32)
H = np.zeros(9, dtype=np.float32)
h1, w1, c1 = im1.shape
h2, w2, c2 = im2.shape

im1g = np.mean(im1, axis=2)
im2g = np.mean(im2, axis=2)
print(im1.shape)

w = max(w1,w2)
z = w / (w1 + w2)
h = int(z*max(h1,h2))

inl = np.zeros((3, h, w), dtype = np.float32)
outl = np.zeros((3, h, w), dtype = np.float32)

h = max(h1,h2)
im1w = np.zeros((3, h, w), dtype = np.float32)
im2w = np.zeros((3, h, w), dtype = np.float32)
mosaic = np.zeros((3, h, w), dtype = np.float32)

detection = orsa.estimate_homography_py(im1, w1, h1, c1, im2, w2, h2, c2, 0, 0.6, H, inl, outl, im1w, im2w, mosaic)

iio.write('out/in.png', inl.transpose(1,2,0))
iio.write('out/out.png', outl.transpose(1,2,0))

iio.write("out/im1_warped.png", im1w.transpose(1,2,0))
iio.write("out/im2_warped.png", im2w.transpose(1,2,0))
iio.write("out/mosaic.png", mosaic.transpose(1,2,0))
Code Example #10
def write_tensor(path, tensor):
    import iio
    # NCHW -> NHWC, drop singleton dimensions, detach to CPU numpy
    tensor = tensor.permute((0, 2, 3, 1)).squeeze()
    iio.write(path, tensor.cpu().detach().numpy())
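
# Usage sketch (assumes torch is available):
import torch
write_tensor('random.tif', torch.rand(1, 3, 64, 64))  # saved as a 64x64x3 image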
Code Example #11
# shitfuckery is only because somebody has to extract the damn numbers from
# the damn files.
# --eml

# extract input argument
import sys
filename_in = sys.argv[1]
dataset_id = sys.argv[2]
filename_out = sys.argv[3]

# load file
import h5py
f = h5py.File(filename_in, "r")

# select dataset with the lovely syntax of the h5py object
x = f[dataset_id][()]
f.close()

# in case the array is four-dimensional but the first dimension is trivial
# (this is actually the typical convention for HDF5 multispectral images)
# then remove the first dimension and keep the rest of them
if len(x.shape) == 4 and x.shape[0] == 1:
    x = x[0, :, :, :]

# same thing for three-dimensional array
if len(x.shape) == 3 and x.shape[0] == 1:
    x = x[0, :, :]

# save the array to the output file
import iio
iio.write(filename_out, x)
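
# Usage sketch from the shell (hypothetical script, file and dataset names):
#
#   python extract_h5.py capture.h5 /radiance radiance.tif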
Code Example #12
def main(args):

    # Load the network for the specific application
    model_ref = demosaick_load_model(args.net_path,
                                     args.noise,
                                     xtrans=(args.mosaic_type == 'xtrans'))
    if args.gpu:
        model_ref.cuda()
    else:
        model_ref.cpu()

    # Pad image to avoid border effects
    crop = 48
    print("Crop", crop)

    #Iref = skimage.io.imread(args.input)
    Iref = iio.read(args.input).squeeze()
    if len(Iref.shape) == 3 and Iref.shape[2] == 4:  # remove the alpha channel
        Iref = Iref[:, :, :3]
    dtype = Iref.dtype
    if dtype not in [np.uint8, np.uint16, np.float16, np.float32, np.float64]:
        raise ValueError('Input type not handled: {}'.format(dtype))

    # if integers, make floats in [0, 1]
    if args.real:
        Iref = Iref.astype(np.float64) / 65535.
    else:
        Iref = Iref.astype(np.float64) / 255.

    if args.linear_input:
        print("  - Input is linear, mapping to sRGB for processing")
        Iref = np.power(Iref, 1.0 / 2.2)

    if len(Iref.shape) == 2:
        # Offset the image to match our mosaic pattern
        if args.offset_x > 0:
            print('  - offset x')
            # Iref = Iref[:, 1:]
            Iref = np.pad(Iref, [(0, 0), (args.offset_x, 0)], 'symmetric')

        if args.offset_y > 0:
            print('  - offset y')
            # Iref = Iref[1:, :]
            Iref = np.pad(Iref, [(args.offset_y, 0), (0, 0)], 'symmetric')
        has_groundtruth = False
        Iref = np.dstack((Iref, Iref, Iref))
    else:
        # No need for offsets if we have the ground-truth
        has_groundtruth = True

    if has_groundtruth and args.noise > 0:
        print('  - adding noise sigma={:.3f}'.format(args.noise))
        I = Iref + np.random.normal(loc=0.0, scale=args.noise, size=Iref.shape)
    else:
        I = Iref

    if crop > 0:
        if args.mosaic_type == 'bayer':
            # make sure we don't change the pattern's period
            c = crop + (crop % 2)
            I = np.pad(I, [(c, c), (c, c), (0, 0)], 'symmetric')
        else:
            # make sure we don't change the pattern's period
            c = crop + (crop % 6)
            I = np.pad(I, [(c, c), (c, c), (0, 0)], 'symmetric')

    if has_groundtruth:
        print('  - making mosaick')
    else:
        print('  - formatting mosaick')

    I = np.array(I).transpose(2, 0, 1).astype(np.float32)

    if args.mosaic_type == 'xtrans':
        M = xtrans_mosaic(I)
    else:
        M = bayer_mosaic(I)
    #im = np.expand_dims(im, 0)
    # the other field is just the mask
    M = np.array(M)[:1, :, :, :]

    with th.no_grad():
        R, runtime = demosaick(model_ref, M, args.noise, args.tile_size, crop)

    R = R.squeeze().transpose(1, 2, 0)

    # Remove the padding
    if crop > 0:
        R = R[c:-c, c:-c, :]
        I = I[c:-c, c:-c, :]
        M = M[c:-c, c:-c, :]

    if not has_groundtruth:
        if args.offset_x > 0:
            print('  - remove offset x')
            R = R[:, args.offset_x:]
            I = I[:, args.offset_x:]
            M = M[:, args.offset_x:]

        if args.offset_y > 0:
            print('  - remove offset y')
            R = R[args.offset_y:, :]
            I = I[args.offset_y:, :]
            M = M[args.offset_y:, :]

    if len(Iref.shape) == 2:
        # Offset the image to match our mosaic pattern
        if args.offset_x == 1:
            print('  - offset x')
            Iref = Iref[:, 1:]

        if args.offset_y == 1:
            print('  - offset y')
            Iref = Iref[1:, :]
        has_groundtruth = False

    if args.linear_input:
        print("  - Input is linear, mapping output back from sRGB")
        R = np.power(R, 2.2)

    if has_groundtruth:
        p = _psnr(R, Iref, crop=crop)
        file_psnr = open(args.output_psnr, 'w')
        file_psnr.write(str(p))
        file_psnr.close()
        print('  PSNR = {:.1f} dB, time = {} ms'.format(p, int(runtime)))
    else:
        print('  - raw image without groundtruth, bypassing metric')

    if args.real:
        out = R * 65535.
    else:
        out = R * 255.

    # Write output image
    #skimage.io.imsave(args.output, out)
    iio.write(args.output, out)
Code Example #13
def save_image(path, img):
    # detach from the graph, move to CPU, drop singleton dimensions
    img = img.detach().cpu().numpy().squeeze()
    img = img.transpose(1, 2, 0)        # CHW -> HWC
    img = (img * 255).astype(np.uint8)  # [0,1] float -> uint8
    iio.write(path, img)
Code Example #14
#!/usr/bin/env python3

import iio

d = iio.read('testimg.tif')
print(d.shape)
print(d[:, :, 0])
iio.write('kk2.tif', d + 1000)
Code Example #15
import iio                   # image file input/output library
x = iio.read("fuji.npy")     # read the array of heights into x
y = x[1:,] - x[:-1,]         # partial derivative by finite differences
Y = 127 + 2 * y              # squeeze the range into [0,255] (roughly)
iio.write("fuji_dy.png", Y)  # write the partial derivative as an image
Code Example #16
def MF2F(**args):
    """
    Main function
    args: Parameters
    """

    ################
    # LOAD THE MODEL
    ################
        
    model = FastDVDnet(num_input_frames=5)
   
    #Load saved weights
    state_temp_dict = torch.load(args['network'])

    if cuda:
        device = torch.device("cuda")
        device_ids = [0]
        model = nn.DataParallel(model, device_ids = device_ids).cuda()
    
    model.load_state_dict(state_temp_dict)

    ## Define a parameter sigma for the student (non-teacher) network
    sigma = torch.tensor([args['noise_level']/255.], requires_grad=True).cuda()
    sigma = torch.nn.Parameter(sigma)

    #################
    # DEFINE THE LOSS
    #################

    # The loss needs to be changed when used with different networks
    lr = args['lr']
    weight_decay = 0.00001
    
    criterion_student = Loss() 
    criterion_student.cuda()

    optimizer_student = optim.Adam([{'params':model.parameters()}, {'params':[sigma], 'lr':0.02, 'betas':(0.15,0.15)}], lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=weight_decay, amsgrad=False)


    #####   Useful things   #####

    list_PSNR_training = []
    list_PSNR_eval     = []
    
    #Initialisation
    frame = iio.read(args['input'] % (args['first']))

    H, W, C = frame.shape
    H = H if H < val else val
    W = W if W < val else val
    noise_map = sigma.expand((1, 1, H, W))
    
    # Write the psnr per frame in this file
    output_path = os.path.dirname(args['output']) + "/"
    path_psnr          = output_path + "PSNR.txt"
    path_ssim          = output_path + "SSIM.txt"
    path_training      = output_path + "PSNR_training.txt"
    path_ssim_training = output_path + "SSIM_training.txt"

    plot_psnr          = open(path_psnr, 'w')
    plot_ssim          = open(path_ssim, 'w')
    plot_psnr_training = open(path_training, 'w')
    plot_ssim_training = open(path_ssim_training, 'w')


    ###########
    # MAIN LOOP
    ###########
    for i in range(args['first']+4, args['last']-3):

        ut_moins_4 = reads_image(args['input'] % (i-4), H, W)
        ut_moins_3 = reads_image(args['input'] % (i-3), H, W)
        ut_moins_2 = reads_image(args['input'] % (i-2), H, W)
        ut_moins_1 = reads_image(args['input'] % (i-1), H, W)
        ut         = reads_image(args['input'] % (i)  , H, W)
        ut_plus_1  = reads_image(args['input'] % (i+1), H, W)
        ut_plus_2  = reads_image(args['input'] % (i+2), H, W)
        ut_plus_3  = reads_image(args['input'] % (i+3), H, W)
        ut_plus_4  = reads_image(args['input'] % (i+4), H, W)
        
        #Creation of the stack
        if i%2==(args['first']%2):
                
            inframes = [ut_moins_4, ut_moins_2, ut, ut_plus_2, ut_plus_4]
            stack1 = torch.stack(inframes, dim=0).contiguous().view((1, 5*C, H, W)).cuda()
            stack = stack1

            fastdvdnet1 = iio.read(args['teacher_outputs']%i)[:val,:val] / 255.
            fastdvdnet1 = fastdvdnet1.transpose(2,0,1)
            fastdvdnet1 = np.expand_dims(fastdvdnet1, 0)
            fastdvdnet1 = torch.tensor(fastdvdnet1).cuda()

            flow1 = gives_flow(args['flow'] % (i-1), H, W)
            mask1, exclusive_mask1 = gives_masks(args['mask_collision']%(i-1), args['mask_warping_res']%(i-1), H, W)
            
        else:
            inframes = [ut_moins_4, ut_moins_2, ut, ut_plus_2, ut_plus_4]
            stack2 = torch.stack(inframes, dim=0).contiguous().view((1, 5*C, H, W)).cuda()
            stack = stack2
            
            fastdvdnet2 = iio.read(args['teacher_outputs']%i)[:val, :val] / 255.
            fastdvdnet2 = fastdvdnet2.transpose(2,0,1)
            fastdvdnet2 = np.expand_dims(fastdvdnet2, 0)
            fastdvdnet2 = torch.tensor(fastdvdnet2).cuda()

            flow2 = gives_flow(args['flow'] % (i-1), H, W)
            mask2, exclusive_mask2 = gives_masks(args['mask_collision']%(i-1), args['mask_warping_res']%(i-1), H, W)
        
            model.eval()
            optimizer_student.zero_grad()

            for it in range(args['iter']):
                optimizer_student.zero_grad()   
                out_train1 = temp_denoise(model, stack1, noise_map)
                out_train2 = temp_denoise(model, stack2, noise_map)
                loss_student = criterion_student(out_train1, ut_moins_2, flow1, mask1, exclusive_mask1, fastdvdnet1, out_train2, ut_moins_1, flow2, mask2, exclusive_mask2, fastdvdnet2) 
                loss_student.backward()
                optimizer_student.step()
                del loss_student

        #Compute and save the denoising
        model.eval()
        with torch.no_grad():
            #denoise with the training stack : 
            outimg = temp_denoise(model, stack, noise_map)
            outimg = tensor_to_image(outimg)

            #denoise with the natural stack
            inframes = [ut_moins_2, ut_moins_1, ut, ut_plus_1, ut_plus_2]
            stack = torch.stack(inframes, dim=0).contiguous().view((1, 5*C, H, W)).cuda()
            outimg2 = temp_denoise(model, stack, noise_map)
            outimg2 = tensor_to_image(outimg2)
            
        #store the results
        iio.write(output_path + "training_{:03d}.png".format(i), (outimg*255))
        iio.write(args['output']%i, (outimg2*255))

        # Load frame to compute the PSNR
        ref_frame = iio.read(args['ref'] % (i))[:val, :val, :] 

        # Compute the PSNR according to the reference frame
        quant_our_stack = psnr(ref_frame.astype(outimg.dtype)/255, outimg)
        quant_Tassano_stack = psnr(ref_frame.astype(outimg2.dtype)/255., outimg2)
        if quant_Tassano_stack > quant_our_stack:
            value = 1
        else:
            value = 0
        
        ssim_our = ssim(outimg*255, ref_frame)
        ssim_Tassano = ssim(outimg2*255, ref_frame)

        print("Itération = {:02d}, PSNR our stack = {:5.3f}, PSNR Tassano's stack = {:5.3f}, SSIM our {:4.3f}, SSIM Tassano's = {:4.3f}  {:1d}".format(i, quant_our_stack, quant_Tassano_stack, ssim_our, ssim_Tassano,  value))

        list_PSNR_training.append(quant_our_stack)
        list_PSNR_eval.append(quant_Tassano_stack)
        plot_psnr.write(str(quant_Tassano_stack)+'\n')
        plot_ssim.write(str(ssim_Tassano)+'\n')
        plot_psnr_training.write(str(quant_our_stack)+'\n')
        plot_ssim_training.write(str(ssim_our)+'\n')
            
        del outimg
        del outimg2
        del stack

    torch.save([model, optimizer_student], output_path + "final_network.pth")

    tab_PSNR_training = np.array(list_PSNR_training)
    tab_PSNR_eval     = np.array(list_PSNR_eval)
    print("Average PSNR: training stack = {:5.3f}, natural stack = {:5.3f}".format(np.mean(tab_PSNR_training), np.mean(tab_PSNR_eval)))
    plot_psnr.close()
    plot_ssim.close()
    plot_psnr_training.close()
    plot_ssim_training.close()
Code Example #17
File: main.py Project: tehret/rcmfd
import iio
from rcmfd import rcmfd, NULL, STR, wrap
import numpy as np

im = iio.read('data/forged.tif').astype(np.float32)
# Remove extra transparency channels 
if im.shape[2] > 3:
    im = im[:,:,:3]
ps = 8
tau = 1.
automatic = True
out = np.zeros(im.shape, dtype=np.float32)
im = np.ascontiguousarray(im.transpose(2,0,1))
c, h, w = im.shape
detection = rcmfd.perform_matching_py(c, im, w, h, ps, tau, automatic, out, False)
iio.write("test.png", out)

print("This image is a forgery: ", detection)
Code Example #18
def MF2F(**args):
    """
    Main function
    args: Parameters
    """

    ################
    # LOAD THE MODEL
    ################
    if args['network'] == "model/model.pth":
        print("Loading model a pre-trained FastDVDnet \n")

        model = FastDVDnet(num_input_frames=5)

        #Load saved weights
        state_temp_dict = torch.load(args['network'])

        if cuda:
            device = torch.device("cuda")
            device_ids = [0]
            model = nn.DataParallel(model, device_ids=device_ids).cuda()

        model.load_state_dict(state_temp_dict)
    else:
        model_fn = args['network']
        model_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                model_fn)

        model = torch.load(model_fn)[0]
        model.cuda()

        device = torch.device("cuda")

    #################
    # DEFINE THE LOSS
    #################

    # The loss needs to be changed when used with different networks
    lr = args['lr']
    weight_decay = 0.00001

    criterion = Loss()
    criterion.cuda()
    optimizer = optim.Adam(model.parameters(),
                           lr=lr,
                           betas=(0.9, 0.999),
                           eps=1e-08,
                           weight_decay=weight_decay,
                           amsgrad=False)

    #####   Useful things   #####

    list_PSNR_training = []
    list_PSNR_eval = []

    #Initialisation

    frame = iio.read(args['input'] % (args['first']))

    H, W, C = frame.shape
    H = H if H < val else val
    W = W if W < val else val
    noise_std = Variable(
        torch.FloatTensor(np.array(args['noise_level'] / 255.)))
    noise_map = noise_std.expand((1, 1, H, W))

    # Write the psnr per frame in this file
    output_path = os.path.dirname(args['output']) + "/"
    path_psnr = output_path + "PSNR.txt"
    path_psnr_tot = output_path + "PSNR_total.txt"
    path_ssim = output_path + "SSIM.txt"
    path_ssim_tot = output_path + "SSIM_total.txt"
    path_training = output_path + "PSNR_training.txt"
    path_ssim_training = output_path + "SSIM_training.txt"
    plot_psnr = open(path_psnr, 'w')
    plot_ssim = open(path_ssim, 'w')
    plot_psnr_tot = open(path_psnr_tot, 'w')
    plot_ssim_tot = open(path_ssim_tot, 'w')
    plot_psnr_training = open(path_training, 'w')
    plot_ssim_training = open(path_ssim_training, 'w')

    optimizer.zero_grad()

    ###########
    # MAIN LOOP
    ###########
    for training in range(args['nb_trainings']):
        i = np.random.randint(
            args['first'] + 4,
            args['last'] - 3)  # index of the center frame ut used for training

        ut_moins_4 = reads_image(args['input'] % (i - 4), H, W)
        ut_moins_3 = reads_image(args['input'] % (i - 3), H, W)
        ut_moins_2 = reads_image(args['input'] % (i - 2), H, W)
        ut_moins_1 = reads_image(args['input'] % (i - 1), H, W)
        ut = reads_image(args['input'] % (i), H, W)
        ut_plus_1 = reads_image(args['input'] % (i + 1), H, W)
        ut_plus_2 = reads_image(args['input'] % (i + 2), H, W)
        ut_plus_3 = reads_image(args['input'] % (i + 3), H, W)
        ut_plus_4 = reads_image(args['input'] % (i + 4), H, W)

        #Creation of the stack

        inframes = [ut_moins_4, ut_moins_2, ut, ut_plus_2, ut_plus_4]
        stack = torch.stack(inframes, dim=0).contiguous().view(
            (1, 5 * C, H, W)).cuda()

        flow = gives_flow(args['flow'] % (i - 1), H, W)
        mask, exclusive_mask = gives_masks(args['mask_collision'] % (i - 1),
                                           args['mask_warping_res'] % (i - 1),
                                           H, W)

        model.eval()

        out_train = temp_denoise(model, stack, noise_map)
        loss = criterion(out_train, ut_moins_1, flow, mask, exclusive_mask)
        loss.backward()

        ## Gradients accumulate across iterations; the optimizer only steps
        ## once every nb_trainings_before_step calls to loss.backward()
        if training % args['nb_trainings_before_step'] == 0 and training >= 1:
            optimizer.step()
            del loss
            optimizer.zero_grad()
            color = '\033[1;31;40m'
        else:
            color = '\033[0;37;40m'

        #Compute and save the denoising
        model.eval()
        with torch.no_grad():
            #denoise with the training stack :
            outimg = temp_denoise(model, stack, noise_map)
            outimg = tensor_to_image(outimg)

            #denoise with the natural stack
            inframes = [ut_moins_2, ut_moins_1, ut, ut_plus_1, ut_plus_2]
            stack = torch.stack(inframes, dim=0).contiguous().view(
                (1, 5 * C, H, W)).cuda()
            outimg2 = temp_denoise(model, stack, noise_map)
            outimg2 = tensor_to_image(outimg2)

        # Load frame to compute the PSNR
        ref_frame = iio.read(args['ref'] % (i))[:val, :val, :]

        # Compute the PSNR according to the reference frame
        quant_training_stack = psnr(
            ref_frame.astype(outimg.dtype) / 255, outimg)
        quant_eval_stack = psnr(
            ref_frame.astype(outimg2.dtype) / 255., outimg2)
        if quant_eval_stack > quant_training_stack:
            value = 1
        else:
            value = 0

        ssim_training = ssim(outimg * 255, ref_frame)
        ssim_eval = ssim(outimg2 * 255, ref_frame)

        plot_psnr_tot.write(str(quant_eval_stack) + '\n')
        plot_ssim_tot.write(str(ssim_eval) + '\n')

        print(
            color +
            "Paires = {:03d}-{:03d}, PSNR training stack = {:5.3f}, PSNR eval stack = {:5.3f}, SSIM training stack = {:4.3f}, SSIM eval stack = {:4.3f}   {:1d}    {:04d}/{:04d}"
            .format(i - 1, i, quant_training_stack, quant_eval_stack,
                    ssim_training, ssim_eval, value, training,
                    args['nb_trainings'] - 1))

    ## Save the offline fine-tuned network
    torch.save([model, optimizer], output_path + "final_mf2f.pth")

    print("")
    print("")
    print(
        "     Process the entire video with the final offline fine-tuned network"
    )

    for i in range(args['first'] + 4, args['last'] - 3):

        ut_moins_4 = reads_image(args['input'] % (i - 4), H, W)
        ut_moins_3 = reads_image(args['input'] % (i - 3), H, W)
        ut_moins_2 = reads_image(args['input'] % (i - 2), H, W)
        ut_moins_1 = reads_image(args['input'] % (i - 1), H, W)
        ut = reads_image(args['input'] % (i), H, W)
        ut_plus_1 = reads_image(args['input'] % (i + 1), H, W)
        ut_plus_2 = reads_image(args['input'] % (i + 2), H, W)
        ut_plus_3 = reads_image(args['input'] % (i + 3), H, W)
        ut_plus_4 = reads_image(args['input'] % (i + 4), H, W)

        #Creation of the stack

        inframes = [ut_moins_4, ut_moins_2, ut, ut_plus_2, ut_plus_4]
        stack = torch.stack(inframes, dim=0).contiguous().view(
            (1, 5 * C, H, W)).cuda()

        #Compute and save the denoising
        model.eval()
        with torch.no_grad():
            # denoise with the training stack:
            outimg = temp_denoise(model, stack, noise_map)
            outimg = tensor_to_image(outimg)

            #denoise with the natural stack
            inframes = [ut_moins_2, ut_moins_1, ut, ut_plus_1, ut_plus_2]
            stack = torch.stack(inframes, dim=0).contiguous().view(
                (1, 5 * C, H, W)).cuda()
            outimg2 = temp_denoise(model, stack, noise_map)
            outimg2 = tensor_to_image(outimg2)

        # Load frame to compute the PSNR
        ref_frame = iio.read(args['ref'] % (i))[:val, :val, :]

        # Compute the PSNR according to the reference frame
        quant_training_stack = psnr(
            ref_frame.astype(outimg.dtype) / 255, outimg)
        quant_eval_stack = psnr(
            ref_frame.astype(outimg2.dtype) / 255., outimg2)
        if quant_eval_stack > quant_training_stack:
            value = 1
        else:
            value = 0

        ssim_training = ssim(outimg * 255, ref_frame)
        ssim_eval = ssim(outimg2 * 255, ref_frame)

        print(
            "Itération = {:03d}, PSNR training stack = {:5.3f}, PSNR eval stack = {:5.3f}, SSIM training stack = {:4.3f}, SSIM eval stack = {:4.3f}   {:1d}"
            .format(i, quant_training_stack, quant_eval_stack, ssim_training,
                    ssim_eval, value))

        iio.write(output_path + "training_{:03d}.png".format(i), 255 * outimg)
        iio.write(args['output'] % (i), 255 * outimg2)

        list_PSNR_training.append(quant_training_stack)
        list_PSNR_eval.append(quant_eval_stack)
        plot_psnr.write(str(quant_eval_stack) + '\n')
        plot_ssim.write(str(ssim_eval) + '\n')
        plot_psnr_training.write(str(quant_training_stack) + '\n')
        plot_ssim_training.write(str(ssim_training) + '\n')

    tab_PSNR_training = np.array(list_PSNR_training)
    tab_PSNR_eval = np.array(list_PSNR_eval)
    print(
        "Average PSNR: training stack = {:5.3f}, eval stack = {:5.3f}".format(
            np.mean(tab_PSNR_training), np.mean(tab_PSNR_eval)))
    plot_psnr.close()
    plot_ssim.close()
    plot_psnr_tot.close()
    plot_ssim_tot.close()
    plot_psnr_training.close()
    plot_ssim_training.close()
Code Example #19
def blind_denoising(**args):
    """
    Main function
    args: Parameters
    """

    ##########
    # LOAD THE DATA
    ##########

    np.random.seed(2019)
    if args['real']:
        print('Give sigma already normalized')
        sigma = args['sigma']
    else:
        sigma = args['sigma'] / 255

    sigma = min(max(sigma, 0.), 0.0784)

    model = load_net()
    model.cuda()

    dtype = torch.cuda.FloatTensor

    ########################
    # FINE-TUNING PARAMETERS
    ########################

    # Define loss
    lr = args['lr']
    criterion = WarpedLoss()
    criterion.cuda()
    optimizer = optim.Adam(model.parameters(), lr=lr)

    model.train()
    model.zero_grad()
    optimizer.zero_grad()

    #######################
    # MAIN LOOP FINE-TUNING
    #######################

    for idx1 in range(args['frames'] - 1, -1, -1):
        for idx2 in range(args['frames']):
            if idx1 != idx2:
                # read the two images
                im1 = piio.read(args['input'] % idx1).squeeze().astype(
                    np.float64)
                if len(im1.shape) < 4:
                    im1 = np.expand_dims(im1, 0)
                    im1 = np.expand_dims(im1, 0)
                    if args['real']:
                        im1 /= 65535.
                    else:
                        im1 /= 255.

                im1 = np.pad(im1, ((0, 0), (0, 0), (48, 48), (48, 48)),
                             'symmetric')

                im1 = torch.Tensor(im1).cuda().repeat(1, 3, 1, 1)
                curr_frame_var, _ = rgb2bayer(im1)

                im2 = piio.read(args['input'] % idx2).squeeze().astype(
                    np.float64)
                if len(im2.shape) < 4:
                    im2 = np.expand_dims(im2, 0)
                    im2 = np.expand_dims(im2, 0)
                    if args['real']:
                        im2 /= 65535.
                    else:
                        im2 /= 255.

                prev_frame_var = torch.Tensor(im2).cuda().repeat(1, 3, 1, 1)
                B, C, H, W = prev_frame_var.size()

                # read the transform
                p_file = open(args['input_p'] % (idx1, idx2), 'r')
                p = p_file.readline()
                p = p_file.readline()
                p = list(map(float, p.split(' ')[:-1]))

                sample = {
                    "mosaic": curr_frame_var,
                    "noise_level": sigma * torch.ones(1).cuda()
                }

                model.train()
                optimizer.zero_grad()

                # Do noise2noise1shot learning
                for it in range(args['iter']):
                    out_train = model(sample)
                    BO, CO, HO, WO = out_train.size()
                    DH = (HO - H) // 2
                    DW = (WO - W) // 2
                    loss = criterion(out_train[:, :, DH:-DH, DW:-DW],
                                     prev_frame_var, p)
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

    # Estimate the quality after overfitting
    noisy = piio.read(args['input'] % 0).squeeze().astype(np.float64)
    if len(noisy.shape) < 4:
        noisy = np.expand_dims(noisy, 0)
        noisy = np.expand_dims(noisy, 0)
        if args['real']:
            noisy /= 65535.
        else:
            noisy /= 255.

    H = noisy.shape[2]
    W = noisy.shape[3]
    noisy = np.pad(noisy, ((0, 0), (0, 0), (48, 48), (48, 48)), 'symmetric')
    curr_frame_var, _ = rgb2bayer(
        Variable(torch.Tensor(noisy).cuda().repeat(1, 3, 1, 1)))
    sample = {
        "mosaic": curr_frame_var,
        "noise_level": sigma * torch.ones(1).cuda()
    }
    with torch.no_grad():  # PyTorch v0.4.0
        out = model(sample)
    BO, CO, HO, WO = out.size()
    DH = (HO - H) // 2
    DW = (WO - W) // 2
    out = out[:, :, DH:-DH, DW:-DW]
    out = out.cpu().numpy().transpose(2, 3, 1, 0).squeeze().clip(0, 1)

    if args['ref'] is not None:
        ref = piio.read(args['ref']).squeeze().astype(np.float64) / 255.
        quant_psnr = psnr(ref, out)
        quant_ssim = compare_ssim(ref, out, data_range=1., multichannel=True)
        print(quant_psnr, quant_ssim)

    if args['real']:
        out *= 65535.
    else:
        out *= 255.
    piio.write(args['output'], out)

    if args['output_network'] is not None:
        torch.save([model, optimizer], args['output_network'])