Example #1
 def _init_images(self):
     self.image1_torch = np_to_torch(self.image1).type(
         torch.cuda.FloatTensor)
     self.image2_torch = np_to_torch(self.image2).type(
         torch.cuda.FloatTensor)
     if self.watermark_hint is not None:
         self.watermark_hint_torch = np_to_torch(self.watermark_hint).type(
             torch.cuda.FloatTensor)
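The snippets in this listing rely on `np_to_torch` and `torch_to_np` helpers that are not shown. Below is a minimal sketch of what they are assumed to do, following the usual Deep-Image-Prior-style convention of channel-first C x H x W numpy images in [0, 1] with a leading batch dimension on the torch side; the definitions in the original repository may differ.

import torch


def np_to_torch(img_np):
    # Assumed behaviour: C x H x W numpy array in [0, 1] -> 1 x C x H x W float tensor.
    return torch.from_numpy(img_np)[None, :]


def torch_to_np(img_var):
    # Assumed behaviour: 1 x C x H x W tensor -> C x H x W numpy array.
    return img_var.detach().cpu().numpy()[0]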
Example #2
def get_video_noise(input_depth,
                    method,
                    temporal_size,
                    spatial_size,
                    noise_type='u',
                    var=1. / 100,
                    type="dependant"):
    """
    Returns a pytorch.Tensor of size (frame_number x `input_depth` x `spatial_size[0]` x `spatial_size[1]`)
    initialized in a specific way.
    Args:
        input_depth: number of channels in the tensor
        method: `noise` for fillting tensor with noise; `meshgrid` for np.meshgrid
        temporal_size: number of frames
        spatial_size: spatial size of the tensor to initialize
        noise_type: 'u' for uniform; 'n' for normal
        var: a factor, a noise will be multiplicated by. Basically it is standard deviation scaler.
    """
    if isinstance(spatial_size, int):
        spatial_size = (spatial_size, spatial_size)
    if method == 'noise':
        all_noise = []
        for i in range(temporal_size):
            shape = [input_depth, spatial_size[0], spatial_size[1]]
            if len(all_noise) > 0:
                if type == "dependant":
                    frame = np.random.uniform(0, 1, size=shape)
                    frame *= var
                    all_noise.append(all_noise[-1] + frame)
                elif type == "half_dependant":
                    frame = np.random.uniform(0, 1, size=shape)
                    frame *= var
                    new_noise = (all_noise[-1] + frame)
                    new_noise[:input_depth //
                              2, :, :] = (var * 10) * np.random.uniform(
                                  0, 1, size=shape)[:input_depth // 2, :, :]
                    all_noise.append(new_noise)
            else:
                frame = np.random.uniform(-0.5, 0.5, size=shape)
                frame *= (var * 10)
                all_noise.append(frame)
        return np_to_torch(np.array(all_noise))[0]
    elif method == 'meshgrid':
        assert False  # this branch is disabled here and raises immediately
        assert input_depth % 2 == 0
        X, Y = np.meshgrid(
            np.arange(0, spatial_size[1]) / float(spatial_size[1] - 1),
            np.arange(0, spatial_size[0]) / float(spatial_size[0] - 1))
        meshgrid = np.concatenate([X[None, :], Y[None, :]] *
                                  (input_depth // 2))
        net_input = np_to_torch(meshgrid)
    else:
        assert False

    return net_input
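A short usage sketch for `get_video_noise` (the call below is illustrative, and the expected shape assumes the `np_to_torch` helper sketched above): with `type='dependant'`, every frame after the first is the previous frame plus a small uniform increment, so consecutive frames are strongly correlated.

# Illustrative: 8 frames of 32-channel, 64x64 temporally dependent noise.
video_noise = get_video_noise(32, 'noise', temporal_size=8, spatial_size=64, type='dependant')
print(video_noise.shape)  # expected: torch.Size([8, 32, 64, 64])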
Example #3
 def _init_image(self):
     self.image_torch1 = np_to_torch(self.image1).type(
         torch.cuda.FloatTensor)
     self.image_torch2 = np_to_torch(self.image2).type(
         torch.cuda.FloatTensor)
     # Complementary binary masks covering the two halves of the image (split along the last axis).
     self.first_half = np.zeros_like(self.image1)
     self.first_half[:, :, :self.first_half.shape[2] // 2] = 1.
     self.first_half_torch = np_to_torch(self.first_half).type(
         torch.cuda.FloatTensor)
     self.second_half = np.zeros_like(self.image1)
     self.second_half[:, :, self.second_half.shape[2] // 2:] = 1.
     self.second_half_torch = np_to_torch(self.second_half).type(
         torch.cuda.FloatTensor)
Example #4
def get_noise(input_depth, method, spatial_size, noise_type='u', var=1. / 100):
    """
    Returns a pytorch.Tensor of size (1 x `input_depth` x `spatial_size[0]` x `spatial_size[1]`)
    initialized in a specific way.
    Args:
        input_depth: number of channels in the tensor
        method: `noise` for filling the tensor with noise; `meshgrid` for np.meshgrid
        spatial_size: spatial size of the tensor to initialize
        noise_type: 'u' for uniform; 'n' for normal
        var: a factor the noise is multiplied by; effectively a standard-deviation scaler.
    """
    if isinstance(spatial_size, int):
        spatial_size = (spatial_size, spatial_size)
    if method == 'noise':
        shape = [1, input_depth, spatial_size[0], spatial_size[1]]
        net_input = torch.zeros(shape)

        fill_noise(net_input, noise_type)
        net_input *= var
    elif method == 'meshgrid':
        assert input_depth % 2 == 0
        X, Y = np.meshgrid(np.arange(0, spatial_size[1]) / float(spatial_size[1] - 1),
                           np.arange(0, spatial_size[0]) / float(spatial_size[0] - 1))
        meshgrid = np.concatenate([X[None, :], Y[None, :]] * (input_depth // 2))
        net_input = np_to_torch(meshgrid)
    else:
        assert False

    return net_input
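`get_noise` additionally relies on a `fill_noise` helper that is not shown; it is assumed to fill the tensor in place with uniform or normal noise. A hedged sketch of that helper and an illustrative call follows (names and exact behaviour are assumptions, not the repository's definitive code).

import torch


def fill_noise(x, noise_type):
    # Assumed helper: fill `x` in place with uniform ('u') or standard normal ('n') noise.
    if noise_type == 'u':
        x.uniform_()
    elif noise_type == 'n':
        x.normal_()
    else:
        assert False


# Illustrative calls: both branches return a 1 x input_depth x H x W tensor.
z = get_noise(32, 'noise', (64, 64), noise_type='u', var=1. / 100)
g = get_noise(2, 'meshgrid', (64, 64))  # channels hold normalized X and Y coordinate planes
print(z.shape, g.shape)  # expected: torch.Size([1, 32, 64, 64]) torch.Size([1, 2, 64, 64])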
Example #5
def denoising(noise_im,
              clean_im,
              LR=1e-2,
              sigma=5,
              rho=1,
              eta=0.5,
              total_step=20,
              prob1_iter=500,
              result_root=None,
              f=None):

    input_depth = 3
    latent_dim = 3

    en_net = Encoder(input_depth,
                     latent_dim,
                     down_sample_norm='batchnorm',
                     up_sample_norm='batchnorm').cuda()
    de_net = Decoder(latent_dim,
                     input_depth,
                     down_sample_norm='batchnorm',
                     up_sample_norm='batchnorm').cuda()

    # Pre-trained blind color DnCNN denoiser, kept frozen during optimization.
    model = net(3, 3, nc=64, nb=20, act_mode='R')
    model_path = '/home/dihan/KAIR/model_zoo/dncnn_color_blind.pth'
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False

    model = model.cuda()

    noise_im_torch = np_to_torch(noise_im)
    noise_im_torch = noise_im_torch.cuda()

    with torch.no_grad():
        r_dncnn_np = torch_to_np(model(noise_im_torch))

    psnr_dncnn = compare_psnr(clean_im.transpose(1, 2, 0),
                              r_dncnn_np.transpose(1, 2, 0), 1)
    ssim_dncnn = compare_ssim(r_dncnn_np.transpose(1, 2, 0),
                              clean_im.transpose(1, 2, 0),
                              multichannel=True,
                              data_range=1)

    print('PSNR_DNCNN: {}, SSIM_DNCNN: {}'.format(psnr_dncnn, ssim_dncnn),
          file=f,
          flush=True)

    parameters = list(en_net.parameters()) + list(de_net.parameters())
    optimizer = torch.optim.Adam(parameters, lr=LR)
    l2_loss = torch.nn.MSELoss(reduction='sum').cuda()

    i0 = np_to_torch(noise_im).cuda()
    Y = torch.zeros_like(noise_im_torch).cuda()

    i0_til_torch = np_to_torch(noise_im).cuda()

    diff_original_np = noise_im.astype(np.float32) - clean_im.astype(
        np.float32)
    diff_original_name = 'Original_dis.png'
    save_hist(diff_original_np, result_root + diff_original_name)

    best_psnr = 0
    best_ssim = 0

    for i in range(total_step):

        ############################### sub-problem 1 #################################

        for i_1 in range(prob1_iter):

            optimizer.zero_grad()

            mean, log_var = en_net(noise_im_torch)

            z = sample_z(mean, log_var)
            out = de_net(z)

            total_loss = 0.5 * l2_loss(out, noise_im_torch)
            total_loss += kl_loss(mean, log_var, i0, sigma)
            total_loss += (rho / 2) * l2_loss(i0 + Y, i0_til_torch)

            total_loss.backward()
            optimizer.step()

            with torch.no_grad():
                i0 = ((1 / sigma**2) * mean + rho *
                      (i0_til_torch - Y)) / ((1 / sigma**2) + rho)

        with torch.no_grad():

            ############################### sub-problem 2 #################################

            i0_til_torch = model(i0 + Y)

            ############################### sub-problem 3 #################################

            Y = Y + eta * (i0 - i0_til_torch)

            ###############################################################################

            i0_np = torch_to_np(i0)
            Y_np = torch_to_np(Y)

            denoise_obj_pil = np_to_pil((i0_np + Y_np).clip(0, 1))

            Y_norm_np = np.sqrt((Y_np * Y_np).sum(0))

            i0_pil = np_to_pil(i0_np)

            mean_np = torch_to_np(mean)

            mean_pil = np_to_pil(mean_np)

            out_np = torch_to_np(out)
            out_pil = np_to_pil(out_np)

            diff_np = mean_np - clean_im

            denoise_obj_name = 'denoise_obj_{:04d}'.format(i) + '.png'
            Y_name = 'Y_{:04d}'.format(i) + '.png'
            i0_name = 'i0_num_epoch_{:04d}'.format(i) + '.png'
            mean_i_name = 'Latent_im_num_epoch_{:04d}'.format(i) + '.png'
            out_name = 'res_of_dec_num_epoch_{:04d}'.format(i) + '.png'
            diff_name = 'Latent_dis_num_epoch_{:04d}'.format(i) + '.png'

            denoise_obj_pil.save(result_root + denoise_obj_name)
            save_heatmap(Y_norm_np, result_root + Y_name)
            i0_pil.save(result_root + i0_name)
            mean_pil.save(result_root + mean_i_name)
            out_pil.save(result_root + out_name)
            save_hist(diff_np, result_root + diff_name)

            i0_til_np = torch_to_np(i0_til_torch).clip(0, 1)

            psnr = compare_psnr(clean_im.transpose(1, 2, 0),
                                i0_til_np.transpose(1, 2, 0), 1)
            ssim = compare_ssim(clean_im.transpose(1, 2, 0),
                                i0_til_np.transpose(1, 2, 0),
                                multichannel=True,
                                data_range=1)
            i0_til_pil = np_to_pil(i0_til_np)
            i0_til_pil.save(os.path.join(result_root, '{}'.format(i) + '.png'))

            print('Iteration: %02d, VAE Loss: %f, PSNR: %f, SSIM: %f' %
                  (i, total_loss.item(), psnr, ssim),
                  file=f,
                  flush=True)

            if best_psnr < psnr:
                best_psnr = psnr
                best_ssim = ssim
            else:
                break

    return i0_til_np, best_psnr, best_ssim
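Example #5 also uses `sample_z` and `kl_loss` helpers that are not part of the listing. Below is a minimal sketch of what they are assumed to compute, namely the standard reparameterization trick and a closed-form Gaussian KL term toward N(i0, sigma^2); the original definitions and their scaling may differ.

import math

import torch


def sample_z(mean, log_var):
    # Assumed: reparameterization trick, z = mean + std * eps with eps ~ N(0, I).
    eps = torch.randn_like(mean)
    return mean + torch.exp(0.5 * log_var) * eps


def kl_loss(mean, log_var, mu0, sigma):
    # Assumed: KL( N(mean, exp(log_var)) || N(mu0, sigma^2) ), summed over all elements.
    var = torch.exp(log_var)
    return 0.5 * torch.sum((var + (mean - mu0) ** 2) / sigma ** 2
                           - 1.0 - log_var + 2.0 * math.log(sigma))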
Example #6
 def _init_images(self):
     self.images_torch = [
         np_to_torch(image).type(torch.cuda.FloatTensor)
         for image in self.images
     ]