Example #1
def father_to_son(hr_father):
    sf = np.array([1.0, 1.5])
    lr_son = imresize(hr_father, 1.0 / sf, kernel='cubic')
    print(lr_son.shape)
    lr_son = imresize(lr_son, sf, kernel='cubic')
    print(lr_son.shape)
    return np.clip(lr_son, 0, 1)
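The father/son naming appears to follow ZSSR-style training, where the HR input is the "father" and its downscale-then-upscale round trip yields the degraded "son". A minimal stand-alone sketch of the same round trip, using scipy's zoom as a stand-in for the repo-local imresize (names and the cubic order are assumptions):

import numpy as np
from scipy.ndimage import zoom  # stand-in resizer for the repo-local imresize

hr_father = np.random.rand(64, 96)
sf = np.array([1.0, 1.5])
lr_son = zoom(hr_father, 1.0 / sf, order=3)  # cubic downscale: (64, 96) -> (64, 64)
lr_son = zoom(lr_son, sf, order=3)           # cubic upscale back to (64, 96)
lr_son = np.clip(lr_son, 0, 1)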
Example #2
def back_projection(y_sr, y_lr, down_kernel, up_kernel):
    y_sr += imresize(
        y_lr -
        imresize(y_sr, output_shape=y_lr.shape[0:2], kernel=down_kernel),
        output_shape=y_sr.shape[0:2],
        kernel=up_kernel)
    return np.clip(y_sr, 0, 1)
Example #3
def next_batch(vimagedata, scalar_scale_param=scale_parameter):
    data_ik = get_data(vimagedata)
    count = 0
    batch_data = None
    while True:
        vdatal = []
        vdatah = []
        vdatab = []
        if np.shape(data_ik)[0] > batch_size:
            batch_data, data_ik = data_ik[:batch_size], data_ik[batch_size:]
        elif np.shape(data_ik)[0] == batch_size:
            batch_data = data_ik
            data_ik = get_data(vimagedata)
        else:
            data_ik = np.concatenate((data_ik, get_data(vimagedata)), axis=0)
            continue
        for i in range(batch_size):
            highres = batch_data[i, :, :]

            lowres = imresize(highres, scalar_scale=1.0 / scalar_scale_param)
            bicubic = imresize(lowres, scalar_scale=scalar_scale_param)

            vdatal.append(lowres)
            vdatah.append(highres)
            vdatab.append(bicubic)

        yield np.asarray(vdatah), np.asarray(vdatal), np.asarray(vdatab)
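The carry-over logic above (slice off a batch, top the pool up whenever it runs low) can be isolated into a self-contained sketch; get_data and the shapes here are assumptions for illustration:

import numpy as np

def batches(get_data, batch_size=4):
    pool = get_data()
    while True:
        # top up the FIFO pool until a full batch can be sliced off
        while pool.shape[0] < batch_size:
            pool = np.concatenate((pool, get_data()), axis=0)
        batch, pool = pool[:batch_size], pool[batch_size:]
        yield batch

gen = batches(lambda: np.random.rand(6, 8, 8))
print(next(gen).shape)  # (4, 8, 8)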
Example #4
    def test_imresize_reversed(self):
        """ Tests imresize function
        """
        A = imresize(np.random.randn(100, 100), (100, 100))
        B = imresize(A, (200, 200))
        C = imresize(B, (100, 100))

        residual = np.linalg.norm(A - C) / np.linalg.norm(A)
        assert residual < 0.1
Example #5
def back_projection(y_sr, y_lr, down_kernel, up_kernel, sf=None):
    y_sr += imresize(y_lr - imresize(y_sr,
                                     scale_factor=1.0/sf,
                                     output_shape=y_lr.shape,
                                     kernel=down_kernel),
                     scale_factor=sf,
                     output_shape=y_sr.shape,
                     kernel=up_kernel)
    return np.clip(y_sr, 0, 1)
Example #6
def back_projection(y_sr, y_lr, down_kernel, up_kernel, sf=None):
    """Projects the error between the downscaled SR image and the LR image"""
    y_sr += imresize(y_lr - imresize(y_sr,
                                     scale_factor=1.0 / sf,
                                     output_shape=y_lr.shape,
                                     kernel=down_kernel),
                     scale_factor=sf,
                     output_shape=y_sr.shape,
                     kernel=up_kernel)
    return np.clip(y_sr, 0, 1)
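The update implemented in these back_projection variants is the classic iterative back-projection step y_sr ← y_sr + U(y_lr − D(y_sr)): the low-resolution reconstruction error is upsampled and re-injected. A runnable sketch with cv2.resize standing in for the repo's imresize (the kernel choice and iteration count are assumptions):

import numpy as np
import cv2  # stand-in resizer; the snippets here use a repo-local imresize

def resize(img, shape):
    # cv2.resize expects (width, height); bicubic as a stand-in kernel
    return cv2.resize(img, (shape[1], shape[0]), interpolation=cv2.INTER_CUBIC)

def back_project(y_sr, y_lr, iters=5):
    for _ in range(iters):
        err = y_lr - resize(y_sr, y_lr.shape[:2])            # LR-domain error
        y_sr = np.clip(y_sr + resize(err, y_sr.shape[:2]), 0, 1)
    return y_sr

y_lr = np.random.rand(32, 32).astype(np.float32)
y_sr = back_project(resize(y_lr, (64, 64)), y_lr)  # downscaling y_sr now nearly reproduces y_lr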
Example #7
def back_projection(y_sr,
                    y_lr,
                    down_kernel,
                    up_kernel,
                    sf=None,
                    ds_method='direct'):
    y_sr += imresize(y_lr - imresize(y_sr,
                                     scale=1.0 / sf,
                                     output_shape=y_lr.shape,
                                     kernel=down_kernel,
                                     ds_method=ds_method),
                     scale=sf,
                     output_shape=y_sr.shape,
                     kernel=up_kernel)
    return np.clip(y_sr, 0, 1)
Example #8
    def inference(self, input_img, scale, depth):
        # normalize to [0, 1] if the input is still in [0, 255] range
        if np.max(input_img) > 1:
            input_img = (input_img / 255).astype(np.float32)

        infer_image_scaled = imresize(input_img,
                                      scalar_scale=scale,
                                      output_shape=None)

        size = infer_image_scaled.shape
        if (len(infer_image_scaled.shape) == 3):
            infer_image_input = infer_image_scaled[:, :, 0].reshape(
                1, size[0], size[1], 1)
        else:
            infer_image_input = infer_image_scaled.reshape(
                1, size[0], size[1], 1)

        sr_img = self.sess.run(self.preds_test[depth],
                               feed_dict={self.image_test: infer_image_input})
        #sr_img = np.expand_dims(sr_img,axis=-1)

        if (len(infer_image_scaled.shape) == 3):
            infer_image_scaled[:, :, 0] = sr_img[0, :, :, 0]
        else:
            infer_image_scaled = sr_img[0]

        # output dim [w, d, c]
        return infer_image_scaled
Example #9
def brisque(image):
    y_mscn = compute_image_mscn_transform(image)
    half_scale = imresize(image, scalar_scale=0.5, method='bicubic')
    y_half_mscn = compute_image_mscn_transform(half_scale)
    feats_full = extract_subband_feats(y_mscn)
    feats_half = extract_subband_feats(y_half_mscn)
    return np.concatenate((feats_full, feats_half))
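compute_image_mscn_transform is repo-local; a common MSCN formulation (local mean subtraction and contrast normalization, as used by BRISQUE) looks roughly like this sketch, where the Gaussian window width and stabilizing constant are assumptions:

import numpy as np
from scipy.ndimage import gaussian_filter

def mscn(img, sigma=7.0 / 6.0, c=1e-3):
    mu = gaussian_filter(img, sigma)                   # local mean
    var = gaussian_filter(img * img, sigma) - mu * mu  # local variance
    return (img - mu) / (np.sqrt(np.abs(var)) + c)     # normalized coefficients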
Example #10
 def test_imresize(self):
     """ Tests imresize function
     """
     A = np.random.randn(100, 100)
     m, n = imresize(A, (50, 200)).shape
     assert m == 50
     assert n == 200
Example #11
 def test_float_size(self):
     """ Tests imresize function
     """
     A = np.random.randn(100, 100)
     m, n = imresize(A, 0.5).shape
     assert m == 50
     assert n == 50
Example #12
def lpips_analysis(gt, srs, scale):
    from collections import OrderedDict
    results = OrderedDict()

    gt = imread(gt)
    h, w, _ = gt.shape
    gt = gt[:(h // 8) * 8, :(w // 8) * 8]
    srs = [imread(sr) for sr in srs]

    lpipses_sp = []
    lpipses_gl = []
    lrpsnrs = []
    n_samples = len(srs)

    for sample_idx in tqdm.trange(n_samples):
        sr = srs[sample_idx]

        h1, w1, _ = gt.shape
        sr = sr[:h1, :w1]

        lpips_sp = loss_fn_alex_sp(2 * t(sr) - 1, 2 * t(gt) - 1)
        lpipses_sp.append(lpips_sp)
        lpipses_gl.append(lpips_sp.mean().item())

        imgA_lr = imresize(sr, 1 / scale)
        imgB_lr = imresize(gt, 1 / scale)
        lrpsnr = psnr(imgA_lr, imgB_lr)
        lrpsnrs.append(lrpsnr)

    lpips_gl = np.min(lpipses_gl)

    results['LPIPS_mean'] = np.mean(lpipses_gl)
    results['LRPSNR_worst'] = np.min(lrpsnrs)
    results['LRPSNR_mean'] = np.mean(lrpsnrs)

    lpipses_stacked = torch.stack([l[0, 0, :, :] for l in lpipses_sp], dim=2)

    lpips_best_sp, _ = torch.min(lpipses_stacked, dim=2)
    lpips_loc = lpips_best_sp.mean().item()

    score = (lpips_gl - lpips_loc) / lpips_gl * 100

    results['score'] = score

    dprint(results)

    return results
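The final score compares the best whole-image (global) LPIPS against a per-pixel best-of-all-samples (local) LPIPS; the gap, as a percentage of the global value, rewards sample diversity. A worked example with assumed values:

lpips_gl, lpips_loc = 0.30, 0.24  # assumed global / local LPIPS values
score = (lpips_gl - lpips_loc) / lpips_gl * 100
print(round(score, 1))  # 20.0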
Example #13
def yanzheng(g, l):
    # yanzheng ("verify"): reconstruct each Gaussian level by adding the
    # Laplacian band to the blurred, upsampled next level; huanyuan
    # ("restored") collects the reconstructed levels
    huanyuan = []
    for i in range(len(l)):
        t = l[i] + conv.Convolve(imresize.imresize(g[i + 1], g[i].shape),
                                 gskernel(3, 1), 3)
        huanyuan.append(t)
    listImShow(huanyuan)
    return huanyuan
Example #14
 def create_prob_maps(self, scale_factor):
     # Create loss maps for input image and downscaled one
     loss_map_big = create_gradient_map(self.input_image)
     loss_map_sml = create_gradient_map(imresize(im=self.input_image, scale_factor=scale_factor, kernel='cubic'))
     # Create corresponding probability maps
     prob_map_big = create_probability_map(loss_map_big, self.d_input_shape)
     prob_map_sml = create_probability_map(nn_interpolation(loss_map_sml, int(1 / scale_factor)), self.g_input_shape)
     return prob_map_big, prob_map_sml
Example #15
def spatial_backprojection_np_cpu(hr_pred, lr_in):
    lrsz = lr_in[..., 0, :].shape
    hrsz = hr_pred[..., 0, :].shape

    # resizer = torch_resizer.Resizer(hr_pred[..., 0, :].shape, output_shape=lrsz, kernel='cubic', antialiasing=True, device='cuda', dtype=torch.float16)
    # resizer2 = torch_resizer.Resizer(lrsz, output_shape=hrsz, kernel='cubic', antialiasing=True, device='cuda', dtype=torch.float16)

    for fidx in range(hr_pred.shape[2]):
        err_ = imresize(hr_pred[..., fidx, :],
                        output_shape=lrsz,
                        kernel='cubic') - lr_in[..., fidx, :]
        hr_pred[..., fidx, :] -= imresize(err_,
                                          output_shape=hrsz,
                                          kernel='cubic')
        # err_ = resizer(torch.tensor(hr_pred[..., fidx, :], dtype=torch.float16).to(device)).cpu().numpy() - lr_in[..., fidx, :]
        # hr_pred[..., fidx, :] -= resizer2(torch.tensor(err_, dtype=torch.float16).to(device)).cpu().numpy()
    return hr_pred
Example #16
def lin_interpolate_2(data_path, save_path):
    """
    used as baseline for comparison
    """
    video_tensor = np.asarray(read_seq_from_folder(data_path, "", "float32"))
    resized_tensor = np.clip(
        imresize(video_tensor, scale_factor=[2, 1, 1, 1], kernel="linear"), 0.,
        1.)
    save_output_result(resized_tensor, save_path)
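A simplified stand-in for the 4-D scale_factor=[2, 1, 1, 1] call above: linearly interpolate a (T, H, W, C) tensor along time by inserting frame midpoints. This yields 2T − 1 frames rather than exactly 2T, so it is only an approximation of the resize above:

import numpy as np

video = np.random.rand(4, 8, 8, 3).astype(np.float32)
mid = 0.5 * (video[:-1] + video[1:])  # linear interpolation at t + 0.5
doubled = np.empty((2 * video.shape[0] - 1,) + video.shape[1:], video.dtype)
doubled[0::2], doubled[1::2] = video, mid
print(doubled.shape)  # (7, 8, 8, 3)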
Example #17
 def test_rgb(self):
     """ Tests imresize function
     """
     A = np.random.randn(100, 100, 3)
     B = imresize(A, (50, 100))
     m, n, c = B.shape
     assert m == 50
     assert n == 100
     assert c == 3
Example #18
    def __getitem__(self, index):
        # load image
        img = Image.open(self.image_filenames[index]).convert('RGB')
        img = np.asarray(img)
        img = util.modcrop(img, self.scale_factor)
        # original HR image size
        hr_img_w = img.shape[0]
        hr_img_h = img.shape[1]

        # determine lr_img LR image size
        lr_img_w = hr_img_w // self.scale_factor
        lr_img_h = hr_img_h // self.scale_factor

        # only Y-channel is super-resolved
        if self.is_gray:
            img = np.asarray(img)
            img = color.rgb2ycbcr(img) / 255
            channel = len(img.shape)
            img, _, _ = np.split(img, indices_or_sections=channel, axis=-1)
            # img = img.convert('YCbCr')
            # precision degrade from float64 to float32
            img = Image.fromarray(img.squeeze())

        # hr_img HR image
        tensor_transform = tfs.ToTensor()
        hr_img = tensor_transform(img)

        # lr_img LR image
        img = np.asarray(img)
        if not self.bic_inp:
            lr_img = imresize(img, output_shape=(lr_img_w, lr_img_h))
        else:
            lr_img = imresize(imresize(img, output_shape=(lr_img_w, lr_img_h)),
                              output_shape=(hr_img_w, hr_img_h))

        bc_img = imresize(lr_img, output_shape=(hr_img_w, hr_img_h))

        bc_img = Image.fromarray(bc_img.squeeze())
        lr_img = Image.fromarray(lr_img.squeeze())
        bc_img = tensor_transform(bc_img)
        lr_img = tensor_transform(lr_img)

        return lr_img, hr_img, bc_img
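util.modcrop is repo-local; its standard SR semantics are to trim height and width so they divide evenly by the scale factor, which is what makes the integer divisions above exact. A sketch under that assumption:

import numpy as np

def modcrop(img, scale):
    # trim so H and W are divisible by the scale factor
    h = img.shape[0] - img.shape[0] % scale
    w = img.shape[1] - img.shape[1] % scale
    return img[:h, :w]

print(modcrop(np.zeros((101, 99, 3)), 4).shape)  # (100, 96, 3)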
Example #19
    def make_data_tensor(self, sess, scale_list, noise_std=0.0):
        label_train_ = sess.run(self.label_train)

        input_meta = []
        label_meta = []

        for t in range(self.META_BATCH_SIZE):
            input_task = []
            label_task = []

            scale = np.random.choice(scale_list, 1)[0]
            Kernel = generate_kernel(k1=scale * 2.5, ksize=15)
            for idx in range(self.TASK_BATCH_SIZE * 2):
                img_HR = label_train_[t * self.TASK_BATCH_SIZE * 2 + idx]
                clean_img_LR = imresize(img_HR,
                                        scale=1. / scale,
                                        kernel=Kernel)

                img_LR = np.clip(
                    clean_img_LR +
                    np.random.randn(*clean_img_LR.shape) * noise_std, 0., 1.)

                img_ILR = imresize(img_LR,
                                   scale=scale,
                                   output_shape=img_HR.shape,
                                   kernel='cubic')

                input_task.append(img_ILR)
                label_task.append(img_HR)

            input_meta.append(np.asarray(input_task))
            label_meta.append(np.asarray(label_task))

        input_meta = np.asarray(input_meta)
        label_meta = np.asarray(label_meta)

        inputa = input_meta[:, :self.TASK_BATCH_SIZE, :, :]
        labela = label_meta[:, :self.TASK_BATCH_SIZE, :, :]
        inputb = input_meta[:, self.TASK_BATCH_SIZE:, :, :]
        labelb = label_meta[:, self.TASK_BATCH_SIZE:, :, :]

        return inputa, labela, inputb, labelb
Example #20
    def maxEntropyEnhance(I, isBad=None):
        Y = rgb2gm(np.real(np.maximum(imresize(I, output_shape=(50, 50)), 0)))
        # imresize(I, output_shape=(50, 50)) matches MATLAB with tolerance = 1e-14
        # np.real(np.maximum(..., 0)) usually does nothing
        # Y matches MATLAB with tolerance = 1e-16

        if isBad is not None:
            isBad = (255 * isBad).astype(np.uint8)  # bool array -> uint8 array scaled by 255
            isBad = imresize(isBad, output_shape=(50, 50))
            isBad = isBad > 128  # converts uint8 array to bool array
            # isBad matches MATLAB exactly
            Y = Y.T[isBad.T]
            # since python iterates row-first, we must take transpose of Y and isBad to iterate column-first like MATLAB
            # Y matches MATLAB with tolerance = 1e-15
            # we leave Y in the shape of a 1D array instead of converting to a column vector like in MATLAB

        if Y.size == 0:
            J = I
            return J

        def find_negative_entropy(Y, k):
            applied_k = applyK(Y, k)
            applied_k[applied_k > 1] = 1
            applied_k[applied_k < 0] = 0
            scaled_applied_k = 255 * applied_k + 0.5  # we add 0.5 to round like MATLAB instead of truncating
            int_applied_k = scaled_applied_k.astype(np.uint8)
            hist = np.bincount(int_applied_k.flatten(), minlength=256)  # bincount needs 1-D input
            nonzero_hist = hist[hist != 0]
            normalized_hist = 1.0 * nonzero_hist / applied_k.size
            negative_entropy = np.sum(normalized_hist *
                                      np.log2(normalized_hist))
            return negative_entropy

        opt_k = fminbound(func=lambda k: find_negative_entropy(Y, k),
                          x1=1.0,
                          x2=7.0,
                          full_output=False)
        J = applyK(I, opt_k) - 0.01  # J has tolerance of 1e-5

        return J
Example #21
def make_data_tensor(scale, noise_std=0.0):
    label_train = metaTrainDataset('data/DIV2K_train.h5')

    input_meta = []
    label_meta = []

    for t in range(META_BATCH_SIZE):
        input_task = []
        label_task = []

        Kernel = generate_kernel(k1=scale * 2.5, ksize=15)
        for idx in range(TASK_BATCH_SIZE * 2):
            img_HR = label_train[t * TASK_BATCH_SIZE * 2 + idx][-1]
            # add isotropic and anisotropic Gaussian kernels for the blur kernels
            # and downsample
            clean_img_LR = imresize(img_HR, scale=1. / scale, kernel=Kernel)
            # add noise
            img_LR = np.clip(
                clean_img_LR +
                np.random.randn(*clean_img_LR.shape) * noise_std, 0., 1.)
            # used cubic upsample
            img_ILR = imresize(img_LR,
                               scale=scale,
                               output_shape=img_HR.shape,
                               kernel='cubic')

            input_task.append(img_ILR)
            label_task.append(img_HR)

        input_meta.append(np.asarray(input_task))
        label_meta.append(np.asarray(label_task))

    input_meta = np.asarray(input_meta)
    label_meta = np.asarray(label_meta)

    inputa = input_meta[:, :TASK_BATCH_SIZE, :, :]
    labela = label_meta[:, :TASK_BATCH_SIZE, :, :]
    inputb = input_meta[:, TASK_BATCH_SIZE:, :, :]
    labelb = label_meta[:, TASK_BATCH_SIZE:, :, :]

    return inputa, labela, inputb, labelb
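The a/b split at the end is a MAML-style support/query partition (the naming here is an assumption); shape bookkeeping with assumed sizes:

import numpy as np

META_BATCH_SIZE, TASK_BATCH_SIZE, H, W = 2, 3, 8, 8
input_meta = np.random.rand(META_BATCH_SIZE, TASK_BATCH_SIZE * 2, H, W)
inputa = input_meta[:, :TASK_BATCH_SIZE]  # support set: (2, 3, 8, 8)
inputb = input_meta[:, TASK_BATCH_SIZE:]  # query set:   (2, 3, 8, 8)
print(inputa.shape, inputb.shape)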
Example #22
    def __init__(self, Generator, Discriminator, opt):
        self.Generator = Generator
        self.Discriminator = Discriminator
        self.opt = opt

        ### Set parameters for the training of the 0th layer
        self.Gs = []  # Generator list for each scale
        self.Zs = []  # Optimal noise list for each scale [z*, 0, 0, ..., 0]
        self.NoiseAmp = []  # Per-scale ratio of noise merged with the previous layer's output
        self.in_s = 0  # Zero tensor with the downsampled input dimensions at scale 0

        ### TrainedModel Directory
        dir2save = generate_dir2save(self.opt)
        if (os.path.exists(dir2save)):
            print(
                "Would you look at that, the TrainedModel directory already exists!"
            )
        else:
            try:
                os.makedirs(dir2save)
            except OSError:
                print("Making the directory really didn't work out, hyelp")

        # In case we're not training, load existing model
        if self.opt.mode != 'train':
            self.Gs, self.Zs, _, _, self.NoiseAmp = load_trained_pyramid(
                self.opt)

            # We might wish to replace content or style images
            if self.opt.test_content is not None:
                self.opt.content = self.opt.test_content

            if self.opt.test_style is not None:
                self.opt.style = self.opt.test_style

        ### Content image pyramid
        self.real_ = read_image(self.opt)
        self.style_ = read_image(self.opt, style=True)

        if self.style_.shape != self.real_.shape:
            self.style_ = imresize_to_shape(
                self.style_, [self.real_.shape[2], self.real_.shape[3]], opt)
            self.style_ = self.style_[:, :, :self.real_.shape[2], :self.real_.shape[3]]

        # "adjust_scales2image" also arranges network parameters according to input dimensions
        assert self.real_.shape == self.style_.shape
        self.real = adjust_scales2image(self.real_, self.opt)
        self.reals = create_reals_pyramid(self.real, self.opt)

        self.style = imresize(self.style_, self.opt.scale1, self.opt)
        self.styles = create_reals_pyramid(self.style, self.opt)
Example #23
def lpyr(gsim):
    lplcim = []
    for i in range(len(gsim) - 1):
        #        t=gsim[i]-imresize.imresize(gsim[i+1],gsim[i].shape)
        t = gsim[i] - conv.Convolve(
            imresize.imresize(gsim[i + 1], gsim[i].shape), gskernel(3, 1), 3)
        #        tmin=t.min()
        #        t=t+abs(tmin)
        #        tmax=t.max()
        #        t=t/tmax*255.0
        lplcim.append(t)
    return lplcim
Example #24
def smoothBL_BMA(BL):
    import numpy as np
    from imresize import imresize
    K = 4
    a = 0.38
    b = 0.11
    c = 0.08
    d = 0.06

    for k in range(K):
        M = np.size(BL, axis=0)
        N = np.size(BL, axis=1)
        BL1 = np.zeros((M + 2, N + 2))
        BL2 = np.zeros((M + 2, N + 2))
        # mirror the borders
        BL1[1:M+1, 1:N+1] = BL  # slice end indices are exclusive

        BL1[0, 1:N+1] = BL[0, 0:N]
        BL1[M+1, 1:N+1] = BL[M-1, 0:N]

        BL1[1:M+1, 0] = BL[0:M, 0]
        BL1[1:M+1, N + 1] = BL[0:M, N - 1]

        BL1[0, 0] = BL[0, 0]
        BL1[0, N + 1] = BL[0, N - 1]
        BL1[M + 1, 0] = BL[M - 1, 0]
        BL1[M + 1, N + 1] = BL[M - 1, N - 1]
        # blend light across neighbouring blocks
        BL2[0:M+2, 0] = BL1[0:M+2, 0]
        BL2[0:M+2, N + 1] = BL1[0:M+2, N + 1]
        BL2[0, 0:N+2] = BL1[0, 0:N+2]
        BL2[M + 1, 0:N+2] = BL1[M + 1, 0:N+2]
        dd = BL1[0:M, 0:N] + BL1[0:M, 2:N+2] + BL1[2:M+2, 0:N] + BL1[2:M+2, 2:N+2]
        BL2[1:M+1, 1:N+1] = a * BL1[1:M+1, 1:N+1] + b * (BL1[1:M+1, 0:N] + BL1[1:M+1, 2:N+2]) + c * (BL1[0:M, 1:N+1] + BL1[2:M+2, 1:N+1]) + d * dd
        # upsample by 2x (interpolation)
        BL2 = imresize(BL2, 2, 'bilinear')
        BL = BL2
    BL2 = imresize(BL2, 2, 'bicubic')
    return BL2
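The hand-rolled border handling above replicates edge rows, columns, and corners, which is exactly numpy's edge-padding mode; a compact equivalent for the padding step:

import numpy as np

BL = np.arange(6.0).reshape(2, 3)
BL1 = np.pad(BL, 1, mode='edge')  # replicate edges and corners, as done manually above
print(BL1.shape)  # (4, 5)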
Example #25
    def __getitem__(self, idx):
        """Get a crop for both G and D """
        g_in = self.next_crop(for_g=True, idx=idx)
        d_in = self.next_crop(for_g=False, idx=idx)
        d_bq = imresize(im=d_in,
                        scale_factor=int(1 / self.conf.scale_factor_downsampler),
                        kernel='cubic')

        return {
            'HR': im2tensor(g_in).squeeze(),
            'LR': im2tensor(d_in).squeeze(),
            'LR_bicubic': im2tensor(d_bq).squeeze()
        }
Example #26
def create_reals_pyramid(real, opt):
    """
    Creates downsampled versions of the training image for each scale.

    Arguments:
        real (torch.cuda.FloatTensor) : Input image.
        opt (argparse.ArgumentParser) : Command line arguments.

    Returns:
        reals (list) : Downscaled real image list for each scale
    """
    reals = []
    real = real[:, 0:3, :, :]
    for i in range(0, opt.stop_scale + 1, 1):
        scale = math.pow(opt.scale_factor, opt.stop_scale - i)
        curr_real = imresize(real, scale, opt)
        reals.append(curr_real)
    return reals
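The per-scale factors form a geometric sequence from coarse to fine; with assumed values for opt.scale_factor and opt.stop_scale, the loop above visits these scales:

import math

scale_factor, stop_scale = 0.75, 8  # assumed values
scales = [math.pow(scale_factor, stop_scale - i) for i in range(stop_scale + 1)]
print([round(s, 3) for s in scales])  # ~0.1 at the coarsest scale up to 1.0 at the finest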
Example #27
def back_project_image(lr,
                       sf=2,
                       output_shape=None,
                       down_kernel='cubic',
                       up_kernel='cubic',
                       bp_iters=8):
    """Runs 'bp_iters' iteration of back projection SR technique"""
    tmp_sr = imresize(lr,
                      scale_factor=sf,
                      output_shape=output_shape,
                      kernel=up_kernel)
    for _ in range(bp_iters):
        tmp_sr = back_projection(y_sr=tmp_sr,
                                 y_lr=lr,
                                 down_kernel=down_kernel,
                                 up_kernel=up_kernel,
                                 sf=sf)
    return tmp_sr
Example #28
def getpyr(im):
    gsim = []
    gsim.append(im.astype(np.uint8))
    tim = im
    lplcim = []
    n = 0
    while tim.shape[0] > 1:
        tim = Gsup(tim)
        gsim.append(tim)
        n += 1
    for i in range(n):
        t = gsim[i] - imresize.imresize(gsim[i + 1], gsim[i].shape)
        #        tmin=t.min()
        #        t=t+abs(tmin)
        #        tmax=t.max()
        #        t=t/tmax*255.0
        lplcim.append(t)
    return gsim, lplcim
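The Laplacian bands built here satisfy an exact reconstruction identity: adding each band back to the upsampled next level restores the original, which is what yanzheng in Example #13 exploits. A quick demonstration with OpenCV's built-in pyramid ops as stand-ins for the imresize-based version:

import numpy as np
import cv2

img = np.random.rand(64, 64).astype(np.float32)
up = cv2.pyrUp(cv2.pyrDown(img))   # blur + downscale, then upscale back
lap = img - up                     # one Laplacian band
print(np.allclose(lap + up, img))  # True: band + upsampled level rebuilds the image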
Example #29
def adjust_scales2image(real_, opt):
    """
    Adjust scales of the pyramid according to the input image dimensions by modifying the "opt" parameters. Number of scales, scale 0 input dimension is decided in this function.

    Arguments:
        real_ (torch.cuda.FloatTensor) : Original image
        opt (argparse.ArgumentParser) : Command line arguments.

    Returns:
        real (torch.cuda.FloatTensor) : Image shape adjusted to the 1st scale
    
    Modifies input "opt"
    """
    #opt.num_scales = int((math.log(math.pow(opt.min_size / (real_.shape[2]), 1), opt.scale_factor_init))) + 1
    opt.num_scales = math.ceil((math.log(
        math.pow(opt.min_size / (min(real_.shape[2], real_.shape[3])), 1),
        opt.scale_factor_init))) + 1

    opt.scale1 = min(opt.max_size / max([real_.shape[2], real_.shape[3]]),
                     1)  # min(250/max([real_.shape[0],real_.shape[1]]),1)
    real = imresize(real_, opt.scale1, opt)

    scale2stop = math.ceil(
        math.log(
            min([opt.max_size,
                 max([real_.shape[2], real_.shape[3]])]) /
            max([real_.shape[2], real_.shape[3]]), opt.scale_factor_init))
    opt.stop_scale = opt.num_scales - scale2stop

    #opt.scale_factor = math.pow(opt.min_size / (real.shape[2]), 1 / (opt.stop_scale))
    opt.scale_factor = math.pow(
        opt.min_size / min(real.shape[2], real.shape[3]), 1 / opt.stop_scale)
    scale2stop = math.ceil(
        math.log(
            min([opt.max_size,
                 max([real_.shape[2], real_.shape[3]])]) /
            max([real_.shape[2], real_.shape[3]]), opt.scale_factor_init))
    opt.stop_scale = opt.num_scales - scale2stop

    return real
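A worked example of the bookkeeping above with assumed option values: a 512x768 input, min_size 25, max_size 250, and scale_factor_init 0.75:

import math

min_size, max_size, scale_factor_init = 25, 250, 0.75  # assumed option values
h, w = 512, 768
num_scales = math.ceil(math.log(min_size / min(h, w), scale_factor_init)) + 1
scale1 = min(max_size / max(h, w), 1)
print(num_scales, round(scale1, 3))  # 12 0.326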
Example #30
    def __init__(self, conf, gan):
        # Default shapes
        self.g_input_shape = conf.input_crop_size
        self.d_input_shape = gan.G.output_size  # shape entering D downscaled by G
        self.d_output_shape = self.d_input_shape - gan.D.forward_shave

        # Read input image
        self.input_image = read_image(conf.input_image_path) / 255.
        self.input_lr = imresize(
            im=self.input_image, scale_factor=0.5, kernel='cubic'
        )  # read_image("/content/gdrive/MyDrive/for_ws_kernel_gan/0803ss.png") / 255.  # implement
        self.shave_edges(scale_factor=conf.scale_factor,
                         real_image=conf.real_image)

        # self.in_rows, self.in_cols = self.input_image.shape[0:2]

        # Create prob map for choosing the crop
        # print(len(self.input_image) * len(self.input_image[0]), my_prob_map(self.input_image).shape)
        # self.crop_indices_for_g = np.random.choice(a=(len(self.input_image) * len(self.input_image[0])), size=conf.max_iters, p=my_prob_map(self.input_image))
        # self.crop_indices_for_d = np.random.choice(a=(len(self.input_image) * len(self.input_image[0])), size=conf.max_iters, p=my_prob_map(self.input_image)) # self.make_list_of_crop_indices(conf=conf)
        self.crop_indices_for_g, self.crop_indices_for_d = self.make_list_of_crop_indices(
            conf=conf)
Example #31
def main(dir_path):
    hrs = []
    lqs = []

    img_paths = get_img_paths(dir_path)
    for img_path in tqdm(img_paths):
        img = imread(img_path)

        for i in range(47):
            crop = random_crop(img, 160)
            cropX4 = imresize(crop, scalar_scale=0.25)
            hrs.append(crop)
            lqs.append(cropX4)

    shuffle_combined(hrs, lqs)

    hrs_path = get_hrs_path(dir_path)
    to_pklv4(hrs, hrs_path, vebose=True)
    to_pklv4_1pct(hrs, hrs_path, vebose=True)

    lqs_path = get_lqs_path(dir_path)
    to_pklv4(lqs, lqs_path, vebose=True)
    to_pklv4_1pct(lqs, lqs_path, vebose=True)
Example #32
def space_time_backprojection_cpu(hfr_hr_pred, lfr_hr_in, hfr_lr_in):
    # inputs are in H-W-T-C np arrays
    _rusage('init')
    if bp_debug:
        print('-stbp- hfr_hr_pred {}, lfr_hr_in {}, hfr_lr_in {}'.format(
            hfr_hr_pred.shape if hfr_hr_pred is not None else None,
            lfr_hr_in.shape if lfr_hr_in is not None else None,
            hfr_lr_in.shape if hfr_lr_in is not None else None))

    num_iters = 1

    if hfr_hr_pred is None:
        # hfr_hr_pred = imresize(hfr_lr_in, scale_factor=[2., 2., 1.], kernel='cubic')
        hfr_hr_pred = imresize(hfr_lr_in,
                               output_shape=[
                                   lfr_hr_in.shape[0], lfr_hr_in.shape[1],
                                   hfr_lr_in.shape[2], hfr_lr_in.shape[3]
                               ],
                               kernel='cubic')
        # resizer = torch_resizer.Resizer(hfr_lr_in.shape, output_shape=[lfr_hr_in.shape[0], lfr_hr_in.shape[1], hfr_lr_in.shape[2], hfr_lr_in.shape[3]],
        #                                      kernel='cubic', antialiasing=True, device='cuda', dtype=torch.float16)
        # hfr_hr_pred = resizer(torch.tensor(hfr_lr_in, dtype=torch.float16).to(device)).cpu().numpy()
    _rusage('make pred')
    # CHECK!!
    _check_inputs(hfr_hr_pred, lfr_hr_in, hfr_lr_in)

    for it in range(num_iters):
        hfr_hr_pred = temporal_backprojection_np(hfr_hr_pred, lfr_hr_in)
        _rusage('{} tbp'.format(it))
        hfr_hr_pred = spatial_backprojection_np_cpu(hfr_hr_pred, hfr_lr_in)
        _rusage('{} sbp'.format(it))
    # final step is temporal
    hfr_hr_pred = temporal_backprojection_np(hfr_hr_pred, lfr_hr_in)

    _rusage('finally')
    return np.clip(hfr_hr_pred, 0., 1.)
Example #33
        _, w, h = resize2_img.shape
        w = w - w % opt.upscale_factor
        h = h - h % opt.upscale_factor
        resize2_cut_img = resize2_img[:, :w, :h]

        # Save resize2_cut_img as HR image for TDSR
        path = os.path.join(tdsr_hr_dir, os.path.basename(file))
        resize2_cut_img = utils.to_pil_image(resize2_cut_img)
        resize2_cut_img.save(path, 'PNG')

        # Generate resize3_cut_img and apply model
        kernel_path = kernel_paths[np.random.randint(0, kernel_num)]
        mat = loadmat(kernel_path)
        k = np.array([mat['Kernel']]).squeeze()
        resize3_cut_img = imresize(np.array(resize2_cut_img),
                                   scale_factor=1.0 / opt.upscale_factor,
                                   kernel=k)

        # Save resize3_cut_img as LR image for TDSR
        path = os.path.join(tdsr_lr_dir, os.path.basename(file))
        utils.to_pil_image(resize3_cut_img).save(path, 'PNG')

    for file in tqdm(target_files, desc='Generating images from target'):
        # load HR image
        input_img = Image.open(file)
        input_img = TF.to_tensor(input_img)

        # Save input_img as HR image for TDSR
        path = os.path.join(tdsr_hr_dir, os.path.basename(file))
        HR_img = utils.to_pil_image(input_img)
        HR_img.save(path, 'PNG')
Example #34
def main():
    conf_path = sys.argv[1]
    conf = conf_path.split('/')[-1].replace('.yml', '')
    model, opt = load_model(conf_path)

    lr_dir = opt['dataroot_LR']
    hr_dir = opt['dataroot_GT']

    lr_paths = fiFindByWildcard(os.path.join(lr_dir, '*.png'))
    hr_paths = fiFindByWildcard(os.path.join(hr_dir, '*.png'))

    this_dir = os.path.dirname(os.path.realpath(__file__))
    test_dir = os.path.join(this_dir, '..', 'results', conf)
    print(f"Out dir: {test_dir}")

    measure = Measure(use_gpu=False)

    fname = f'measure_full.csv'
    fname_tmp = fname + "_"
    path_out_measures = os.path.join(test_dir, fname_tmp)
    path_out_measures_final = os.path.join(test_dir, fname)

    if os.path.isfile(path_out_measures_final):
        df = pd.read_csv(path_out_measures_final)
    elif os.path.isfile(path_out_measures):
        df = pd.read_csv(path_out_measures)
    else:
        df = None

    scale = opt['scale']

    pad_factor = 2

    for lr_path, hr_path, idx_test in zip(lr_paths, hr_paths, range(len(lr_paths))):

        lr = imread(lr_path)
        hr = imread(hr_path)

        # Pad image to be % 2
        h, w, c = lr.shape
        lq_orig = lr.copy()
        lr = impad(lr, bottom=int(np.ceil(h / pad_factor) * pad_factor - h),
                   right=int(np.ceil(w / pad_factor) * pad_factor - w))

        lr_t = t(lr)

        heat = opt['heat']

        if df is not None and len(df[(df['heat'] == heat) & (df['name'] == idx_test)]) == 1:
            continue

        sr_t = model.get_sr(lq=lr_t, heat=heat)

        sr = rgb(torch.clamp(sr_t, 0, 1))
        sr = sr[:h * scale, :w * scale]

        path_out_sr = os.path.join(test_dir, "{:0.2f}".format(heat).replace('.', ''), "{:06d}.png".format(idx_test))
        imwrite(path_out_sr, sr)

        meas = OrderedDict(conf=conf, heat=heat, name=idx_test)
        meas['PSNR'], meas['SSIM'], meas['LPIPS'] = measure.measure(sr, hr)

        lr_reconstruct_rgb = imresize(sr, 1 / opt['scale'])
        meas['LRC PSNR'] = psnr(lq_orig, lr_reconstruct_rgb)

        str_out = format_measurements(meas)
        print(str_out)

        df = pd.DataFrame([meas]) if df is None else pd.concat([pd.DataFrame([meas]), df])

        df.to_csv(path_out_measures + "_", index=False)
        os.rename(path_out_measures + "_", path_out_measures)

    df.to_csv(path_out_measures, index=False)
    os.rename(path_out_measures, path_out_measures_final)

    str_out = format_measurements(df.mean())
    print(f"Results in: {path_out_measures_final}")
    print('Mean: ' + str_out)