Example #1
    def __getitem__(self, idx):
        # scale = self.scale[self.idx_scale]
        scale = 1
        idx = self._get_index(idx)
        img_in, img_tar = self._load_file(idx)
        img_in, img_tar, pi, ai = self._get_patch(img_in, img_tar)
        img_in, img_tar = common.set_channel(img_in, img_tar,
                                             self.args.n_colors)

        try:
            retVal = common.np2Tensor(img_in, img_tar, self.args.rgb_range)
        except Exception:
            # log the failing index, then re-raise the original error
            print("failed to common.np2Tensor. idx is: ", idx)
            raise

        return retVal
Example #2
File: DIV2K.py  Project: Dong1P/MBSR
    def __getitem__(self, idx):
        scale = self.scale[self.idx_scale]
        idx = self._get_index(idx)
        img_in, img_tar, img_in4, img_in2 = self._load_file(idx)

        img_in, img_tar, img_in4, img_in2, pi, ai = self._get_patch(
            img_in, img_tar, img_in4, img_in2)
        if self.train:
            img_in, img_tar, img_in4, img_in2 = common.set_channel_Three(
                img_in, img_tar, img_in4, img_in2, self.args.n_colors)

            return common.np2Tensor_Three(img_in, img_tar, img_in4, img_in2,
                                          self.args.rgb_range)
            # return common.np2Tensor(img_in, img_tar, self.args.rgb_range)

        else:
            img_in, img_tar = common.set_channel(
                img_in, img_tar, self.args.n_colors)

            return common.np2Tensor(img_in, img_tar, self.args.rgb_range)
Example #3
    def test(self):
        torch.set_grad_enabled(False)

        self.ckp.write_log("\nEvaluation on video:")
        self.model.eval()

        timer_test = utility.timer()
        for idx_scale, scale in enumerate(self.scale):
            vidcap = cv2.VideoCapture(self.args.dir_demo)
            total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
            vidwri = cv2.VideoWriter(
                self.ckp.get_path("{}_x{}.avi".format(self.filename, scale)),
                cv2.VideoWriter_fourcc(*"XVID"),
                vidcap.get(cv2.CAP_PROP_FPS),
                (
                    int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                ),
            )

            tqdm_test = tqdm(range(total_frames), ncols=80)
            for _ in tqdm_test:
                success, lr = vidcap.read()
                if not success:
                    break

                lr, = common.set_channel(lr, n_channels=self.args.n_colors)
                lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
                lr, = self.prepare(lr.unsqueeze(0))
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)

                normalized = sr * 255 / self.args.rgb_range
                ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
                vidwri.write(ndarr)

            vidcap.release()
            vidwri.release()

        self.ckp.write_log("Total: {:.2f}s\n".format(timer_test.toc()),
                           refresh=True)
        torch.set_grad_enabled(True)
Example #4
    def __getitem__(self, idx):
        lr_path = self.img_path[idx]
        video_path = os.path.dirname(lr_path)
        video_name = os.path.basename(video_path)
        frame_idx = int(os.path.basename(lr_path).split('.')[0])
        filename = video_name + '_{}'.format(frame_idx)
        idxs = self.index_generation(frame_idx, self.video_len[video_path],
                                     self.args.n_frames)
        lrs = []
        # use a separate loop variable so the dataset index `idx` is not shadowed
        for i in idxs:
            img_path = os.path.join(os.path.dirname(lr_path),
                                    '{}.png'.format(i))
            temp = misc.imread(img_path)
            lrs.append(temp)
        lrs = np.array(lrs)

        lrs = common.set_channel([lrs], self.args.n_colors)[0]
        lrs = common.np2Tensor([lrs], self.args.rgb_range)[0]

        return lrs, -1, filename
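The `index_generation` helper is not shown in this snippet. A common scheme for multi-frame loaders, assumed here purely for illustration (the actual repository may number frames differently or pad at the clip boundaries), returns a window of `n_frames` indices centred on the current frame and clamped to the clip:

def index_generation(center, max_frame, n_frames):
    # clamp a window of n_frames indices (0-based) so it stays inside the clip
    half = n_frames // 2
    start = max(0, min(center - half, max_frame - n_frames))
    return list(range(start, start + n_frames))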
Example #5
 def __getitem__(self, idx):
     hr, hr_path = self._load_file(idx)
     # Generate the LR image from the HR image on the fly.
     hr, lr = self.generate_lr_hr(hr, idx)
     # The LR_path key is not really used anywhere, so it is fine to set it
     # to an arbitrary value.
     lr_path = 'none'
     # if self.train:
     #     lr, hr = self._get_patch(lr, hr)
     lr_tensor, hr_tensor = common.np2Tensor([lr, hr],
                                             self.opt['rgb_range'])
     # return {'LR': lr_tensor, 'HR': hr_tensor, 'LR_path': lr_path, 'HR_path': hr_path}
     return {
         'LQ': lr_tensor,
         'GT': hr_tensor,
         'LQ_path': lr_path,
         'GT_path': hr_path
     }
Example #6
    def __getitem__(self, idx):
        lr, hr = self._load_file(idx)
        lr, hr = self._get_patch(lr, hr)
        lr, hr = common.set_channel(lr, hr, n_channels=self.opt.n_colors)
        lr_tensor, hr_tensor = common.np2Tensor(lr, hr, rgb_range=self.opt.rgb_range)

        # prob = 1.0
        # alpha = 0.7
        # aux_prob = 1.0
        # aux_alpha = 1.2
        # hr_tensor, lr_tensor = common.cutmixup(
        #     hr_tensor.clone(), lr_tensor.clone(),
        #     mixup_prob=aux_prob, mixup_alpha=aux_alpha,
        #     cutmix_prob=prob, cutmix_alpha=alpha,
        # )
        # hr_=tensor_to_np(hr_tensor)
        # lr_=tensor_to_np(lr_tensor)
        #
        # cv2.imwrite("./hr.png",hr_)
        # cv2.imwrite("./lr.png", lr_)
        return lr_tensor, hr_tensor
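All of these snippets lean on a `common.py` module in the EDSR-PyTorch family. The exact code differs per repository (some helpers take a list, some take `*args`, and some add extra range arguments), so the following is only a minimal sketch of the `*args` variant assumed by Examples #6 and #18, modeled on EDSR-PyTorch's common.py; details such as the RGB-to-Y conversion when n_colors == 1 vary between projects.

import numpy as np
import torch

def set_channel(*args, n_channels=3):
    # make every image H x W x C with the requested number of channels
    def _set_channel(img):
        if img.ndim == 2:
            img = np.expand_dims(img, axis=2)
        if n_channels == 3 and img.shape[2] == 1:
            img = np.concatenate([img] * 3, axis=2)
        return img
    return [_set_channel(a) for a in args]

def np2Tensor(*args, rgb_range=255):
    # HWC (usually uint8) numpy array -> CHW float tensor in [0, rgb_range]
    def _np2Tensor(img):
        t = torch.from_numpy(np.ascontiguousarray(img.transpose((2, 0, 1))))
        t = t.float()
        t.mul_(rgb_range / 255)
        return t
    return [_np2Tensor(a) for a in args]

The list-based variant used in Examples #5, #9, #11, #13 and #15 has the same body but takes and returns a Python list, e.g. np2Tensor([lr, hr], rgb_range).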
Example #7
File: srdata.py  Project: delldu/VESPCN
 def __getitem__(self, idx):
     if self.train and self.args.process:
         lr, hr, filename = self._load_file_from_loaded_data(idx)
     else:
         lr, hr, filename = self._load_file(idx)
     if self.train:
         lr_extend = hr
     else:
         lr_extend = misc.imresize(lr,
                                   size=self.args.scale * 100,
                                   interp='bicubic')
     lr, lr_extend, hr = self.get_patch(lr, lr_extend, hr)
     lr, lr_extend, hr = common.set_channel(lr,
                                            lr_extend,
                                            hr,
                                            n_channels=self.args.n_colors)
     lr_tensor, lre_tensor, hr_tensor = common.np2Tensor(
         lr,
         lr_extend,
         hr,
         rgb_range=self.args.rgb_range,
         n_colors=self.args.n_colors)
     return lr_tensor, lre_tensor, hr_tensor, filename
Example #8
    def __getitem__(self, idx):
        # decode the binary file into an imageio image pair
        lr, hr, filename = self._load_file(idx)
        pair = self.get_patch(lr, hr)
        pair = common.set_channel(*pair, n_channels=self.args.n_colors)
        pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range)

        # generate a degradation kernel
        k = utils_sisr.gen_kernel(scale_factor=np.array(
            [self.scale, self.scale]))  # Gaussian blur (overwritten below)
        r_value = np.random.randint(0, 8)
        if r_value > 3:
            k = utils_deblur.blurkernel_synthesis(h=25)  # motion blur
        else:
            sf_k = random.choice(self.scale)
            k = utils_sisr.gen_kernel(scale_factor=np.array(
                [sf_k, sf_k]))  # Gaussian blur
            mode_k = np.random.randint(0, 8)
            k = util.augment_img(k, mode=mode_k)
            if np.random.randint(0, 8) == 1:
                noise_level = 0 / 255.0
            else:
                noise_level = np.random.randint(0, self.sigma_max) / 255.0

            # ---------------------------
            # Low-quality image
            # (patch_H and self.sf come from the surrounding class and are
            # not defined in this excerpt; img_L / img_H are never used by
            # the return statement below.)
            # ---------------------------
            img_L = ndimage.filters.convolve(patch_H,
                                             np.expand_dims(k, axis=2),
                                             mode='wrap')
            img_L = img_L[0::self.sf, 0::self.sf, ...]
            # add Gaussian noise
            img_L = util.uint2single(img_L) + np.random.normal(
                0, noise_level, img_L.shape)
            img_H = patch_H

        return pair_t[0], pair_t[1], filename
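`utils_sisr.gen_kernel` and `utils_deblur.blurkernel_synthesis` are external degradation-kernel utilities (KAIR-style) and are not shown above. For orientation only, an isotropic Gaussian kernel of the rough kind `gen_kernel` produces can be built as below; the real helper is more elaborate (random, typically anisotropic widths), so treat this as an illustration rather than the repository's implementation.

import numpy as np

def isotropic_gaussian_kernel(size=21, sigma=2.0):
    # normalised 2-D Gaussian; illustration only, not the repository's gen_kernel
    ax = np.arange(size) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    k = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return k / k.sum()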
Example #9
 def __getitem__(self, idx):
     lr, hr, lr_path, hr_path = self._load_file(idx)
     if self.train:
         lr, hr = self._get_patch(lr, hr)
     lr_tensor, hr_tensor = common.np2Tensor([lr, hr], self.opt['rgb_range'])
     return {'LR': lr_tensor, 'HR': hr_tensor, 'LR_path': lr_path, 'HR_path': hr_path}
Example #10
 def __getitem__(self, idx):
     lr, hr, lrpan, pan, lr_path = self._load_file(idx)
     if self.train:
         lr, hr, lrpan, pan = self._get_patch(lr, hr, lrpan, pan,
                                              msx2=self.msx2)
     lr_tensor, hr_tensor, lrpan_tensor, pan_tensor = common.np2Tensor(
         [lr, hr, lrpan, pan], self.opt['run_range'], self.opt['img_range'])
     return {'LR': lr_tensor, 'HR': hr_tensor, 'LRPAN': lrpan_tensor,
             'PAN': pan_tensor, 'LR_path': lr_path}
Example #11
 def __getitem__(self, idx):
     # get LR image
     lr, lr_path = self._load_file(idx)
     lr_tensor = common.np2Tensor([lr], self.opt['rgb_range'])[0]
     return {'LR': lr_tensor, 'LR_path': lr_path}
Example #12
    def __getitem__(self, idx):
        if self.args.use_stats:
            lr, hr, mask, mask_grads, filename = self._load_file(idx)
        elif self.args.use_real and (self.name in ['SIDD', 'SIDDVAL', 'NAM']):
            lr, hr, mask, noisemap, filename = self._load_file(idx)
        else:
            lr, hr, mask, filename = self._load_file(idx)

        if self.args.debug:
            # if self.args.compute_grads:
            print('got item')
            print(lr.shape, hr.shape, mask.shape)
        if self.args.predict_groups:
            mask = self.into_groups(mask)
        # patch extraction is skipped when computing grads / predicting groups
        # at test time or on images smaller than 2000x2000 pixels
        small_or_eval = (not self.train) or lr.shape[0] * lr.shape[1] < 2000 * 2000
        if not (self.args.compute_grads and small_or_eval) and \
                not (self.args.predict_groups and small_or_eval):
            if (self.mask_type == 'raisr') and (not self.args.pre_raisr):
                pair = self.get_patch(lr, hr)
                mask = self.get_raisr_buckets(pair[1])
            else:
                if self.args.pre_raisr and (not self.args.compute_grads) \
                        and (not self.args.predict_groups):
                    mask = np.expand_dims(mask, -1)
                if self.args.use_stats:
                    lr, hr, mask_grads, mask = self.get_patch(
                        lr, hr, mask_grad=mask_grads, mask=mask)
                elif self.args.use_real and (self.name in ['SIDD', 'SIDDVAL', 'NAM']):
                    lr, hr, noisemap, mask = self.get_patch(
                        lr, hr, mask_grad=noisemap, mask=mask)
                else:
                    lr, hr, mask = self.get_patch(lr, hr, mask=mask)

                if self.args.debug:
                    print('data augmented!')
                    print(mask.shape)
                    print(mask.dtype)
                    print(mask[10:20, 10:20, 0])
                if self.train and self.args.drop_mask:
                    mask = self.drop_mask(mask)
                pair = (lr, hr, mask)
        else:
            pair = (lr, hr, mask)
        if self.args.debug:
            print(pair[0].shape, pair[1].shape, pair[2].shape)
        #if self.args.real_isp or self.args.use_real:
        #    if not (self.name in ['SIDD','SIDDVAL','NAM']):
        #        lr, noisemap = AddRealNoise((hr.astype(np.float32))/255., self.CRF_para, self.iCRF_para, self.I_gl, self.B_gl, self.I_inv_gl, self.B_inv_gl)
        #        lr = lr * 255.
        #    else:
        #        if self.args.model == 'unet_noisemap':
        #            _, noisemap = AddRealNoise(hr, self.CRF_para, self.iCRF_para, self.I_gl, self.B_gl, self.I_inv_gl, self.B_inv_gl)
        #    if self.args.real_isp:
        #        mask = np.concatenate((mask,noisemap),axis=-1)
        #    if self.args.debug:
        #        print('noisemap and concatenate shape: ', noisemap.shape, noisemap.dtype, mask.shape, mask.dtype)
        pair = (lr, hr, mask)

        pair = common.set_channel(*pair, n_channels=self.args.n_colors)
        if self.args.debug:
            print('channels set!')
            print(pair[0].shape, pair[1].shape, pair[2].shape)
        if (self.mask_type == 'raisr') and (not self.args.pre_raisr):
            pair.append(mask)
        if (self.args.pre_raisr):
            if not (self.args.compute_grads or self.args.predict_groups):
                pair[2] = pair[2].squeeze(-1)
        pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range)
        if self.args.use_stats:
            np_transpose = np.ascontiguousarray(mask_grads.transpose((2, 0, 1)))
            mask_grads = torch.from_numpy(np_transpose).float()
            lr_grads = torch.cat((pair_t[0], mask_grads), 0)

        if self.args.use_real:
            np_transpose = np.ascontiguousarray(noisemap.transpose((2, 0, 1)))
            noisemap = torch.from_numpy(np_transpose).float()
            lr_grads = torch.cat((pair_t[0], noisemap), 0)

        if self.args.debug:
            if self.args.compute_grads or self.args.predict_groups:
                print(pair_t[0].shape, pair_t[1].shape, pair_t[2].shape)
        if self.args.use_stats or self.args.use_real:
            return lr_grads, pair_t[1], pair_t[2], filename
        else:
            return pair_t[0], pair_t[1], pair_t[2], filename
Example #13
 def __getitem__(self, idx):
     lr, hr, filename = self._load_file(idx)
     lr, hr = self._get_patch(lr, hr)
     lr, hr = common.set_channel([lr, hr], self.args.n_colors)
     lr_tensor, hr_tensor = common.np2Tensor([lr, hr], self.args.rgb_range)
     return lr_tensor, hr_tensor, filename
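For context, a dataset whose `__getitem__` returns `(lr_tensor, hr_tensor, filename)` like the example above is consumed through a standard PyTorch DataLoader. The toy dataset below is a self-contained placeholder, not taken from any of these projects; it only shows how the default collation batches the tensors and groups the filenames.

import torch
from torch.utils.data import Dataset, DataLoader

class ToyPairSet(Dataset):
    # stand-in for the SR datasets above: CHW tensors in [0, 255] plus a name
    def __len__(self):
        return 8

    def __getitem__(self, idx):
        lr = torch.rand(3, 48, 48) * 255
        hr = torch.rand(3, 96, 96) * 255
        return lr, hr, 'img_{:02d}'.format(idx)

loader = DataLoader(ToyPairSet(), batch_size=4, shuffle=True)
for lr, hr, filenames in loader:
    # default_collate stacks the tensors along a new batch dimension and
    # gathers the filename strings into a tuple
    print(lr.shape, hr.shape, filenames)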
Example #14
def load_img(path):
    lr = imageio.imread(path)
    lr, = common.set_channel(lr, n_channels=args.n_colors)
    lr_t, = common.np2Tensor(lr, rgb_range=args.rgb_range)
    # hard-coded for a 3 x 384 x 510 RGB input; lr_t.unsqueeze(0) would
    # generalise to other image sizes
    lr_t = lr_t.view(1, 3, 384, 510).to(torch.device('cuda'))
    return lr_t
Example #15
 def __getitem__(self, idx):
     lr1, lr2, lr3, hr, lr_path1, lr_path2, lr_path3, hr_path = self._load_file(idx)
     if self.train:
         lr1, lr2, lr3, hr = self._get_patch(lr1, lr2, lr3, hr)
     lr_tensor1, lr_tensor2, lr_tensor3, hr_tensor = common.np2Tensor(
         [lr1, lr2, lr3, hr], self.opt['rgb_range'])
     return {'LR1': lr_tensor1, 'LR2': lr_tensor2, 'LR3': lr_tensor3,
             'HR': hr_tensor, 'LR_path1': lr_path1, 'LR_path2': lr_path2,
             'LR_path3': lr_path3, 'HR_path': hr_path}
Example #16
File: kerneldata.py  Project: royson/VDAN
    def __getitem__(self, idx):
        if self.args.real:
            if self.args.process:
                lrs, _, _, filenames = self._load_file_from_loaded_data(idx)
            else:
                lrs, _, _, filenames = self._load_file(idx)

            if len(lrs.shape) == 4:
                b, ih, iw, _ = lrs.shape
            else:
                lrs = np.array([np.expand_dims(lr, axis=2) for lr in lrs])
                b, ih, iw, _ = lrs.shape
                #print(lrs.shape)

            ip = self.args.patch_size

            ix = random.randrange(0, iw - ip + 1)
            iy = random.randrange(0, ih - ip + 1)

            patches = [self.get_patch(lr, None, ix, iy) for lr in lrs]
            lrs = np.array([patch[0] for patch in patches])
            lrs = np.array(
                common.set_channel(*lrs, n_channels=self.args.n_colors))
            lr_tensors = common.np2Tensor(*lrs,
                                          rgb_range=self.args.rgb_range,
                                          n_colors=self.args.n_colors)

            return torch.stack(lr_tensors), filenames
        else:
            if self.args.process:
                lrs, hrs, kernels, filenames = self._load_file_from_loaded_data(
                    idx)
            else:
                lrs, hrs, kernels, filenames = self._load_file(idx)

            if len(lrs.shape) == 4:
                b, ih, iw, _ = lrs.shape
            else:
                lrs = np.array([np.expand_dims(lr, axis=2) for lr in lrs])
                b, ih, iw, _ = lrs.shape
                #print(lrs.shape)

            ip = self.args.patch_size

            ix = random.randrange(0, iw - ip + 1)
            iy = random.randrange(0, ih - ip + 1)

            patches = [
                self.get_patch(lr, hr, ix, iy) for lr, hr in zip(lrs, hrs)
            ]
            lrs = np.array([patch[0] for patch in patches])
            hrs = np.array([patch[1] for patch in patches])
            kernels = np.array([kernel for kernel in kernels])
            lrs = np.array(
                common.set_channel(*lrs, n_channels=self.args.n_colors))
            hrs = np.array(
                common.set_channel(*hrs, n_channels=self.args.n_colors))
            lr_tensors = common.np2Tensor(*lrs,
                                          rgb_range=self.args.rgb_range,
                                          n_colors=self.args.n_colors)
            hr_tensors = common.np2Tensor(*hrs,
                                          rgb_range=self.args.rgb_range,
                                          n_colors=self.args.n_colors)

            if self.args.pca_input:
                kernel_tensors = torch.from_numpy(kernels).float()
            else:
                kernels = np.reshape(
                    kernels,
                    (kernels.shape[0], kernels.shape[1], kernels.shape[2], 1))
                kernel_tensors = common.kernel2Tensor(
                    *kernels,
                    rgb_range=self.args.rgb_range,
                    n_colors=self.args.n_colors,
                    norm=False)
                kernel_tensors = torch.stack(kernel_tensors)
            return torch.stack(lr_tensors), torch.stack(
                hr_tensors), kernel_tensors, filenames
Example #17
 def __getitem__(self, idx):
     filename = os.path.split(self.filelist[idx])[-1]
     filename, _ = os.path.splitext(filename)
     lr = misc.imread(self.filelist[idx])
     lr = common.set_channel(lr, self.args.n_colors)
     return common.np2Tensor([lr], self.args.rgb_range)[0], -1, filename, -1
Example #18
 def __getitem__(self, idx):
     lr, hr = self._load_file(idx)
     lr, hr = self._get_patch(lr, hr)
     lr, hr = common.set_channel(lr, hr, n_channels=self.opt.n_colors)
     lr_tensor, hr_tensor = common.np2Tensor(lr, hr, rgb_range=self.opt.rgb_range)
     return lr_tensor, hr_tensor
Example #19
    def __getitem__(self, idx):
        img_lr, img_hr = self._load_file(idx)
        img_lr, img_hr = self._get_patch(img_lr, img_hr)
        img_lr, img_hr = common.set_channel(img_lr, img_hr, self.args.n_colors)

        return common.np2Tensor(img_lr, img_hr, self.args.rgb_range)
Example #20
    print("Model has {:.2f}M Parameters".format(
        (np.sum([i.numel() for i in model.parameters()])) / 1.e6))
    model = model
    model.eval()
    checkpoint.done()

LR_path = args.dir_data
save_path = '../experiment/LR_result'
# make sure the output directory exists before writing results
os.makedirs(save_path, exist_ok=True)

img_list = glob.glob(os.path.join(LR_path, '*.png'))

torch.set_grad_enabled(False)

for i in tqdm.tqdm(img_list, ncols=80):

    img = imageio.imread(i)
    # set_channel takes *args and returns a list, hence the [0] index below
    lr = common.set_channel(img, n_channels=3)
    lr = (common.np2Tensor(lr[0], rgb_range=255)[0]).unsqueeze(0).cuda()

    pred = model(lr, 1)

    # detach() plays the role of the old .data; now that Tensor and Variable
    # are merged, detach() is the safer choice (autograd raises if the
    # detached tensor is later modified in place).
    # Take the tensor data, move it to the CPU, then convert to numpy.
    pred = pred.detach().squeeze(0).permute(1, 2, 0).clamp(0,
                                                           255).cpu().numpy()
    pred = pred.astype('uint8')
    imageio.imwrite(os.path.join(save_path, os.path.basename(i)),
                    pred,
                    format='png')
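Examples #3 and #20 both convert a model output back to a uint8 H x W x C image inline. That inverse of np2Tensor can be factored into a small helper; tensor2img is a name introduced here for illustration (with explicit rounding), not a function from these repositories.

import torch

def tensor2img(t, rgb_range=255):
    # CHW float tensor in [0, rgb_range] -> HWC uint8 numpy array
    t = t.detach().float().clamp(0, rgb_range) * (255.0 / rgb_range)
    return t.round().byte().permute(1, 2, 0).cpu().numpy()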
Example #21
    def __getitem__(self, idx):
        # get LR image
        lr, lr_path, pan, pan_path = self._load_file(idx)
        lr_tensor, pan_tensor = common.np2Tensor(
            [lr, pan], self.opt['run_range'], self.opt['img_range'])

        return {'LR': lr_tensor, 'LR_path': lr_path,
                'PAN': pan_tensor, 'PAN_path': pan_path}