Example 1
    def __getitem__(self, index):

        if self.opt['phase'] == 'train':

            patch_L, patch_H = self.L_data[index], self.H_data[index]

            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = random.randint(0, 7)
            patch_L = util.augment_img(patch_L, mode=mode)
            patch_H = util.augment_img(patch_H, mode=mode)

            patch_L = util.uint2tensor3(patch_L)
            patch_H = util.uint2tensor3(patch_H)

        else:

            L_path, H_path = self.paths_L[index], self.paths_H[index]
            patch_L = util.imread_uint(L_path, self.n_channels)
            patch_H = util.imread_uint(H_path, self.n_channels)

            patch_L = util.uint2tensor3(patch_L)
            patch_H = util.uint2tensor3(patch_H)

        return {'L': patch_L, 'H': patch_H}
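Both patches get the same random mode, so the flip/rotate stays aligned between the low- and high-quality patch. For reference, a minimal sketch of an 8-mode flip/rotate helper in the style this example assumes for util.augment_img (the actual implementation lives in the example's own util module):

import numpy as np

def augment_img(img, mode=0):
    # 8 geometric modes: identity, three rotations, and their flipped versions
    if mode == 0:
        return img
    elif mode == 1:
        return np.flipud(np.rot90(img))
    elif mode == 2:
        return np.flipud(img)
    elif mode == 3:
        return np.rot90(img, k=3)
    elif mode == 4:
        return np.flipud(np.rot90(img, k=2))
    elif mode == 5:
        return np.rot90(img)
    elif mode == 6:
        return np.rot90(img, k=2)
    elif mode == 7:
        return np.flipud(np.rot90(img, k=3))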
Example 2
    def __getitem__(self, index):

        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)

        # ------------------------------------
        # get L image
        # ------------------------------------
        L_path = self.paths_L[index]
        img_L = util.imread_uint(L_path, self.n_channels)

        # ------------------------------------
        # if train, get L/H patch pair
        # ------------------------------------
        if self.opt['phase'] == 'train':

            H, W, _ = img_H.shape

            # --------------------------------
            # randomly crop the patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_L = img_L[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]
            patch_H = img_H[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]

            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = np.random.randint(0, 8)
            patch_L = util.augment_img(patch_L, mode=mode)
            patch_H = util.augment_img(patch_H, mode=mode)

            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_L = util.uint2tensor3(patch_L)
            img_H = util.uint2tensor3(patch_H)

        else:

            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_L, img_H = util.uint2tensor3(img_L), util.uint2tensor3(img_H)

        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
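Both branches end with the same conversion from uint8 HWC arrays to float CHW tensors. A minimal sketch of what util.uint2tensor3 is assumed to do (scale to [0, 1] and move channels first):

import numpy as np
import torch

def uint2tensor3(img):
    # HWC uint8 in [0, 255] -> CHW float32 tensor in [0, 1]
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)

Example 3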
    def __getitem__(self, index: int) -> Dict[str, Union[str, torch.Tensor]]:
        # get H image
        img_path = self.img_paths[index]
        img_H = util.imread_uint(img_path, self.n_channels)

        H, W = img_H.shape[:2]

        if self.opt['phase'] == 'train':

            self.count += 1

            # crop
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]

            # augmentation
            patch_H = util.augment_img(patch_H, mode=np.random.randint(0, 8))

            # HWC to CHW, numpy(uint) to tensor
            img_H = util.uint2tensor3(patch_H)
            img_L: torch.Tensor = img_H.clone()

            # get noise level
            noise_level: torch.FloatTensor = torch.FloatTensor(
                [np.random.uniform(self.sigma[0], self.sigma[1])]) / 255.0

            # add noise
            noise = torch.randn(img_L.size()).mul_(noise_level).float()
            img_L.add_(noise)

        else:
            img_H = util.uint2single(img_H)
            img_L = np.copy(img_H)

            # add noise
            np.random.seed(seed=0)
            img_L += np.random.normal(0, self.sigma / 255.0, img_L.shape)

            noise_level = torch.FloatTensor([self.sigma / 255.0])

            img_H = util.single2tensor3(img_H)
            img_L = util.single2tensor3(img_L)

        return {
            'y': img_L,
            'y_gt': img_H,
            'sigma': noise_level.unsqueeze(1).unsqueeze(1),
            'path': img_path
        }
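Here the sample uses denoising-style keys: 'y' is the noisy input, 'y_gt' the clean target, and 'sigma' is the noise level reshaped to [1, 1, 1] so it can broadcast to a per-pixel noise-level map. A hypothetical consumer (variable names below are illustrative, not from the source) might expand it and concatenate it with the input, FFDNet-style:

import torch

sample = dataset[0]                      # one item from the dataset above (name assumed)
y, sigma = sample['y'], sample['sigma']  # y: [C, H, W], sigma: [1, 1, 1]
sigma_map = sigma.expand(1, y.shape[1], y.shape[2])  # broadcast to a [1, H, W] map
model_input = torch.cat([y, sigma_map], dim=0)       # [C + 1, H, W] conditioned input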
Example 4
    def __getitem__(self, index):

        # ------------------------------------
        # get L image
        # ------------------------------------
        L_path = self.paths_L[index]
        img_L = util.imread_uint(L_path, self.n_channels)

        # ------------------------------------
        # HWC to CHW, numpy to tensor
        # ------------------------------------
        img_L = util.uint2tensor3(img_L)

        return {'L': img_L, 'L_path': L_path}
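This loader returns only the low-quality image and its path, so it suits test or demo sets that have no ground truth. An illustrative inference loop (test_set and model are assumed names, not part of the example):

from torch.utils.data import DataLoader

test_loader = DataLoader(test_set, batch_size=1, shuffle=False, num_workers=1)
for data in test_loader:
    img_L, L_path = data['L'], data['L_path'][0]
    # img_E = model(img_L)  # the restored estimate would be produced here

Example 5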
    def __getitem__(self, index):

        H_path = 'toy.png'
        if self.opt['phase'] == 'train':

            patch_H = self.H_data[index]

            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = np.random.randint(0, 8)
            patch_H = util.augment_img(patch_H, mode=mode)

            patch_H = util.uint2tensor3(patch_H)
            patch_L = patch_H.clone()

            # ------------------------------------
            # add noise
            # ------------------------------------
            noise = torch.randn(patch_L.size()).mul_(self.sigma / 255.0)
            patch_L.add_(noise)

        else:

            H_path = self.paths_H[index]
            img_H = util.imread_uint(H_path, self.n_channels)
            img_H = util.uint2single(img_H)
            img_L = np.copy(img_H)

            # ------------------------------------
            # add noise
            # ------------------------------------
            np.random.seed(seed=0)
            img_L += np.random.normal(0, self.sigma_test / 255.0, img_L.shape)
            patch_L = util.single2tensor3(img_L)
            patch_H = util.single2tensor3(img_H)

        L_path = H_path
        return {'L': patch_L, 'H': patch_H, 'L_path': L_path, 'H_path': H_path}
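In the training branch the patches come from preloaded H_data and the noisy input is synthesized on the fly, so H_path stays at the placeholder 'toy.png' and L_path simply mirrors it. At test time the NumPy seed is fixed (np.random.seed(0)), which makes the degraded inputs identical across evaluation runs.

Example 6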
    def __getitem__(self, index):
        # -------------------------------------
        # get H image
        # -------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)

        L_path = H_path

        if self.opt['phase'] == 'train':
            """
            # --------------------------------
            # get L/H/M patch pairs
            # --------------------------------
            """
            H, W = img_H.shape[:2]

            # ---------------------------------
            # randomly crop the patch
            # ---------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]

            # ---------------------------------
            # augmentation - flip, rotate
            # ---------------------------------
            mode = np.random.randint(0, 8)
            patch_H = util.augment_img(patch_H, mode=mode)

            # ---------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # ---------------------------------
            img_H = util.uint2tensor3(patch_H)
            img_L = img_H.clone()

            # ---------------------------------
            # get noise level
            # ---------------------------------
            # noise_level = torch.FloatTensor([np.random.randint(self.sigma_min, self.sigma_max)])/255.0
            noise_level = torch.FloatTensor(
                [np.random.uniform(self.sigma_min, self.sigma_max)]) / 255.0

            # ---------------------------------
            # add noise
            # ---------------------------------
            noise = torch.randn(img_L.size()).mul_(noise_level).float()
            img_L.add_(noise)

        else:
            """
            # --------------------------------
            # get L/H/sigma image pairs
            # --------------------------------
            """
            img_H = util.uint2single(img_H)
            img_L = np.copy(img_H)
            np.random.seed(seed=0)
            img_L += np.random.normal(0, self.sigma_test / 255.0, img_L.shape)
            noise_level = torch.FloatTensor([self.sigma_test / 255.0])

            # ---------------------------------
            # L/H image pairs
            # ---------------------------------
            img_H = util.single2tensor3(img_H)
            img_L = util.single2tensor3(img_L)

        noise_level = noise_level.unsqueeze(1).unsqueeze(1)

        return {
            'L': img_L,
            'H': img_H,
            'C': noise_level,
            'L_path': L_path,
            'H_path': H_path
        }
Example 7
    def __getitem__(self, index):
        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)

        L_path = H_path

        if self.opt['phase'] == 'train':
            """
            # --------------------------------
            # get L/H patch pairs
            # --------------------------------
            """
            H, W, _ = img_H.shape

            # --------------------------------
            # randomly crop the patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]

            # --------------------------------
            # augmentation - flip, rotate
            # --------------------------------
            mode = np.random.randint(0, 8)
            patch_H = util.augment_img(patch_H, mode=mode)

            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_H = util.uint2tensor3(patch_H)
            img_L = img_H.clone()

            # --------------------------------
            # add noise
            # --------------------------------
            noise = torch.randn(img_L.size()).mul_(self.sigma / 255.0)
            img_L.add_(noise)

        else:
            """
            # --------------------------------
            # get L/H image pairs
            # --------------------------------
            """
            img_H = util.uint2single(img_H)
            img_L = np.copy(img_H)

            # --------------------------------
            # add noise
            # --------------------------------
            np.random.seed(seed=0)
            img_L += np.random.normal(0, self.sigma_test / 255.0, img_L.shape)

            # --------------------------------
            # HWC to CHW, numpy to tensor
            # --------------------------------
            img_L = util.single2tensor3(img_L)
            img_H = util.single2tensor3(img_H)

        return {'L': img_L, 'H': img_H, 'H_path': H_path, 'L_path': L_path}
Example 8
    def __getitem__(self, index):

        # -------------------
        # get H image
        # -------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        L_path = H_path

        if self.opt['phase'] == 'train':

            # ---------------------------
            # 1) scale factor, ensure each batch only involves one scale factor
            # ---------------------------
            if self.count % self.opt['dataloader_batch_size'] == 0:
                # sf = random.choice([1,2,3,4])
                self.sf = random.choice(self.scales)
                # self.count = 0  # optional
            self.count += 1
            H, W, _ = img_H.shape

            # ----------------------------
            # randomly crop the patch
            # ----------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]

            # ---------------------------
            # augmentation - flip, rotate
            # ---------------------------
            mode = np.random.randint(0, 8)
            patch_H = util.augment_img(patch_H, mode=mode)

            # ---------------------------
            # 2) kernel
            # ---------------------------
            r_value = random.randint(0, 7)
            if r_value > 3:
                k = utils_deblur.blurkernel_synthesis(h=25)  # motion blur
            else:
                sf_k = random.choice(self.scales)
                k = utils_sisr.gen_kernel(scale_factor=np.array(
                    [sf_k, sf_k]))  # Gaussian blur
                mode_k = random.randint(0, 7)
                k = util.augment_img(k, mode=mode_k)

            # ---------------------------
            # 3) noise level
            # ---------------------------
            if random.randint(0, 8) == 1:
                noise_level = 0 / 255.0
            else:
                noise_level = np.random.randint(0, self.sigma_max) / 255.0

            # ---------------------------
            # Low-quality image
            # ---------------------------
            img_L = ndimage.filters.convolve(patch_H,
                                             np.expand_dims(k, axis=2),
                                             mode='wrap')
            img_L = img_L[0::self.sf, 0::self.sf, ...]
            # add Gaussian noise
            img_L = util.uint2single(img_L) + np.random.normal(
                0, noise_level, img_L.shape)
            img_H = patch_H

        else:

            k = self.kernels[0, 0].astype(np.float64)  # validation kernel
            k /= np.sum(k)
            noise_level = 0. / 255.0  # validation noise level
            img_L = ndimage.filters.convolve(img_H,
                                             np.expand_dims(k, axis=2),
                                             mode='wrap')  # blur
            img_L = img_L[0::self.sf_validation, 0::self.sf_validation,
                          ...]  # downsampling
            img_L = util.uint2single(img_L) + np.random.normal(
                0, noise_level, img_L.shape)

        k = util.single2tensor3(np.expand_dims(np.float32(k), axis=2))
        img_H, img_L = util.uint2tensor3(img_H), util.single2tensor3(img_L)
        noise_level = torch.FloatTensor([noise_level]).view([1, 1, 1])

        return {
            'L': img_L,
            'H': img_H,
            'k': k,
            'sigma': noise_level,
            'sf': self.sf,
            'L_path': L_path,
            'H_path': H_path
        }
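The training branch implements the classical degradation model: blur the clean patch with a randomly chosen motion or Gaussian kernel, subsample every sf-th pixel, then add Gaussian noise. A standalone sketch of the same pipeline on a float image (function name and signature are illustrative):

import numpy as np
from scipy import ndimage

def degrade(img_H, k, sf, noise_level):
    # img_H: HxWxC float image in [0, 1]; k: 2-D blur kernel (sums to 1); sf: integer scale factor
    img_L = ndimage.convolve(img_H, np.expand_dims(k, axis=2), mode='wrap')  # blur
    img_L = img_L[0::sf, 0::sf, ...]                                         # downsample
    img_L = img_L + np.random.normal(0, noise_level, img_L.shape)            # add Gaussian noise
    return img_L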