Example #1
    def __getitem__(self, index):

        if self.opt['phase'] == 'train':

            patch_L, patch_H = self.L_data[index], self.H_data[index]

            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = random.randint(0, 7)
            patch_L = util.augment_img(patch_L, mode=mode)
            patch_H = util.augment_img(patch_H, mode=mode)

            patch_L, patch_H = util.uint2tensor3(patch_L), util.uint2tensor3(
                patch_H)

        else:

            L_path, H_path = self.paths_L[index], self.paths_H[index]
            patch_L = util.imread_uint(L_path, self.n_channels)
            patch_H = util.imread_uint(H_path, self.n_channels)

            patch_L, patch_H = util.uint2tensor3(patch_L), util.uint2tensor3(
                patch_H)

        return {'L': patch_L, 'H': patch_H}
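For context, a dataset with a __getitem__ like the one above is normally consumed through torch.utils.data.DataLoader, which stacks the returned dicts into batched tensors. A minimal, self-contained sketch (ToyPairDataset and the random 64x64 patches are placeholders, not the dataset from the example):

import torch
from torch.utils.data import Dataset, DataLoader

class ToyPairDataset(Dataset):
    """Stand-in dataset returning {'L': ..., 'H': ...} pairs like the example."""
    def __len__(self):
        return 8

    def __getitem__(self, index):
        patch_H = torch.rand(3, 64, 64)   # placeholder H patch (CHW float tensor)
        patch_L = patch_H.clone()         # placeholder L patch
        return {'L': patch_L, 'H': patch_H}

loader = DataLoader(ToyPairDataset(), batch_size=4, shuffle=True)
for batch in loader:
    img_L, img_H = batch['L'], batch['H']   # each of shape (4, 3, 64, 64)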
Example #2
    def __getitem__(self, index):

        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)

        # ------------------------------------
        # get L image
        # ------------------------------------
        L_path = self.paths_L[index]
        img_L = util.imread_uint(L_path, self.n_channels)

        # ------------------------------------
        # if train, get L/H patch pair
        # ------------------------------------
        if self.opt['phase'] == 'train':

            H, W, _ = img_H.shape

            # --------------------------------
            # randomly crop the patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_L = img_L[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]
            patch_H = img_H[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]

            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = np.random.randint(0, 8)
            patch_L, patch_H = util.augment_img(
                patch_L, mode=mode), util.augment_img(patch_H, mode=mode)

            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_L, img_H = util.uint2tensor3(patch_L), util.uint2tensor3(
                patch_H)

        else:

            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_L, img_H = util.uint2tensor3(img_L), util.uint2tensor3(img_H)

        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
Example #3
    def __getitem__(self, index: int) -> Dict[str, Union[str, torch.Tensor]]:
        # get H image
        img_path = self.img_paths[index]
        img_H = util.imread_uint(img_path, self.n_channels)

        H, W = img_H.shape[:2]

        if self.opt['phase'] == 'train':

            self.count += 1

            # crop
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]

            # augmentation
            patch_H = util.augment_img(patch_H, mode=np.random.randint(0, 8))

            # HWC to CHW, numpy(uint) to tensor
            img_H = util.uint2tensor3(patch_H)
            img_L: torch.Tensor = img_H.clone()

            # get noise level
            noise_level: torch.FloatTensor = torch.FloatTensor(
                [np.random.uniform(self.sigma[0], self.sigma[1])]) / 255.0

            # add noise
            noise = torch.randn(img_L.size()).mul_(noise_level).float()
            img_L.add_(noise)

        else:
            img_H = util.uint2single(img_H)
            img_L = np.copy(img_H)

            # add noise
            np.random.seed(seed=0)
            img_L += np.random.normal(0, self.sigma / 255.0, img_L.shape)

            noise_level = torch.FloatTensor([self.sigma / 255.0])

            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(
                img_L)

        return {
            'y': img_L,
            'y_gt': img_H,
            'sigma': noise_level.unsqueeze(1).unsqueeze(1),
            'path': img_path
        }
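The scalar noise level above is returned as a (1, 1, 1) tensor so it can be broadcast into a per-pixel noise-level map downstream. A minimal sketch of that step (the shapes and the FFDNet-style concatenation are assumptions about the consumer, not part of the example):

import torch

sigma = torch.full((1, 1, 1), 25.0 / 255.0)                  # example value only
img_L = torch.rand(3, 64, 64)                                # stand-in noisy patch
noise_map = sigma.expand(1, img_L.size(-2), img_L.size(-1))  # (1, H, W)
model_input = torch.cat((img_L, noise_map), dim=0)           # (C + 1, H, W)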
Example #4
    def __getitem__(self, index):

        H_path = 'toy.png'
        if self.opt['phase'] == 'train':

            patch_H = self.H_data[index]

            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = np.random.randint(0, 8)
            patch_H = util.augment_img(patch_H, mode=mode)

            patch_H = util.uint2tensor3(patch_H)
            patch_L = patch_H.clone()

            # ------------------------------------
            # add noise
            # ------------------------------------
            noise = torch.randn(patch_L.size()).mul_(self.sigma / 255.0)
            patch_L.add_(noise)

        else:

            H_path = self.paths_H[index]
            img_H = util.imread_uint(H_path, self.n_channels)
            img_H = util.uint2single(img_H)
            img_L = np.copy(img_H)

            # ------------------------------------
            # add noise
            # ------------------------------------
            np.random.seed(seed=0)
            img_L += np.random.normal(0, self.sigma_test / 255.0, img_L.shape)
            patch_L, patch_H = util.single2tensor3(img_L), util.single2tensor3(
                img_H)

        L_path = H_path
        return {'L': patch_L, 'H': patch_H, 'L_path': L_path, 'H_path': H_path}
Example #5
    def __getitem__(self, index):
        # -------------------------------------
        # get H image
        # -------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)

        L_path = H_path

        if self.opt['phase'] == 'train':
            """
            # --------------------------------
            # get L/H/M patch pairs
            # --------------------------------
            """
            H, W = img_H.shape[:2]

            # ---------------------------------
            # randomly crop the patch
            # ---------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]

            # ---------------------------------
            # augmentation - flip, rotate
            # ---------------------------------
            mode = np.random.randint(0, 8)
            patch_H = util.augment_img(patch_H, mode=mode)

            # ---------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # ---------------------------------
            img_H = util.uint2tensor3(patch_H)
            img_L = img_H.clone()

            # ---------------------------------
            # get noise level
            # ---------------------------------
            # noise_level = torch.FloatTensor([np.random.randint(self.sigma_min, self.sigma_max)])/255.0
            noise_level = torch.FloatTensor(
                [np.random.uniform(self.sigma_min, self.sigma_max)]) / 255.0

            # ---------------------------------
            # add noise
            # ---------------------------------
            noise = torch.randn(img_L.size()).mul_(noise_level).float()
            img_L.add_(noise)

        else:
            """
            # --------------------------------
            # get L/H/sigma image pairs
            # --------------------------------
            """
            img_H = util.uint2single(img_H)
            img_L = np.copy(img_H)
            np.random.seed(seed=0)
            img_L += np.random.normal(0, self.sigma_test / 255.0, img_L.shape)
            noise_level = torch.FloatTensor([self.sigma_test / 255.0])

            # ---------------------------------
            # L/H image pairs
            # ---------------------------------
            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(
                img_L)

        noise_level = noise_level.unsqueeze(1).unsqueeze(1)

        return {
            'L': img_L,
            'H': img_H,
            'C': noise_level,
            'L_path': L_path,
            'H_path': H_path
        }
Example #6
    def __getitem__(self, index):
        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)

        L_path = H_path

        if self.opt['phase'] == 'train':
            """
            # --------------------------------
            # get L/H patch pairs
            # --------------------------------
            """
            H, W, _ = img_H.shape

            # --------------------------------
            # randomly crop the patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]

            # --------------------------------
            # augmentation - flip, rotate
            # --------------------------------
            mode = np.random.randint(0, 8)
            patch_H = util.augment_img(patch_H, mode=mode)

            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_H = util.uint2tensor3(patch_H)
            img_L = img_H.clone()

            # --------------------------------
            # add noise
            # --------------------------------
            noise = torch.randn(img_L.size()).mul_(self.sigma / 255.0)
            img_L.add_(noise)

        else:
            """
            # --------------------------------
            # get L/H image pairs
            # --------------------------------
            """
            img_H = util.uint2single(img_H)
            img_L = np.copy(img_H)

            # --------------------------------
            # add noise
            # --------------------------------
            np.random.seed(seed=0)
            img_L += np.random.normal(0, self.sigma_test / 255.0, img_L.shape)

            # --------------------------------
            # HWC to CHW, numpy to tensor
            # --------------------------------
            img_L = util.single2tensor3(img_L)
            img_H = util.single2tensor3(img_H)

        return {'L': img_L, 'H': img_H, 'H_path': H_path, 'L_path': L_path}
Example #7
    def __getitem__(self, index):

        # -------------------
        # get H image
        # -------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        L_path = H_path

        if self.opt['phase'] == 'train':

            # ---------------------------
            # 1) scale factor, ensure each batch only involves one scale factor
            # ---------------------------
            if self.count % self.opt['dataloader_batch_size'] == 0:
                # sf = random.choice([1,2,3,4])
                self.sf = random.choice(self.scales)
                # self.count = 0  # optional
            self.count += 1
            H, W, _ = img_H.shape

            # ----------------------------
            # randomly crop the patch
            # ----------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size,
                            rnd_w:rnd_w + self.patch_size, :]

            # ---------------------------
            # augmentation - flip, rotate
            # ---------------------------
            mode = np.random.randint(0, 8)
            patch_H = util.augment_img(patch_H, mode=mode)

            # ---------------------------
            # 2) kernel
            # ---------------------------
            r_value = random.randint(0, 7)
            if r_value > 3:
                k = utils_deblur.blurkernel_synthesis(h=25)  # motion blur
            else:
                sf_k = random.choice(self.scales)
                k = utils_sisr.gen_kernel(scale_factor=np.array(
                    [sf_k, sf_k]))  # Gaussian blur
                mode_k = random.randint(0, 7)
                k = util.augment_img(k, mode=mode_k)

            # ---------------------------
            # 3) noise level
            # ---------------------------
            if random.randint(0, 8) == 1:
                noise_level = 0 / 255.0
            else:
                noise_level = np.random.randint(0, self.sigma_max) / 255.0

            # ---------------------------
            # Low-quality image
            # ---------------------------
            img_L = ndimage.filters.convolve(patch_H,
                                             np.expand_dims(k, axis=2),
                                             mode='wrap')
            img_L = img_L[0::self.sf, 0::self.sf, ...]
            # add Gaussian noise
            img_L = util.uint2single(img_L) + np.random.normal(
                0, noise_level, img_L.shape)
            img_H = patch_H

        else:

            k = self.kernels[0, 0].astype(np.float64)  # validation kernel
            k /= np.sum(k)
            noise_level = 0. / 255.0  # validation noise level
            img_L = ndimage.filters.convolve(img_H,
                                             np.expand_dims(k, axis=2),
                                             mode='wrap')  # blur
            img_L = img_L[0::self.sf_validation, 0::self.sf_validation,
                          ...]  # downsampling
            img_L = util.uint2single(img_L) + np.random.normal(
                0, noise_level, img_L.shape)

        k = util.single2tensor3(np.expand_dims(np.float32(k), axis=2))
        img_H, img_L = util.uint2tensor3(img_H), util.single2tensor3(img_L)
        noise_level = torch.FloatTensor([noise_level]).view([1, 1, 1])

        return {
            'L': img_L,
            'H': img_H,
            'k': k,
            'sigma': noise_level,
            'sf': self.sf,
            'L_path': L_path,
            'H_path': H_path
        }
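The reason Example #7 forces one scale factor per batch is that the low-quality image ends up with spatial size patch_size // sf after subsampling, and the default collate function can only stack tensors of equal shape. A quick illustration (patch_size = 96 is an assumed value):

patch_size = 96
for sf in (1, 2, 3, 4):
    print(sf, patch_size // sf)   # 96, 48, 32, 24 -> cannot be stacked in one batch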
Example #8
    def __getitem__(self, index):

        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        img_H = util.uint2single(img_H)

        # ------------------------------------
        # modcrop for SR
        # ------------------------------------
        img_H = util.modcrop(img_H, self.sf)

        # ------------------------------------
        # synthesize L image via MATLAB's bicubic
        # ------------------------------------
        H, W, _ = img_H.shape
        img_L = util.imresize_np(img_H, 1 / self.sf, True)

        if self.opt['phase'] == 'train':
            """
            # --------------------------------
            # get L/H patch pairs
            # --------------------------------
            """
            H, W, C = img_L.shape

            # --------------------------------
            # randomly crop L patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.L_size))
            rnd_w = random.randint(0, max(0, W - self.L_size))
            img_L = img_L[rnd_h:rnd_h + self.L_size,
                          rnd_w:rnd_w + self.L_size, :]

            # --------------------------------
            # crop corresponding H patch
            # --------------------------------
            rnd_h_H, rnd_w_H = int(rnd_h * self.sf), int(rnd_w * self.sf)
            img_H = img_H[rnd_h_H:rnd_h_H + self.patch_size,
                          rnd_w_H:rnd_w_H + self.patch_size, :]

            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = np.random.randint(0, 8)
            img_L, img_H = util.augment_img(
                img_L, mode=mode), util.augment_img(img_H, mode=mode)

            # --------------------------------
            # get patch pairs
            # --------------------------------
            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(
                img_L)

            # --------------------------------
            # select noise level and get Gaussian noise
            # --------------------------------
            if random.random() < 0.1:
                noise_level = torch.zeros(1).float()
            else:
                noise_level = torch.FloatTensor([
                    np.random.uniform(self.sigma_min, self.sigma_max)
                ]) / 255.0
                # noise_level = torch.rand(1)*50/255.0
                # noise_level = torch.min(torch.from_numpy(np.float32([7*np.random.chisquare(2.5)/255.0])),torch.Tensor([50./255.]))

        else:

            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(
                img_L)

            noise_level = torch.FloatTensor([self.sigma_test])

        # ------------------------------------
        # add noise
        # ------------------------------------
        noise = torch.randn(img_L.size()).mul_(noise_level).float()
        img_L.add_(noise)

        # ------------------------------------
        # get noise level map M
        # ------------------------------------
        M_vector = noise_level.unsqueeze(1).unsqueeze(1)
        M = M_vector.repeat(1, img_L.size()[-2], img_L.size()[-1])
        """
        # -------------------------------------
        # concat L and noise level map M
        # -------------------------------------
        """
        img_L = torch.cat((img_L, M), 0)

        L_path = H_path

        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
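Because Example #8 concatenates the one-channel noise-level map M onto img_L, whatever network consumes 'L' has to accept n_channels + 1 input channels. A brief sketch of that implication (the 64 output channels are an arbitrary choice, not from the example):

import torch.nn as nn

n_channels = 3
first_conv = nn.Conv2d(n_channels + 1, 64, kernel_size=3, padding=1)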
Example #9
    def __getitem__(self, index):

        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        H_resolution = [int(s) for s in self.sizes_H[index].split('_')]
        img_H = _read_img_lmdb(self.h_env, H_path, H_resolution)
        img_H = util.uint2single(img_H)
        img_H = util.modcrop(img_H, self.sf)

        # ----------------------------------------------
        # get 4x-downscaled sharp image (img_h0) to use as the deblurring target during training
        # ----------------------------------------------
        img_h0 = util.imresize_np(img_H, 1 / 4, True)

        # ------------------------------------
        # get L image
        # ------------------------------------
        L_path = self.paths_L[index]
        L_resolution = [int(s) for s in self.sizes_L[index].split('_')]
        img_L = _read_img_lmdb(self.l_env, L_path, L_resolution)
        img_L = util.uint2single(img_L)

        #        # ----------------------------------------------
        #        # get blur image to be input in train for deblur
        #        # ----------------------------------------------
        #        img_l0 = img_L

        # ------------------------------------
        # if train, get L/H patch pair
        # ------------------------------------
        if self.opt['phase'] == 'train':

            H, W, C = img_L.shape
            # --------------------------------
            # randomly crop the L patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.L_size))
            rnd_w = random.randint(0, max(0, W - self.L_size))
            img_L = img_L[rnd_h:rnd_h + self.L_size,
                          rnd_w:rnd_w + self.L_size, :]
            img_h0 = img_h0[rnd_h:rnd_h + self.L_size,
                            rnd_w:rnd_w + self.L_size, :]
            # --------------------------------
            # crop corresponding H patch
            # --------------------------------
            rnd_h_H, rnd_w_H = int(rnd_h * self.sf), int(rnd_w * self.sf)
            img_H = img_H[rnd_h_H:rnd_h_H + self.patch_size,
                          rnd_w_H:rnd_w_H + self.patch_size, :]
            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = np.random.randint(0, 8)
            img_L, img_H, img_h0 = util.augment_img(
                img_L, mode=mode), util.augment_img(
                    img_H, mode=mode), util.augment_img(img_h0, mode=mode)

        # ------------------------------------
        # L/H pairs, HWC to CHW, numpy to tensor
        # ------------------------------------
        img_deblur = [img_L, img_h0]
        img_deblur = util.generate_pyramid(*img_deblur, n_scales=3)
        img_deblur = util.np2tensor(*img_deblur)
        img_ls = img_deblur[0]
        img_hs = img_deblur[1]
        img_L = img_deblur[0]
        img_H = util.single2tensor3(img_H)
        img_L0 = img_L[0]

        #print(img_L[0].shape,img_H.shape,img_ls[0].shape,img_hs[0].shape)
        return {
            'L0': img_L0,
            'L': img_L,
            'H': img_H,
            'L_path': L_path,
            'H_path': H_path,
            'ls': img_ls,
            'hs': img_hs
        }
Example #10
    def __getitem__(self, index):

        L_path = None
        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        img_H = util.uint2single(img_H)

        # ------------------------------------
        # modcrop
        # ------------------------------------
        img_H = util.modcrop(img_H, self.sf)

        # ------------------------------------
        # get L image
        # ------------------------------------
        if self.paths_L:
            # --------------------------------
            # directly load L image
            # --------------------------------
            L_path = self.paths_L[index]
            img_L = util.imread_uint(L_path, self.n_channels)
            img_L = util.uint2single(img_L)

        else:
            # --------------------------------
            # synthesize L image via MATLAB's bicubic
            # --------------------------------
            H, W = img_H.shape[:2]
            img_L = util.imresize_np(img_H, 1 / self.sf, True)

        # ------------------------------------
        # if train, get L/H patch pair
        # ------------------------------------
        if self.opt['phase'] == 'train':

            H, W, C = img_L.shape

            # --------------------------------
            # randomly crop the L patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.L_size))
            rnd_w = random.randint(0, max(0, W - self.L_size))
            img_L = img_L[rnd_h:rnd_h + self.L_size, rnd_w:rnd_w + self.L_size, :]

            # --------------------------------
            # crop corresponding H patch
            # --------------------------------
            rnd_h_H, rnd_w_H = int(rnd_h * self.sf), int(rnd_w * self.sf)
            img_H = img_H[rnd_h_H:rnd_h_H + self.patch_size, rnd_w_H:rnd_w_H + self.patch_size, :]

            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = np.random.randint(0, 8)
            img_L, img_H = util.augment_img(img_L, mode=mode), util.augment_img(img_H, mode=mode)

        # ------------------------------------
        # L/H pairs, HWC to CHW, numpy to tensor
        # ------------------------------------
        img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)

        if L_path is None:
            L_path = H_path

        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
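For the L and H crops in Example #10 to stay aligned, the patch sizes are presumably related by patch_size == L_size * sf, since the H crop offsets are scaled by sf. A one-line sanity check under that assumption (sf = 4 and patch_size = 96 are example values):

sf, patch_size = 4, 96
L_size = patch_size // sf
assert patch_size == L_size * sf   # the H patch covers exactly the upscaled L patch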
Example #11
    def __getitem__(self, index):

        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        img_H = util.uint2single(img_H)

        # ------------------------------------
        # modcrop for SR
        # ------------------------------------
        img_H = util.modcrop(img_H, self.sf)

        # ------------------------------------
        # kernel
        # ------------------------------------
        if self.opt['phase'] == 'train':
            l_max = 10
            theta = np.pi * np.random.rand(1)
            l1 = 0.1 + l_max * np.random.rand(1)
            l2 = 0.1 + (l1 - 0.1) * np.random.rand(1)

            kernel = utils_sisr.anisotropic_Gaussian(ksize=self.ksize, theta=theta[0], l1=l1[0], l2=l2[0])
        else:
            kernel = utils_sisr.anisotropic_Gaussian(ksize=self.ksize, theta=np.pi, l1=0.1, l2=0.1)

        k = np.reshape(kernel, (-1), order="F")
        k_reduced = np.dot(self.p, k)
        k_reduced = torch.from_numpy(k_reduced).float()

        # ------------------------------------
        # synthesize L image via the specified degradation model
        # ------------------------------------
        H, W, _ = img_H.shape
        img_L = utils_sisr.srmd_degradation(img_H, kernel, self.sf)
        img_L = np.float32(img_L)

        if self.opt['phase'] == 'train':
            """
            # --------------------------------
            # get L/H patch pairs
            # --------------------------------
            """
            H, W, C = img_L.shape

            # --------------------------------
            # randomly crop L patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.L_size))
            rnd_w = random.randint(0, max(0, W - self.L_size))
            img_L = img_L[rnd_h:rnd_h + self.L_size, rnd_w:rnd_w + self.L_size, :]

            # --------------------------------
            # crop corresponding H patch
            # --------------------------------
            rnd_h_H, rnd_w_H = int(rnd_h * self.sf), int(rnd_w * self.sf)
            img_H = img_H[rnd_h_H:rnd_h_H + self.patch_size, rnd_w_H:rnd_w_H + self.patch_size, :]

            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = np.random.randint(0, 8)
            img_L, img_H = util.augment_img(img_L, mode=mode), util.augment_img(img_H, mode=mode)

            # --------------------------------
            # get patch pairs
            # --------------------------------
            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)

            # --------------------------------
            # select noise level and get Gaussian noise
            # --------------------------------
            if random.random() < 0.1:
                noise_level = torch.zeros(1).float()
            else:
                noise_level = torch.FloatTensor([np.random.uniform(self.sigma_min, self.sigma_max)])/255.0
                # noise_level = torch.rand(1)*50/255.0
                # noise_level = torch.min(torch.from_numpy(np.float32([7*np.random.chisquare(2.5)/255.0])),torch.Tensor([50./255.]))
    
        else:

            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)
            noise_level = torch.FloatTensor([self.sigma_test])

        # ------------------------------------
        # add noise
        # ------------------------------------
        noise = torch.randn(img_L.size()).mul_(noise_level).float()
        img_L.add_(noise)

        # ------------------------------------
        # get degradation map M
        # ------------------------------------
        M_vector = torch.cat((k_reduced, noise_level), 0).unsqueeze(1).unsqueeze(1)
        M = M_vector.repeat(1, img_L.size()[-2], img_L.size()[-1])

        """
        # -------------------------------------
        # concat L and noise level map M
        # -------------------------------------
        """

        img_L = torch.cat((img_L, M), 0)
        L_path = H_path

        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
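The degradation map in Example #11 stacks the PCA-reduced kernel and the noise level into one vector and tiles it over the patch. A shape-only sketch, assuming the projection self.p reduces the kernel to 15 dimensions (typical for SRMD, but an assumption here):

import torch

k_reduced = torch.rand(15)                       # stand-in PCA-reduced blur kernel
noise_level = torch.FloatTensor([10 / 255.0])    # example noise level
M_vector = torch.cat((k_reduced, noise_level), 0).unsqueeze(1).unsqueeze(1)  # (16, 1, 1)
M = M_vector.repeat(1, 32, 32)                   # (16, H, W) per-pixel degradation map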