# Example no. 1
    def __getitem__(self, index):
        """Fetch a single image and its metadata.

        Parameters:
            index (int): a random integer for data indexing.
        Returns:
            dict: ``{'A': tensor, 'A_path': str}`` when ``self.vars`` is
            ``'A'``, otherwise ``{'LR': tensor, 'LR_path': str}``, where
            the tensor is the loaded image and the string its path.
        """
        # read the single image and its filesystem path
        image, path = read_single_dataset(self.opt, index, self.A_paths,
                                          self.A_env)

        # optional color-space conversion
        # TODO: move to get_transform()
        target_color = self.opt.get('color', None)
        if target_color:
            image = channel_convert(image_channels(image), target_color,
                                    [image])[0]

        # NOTE(review): the paired get_transform() application is currently
        # disabled for this dataset; only the tensor conversion is applied.

        ######## Convert images to PyTorch Tensors ########
        image = self.tensor_transform(image)

        if self.vars == 'A':
            return {'A': image, 'A_path': path}
        return {'LR': image, 'LR_path': path}
# Example no. 2
    def __getitem__(self, index):
        """Return a paired (A/LR, B/HR) data point and its metadata.

        Parameters:
            index (int): a random integer for data indexing.
        Returns:
            dict: ``{'A', 'B', 'A_path', 'B_path'}`` when ``self.vars`` is
            ``'AB'``, otherwise ``{'LR', 'HR', 'LR_path', 'HR_path'}``.
            A/LR (tensor): an image in the input domain.
            B/HR (tensor): its corresponding image in the target domain.
            *_path (str): paths of the images; A_path falls back to B_path
            when A was generated on the fly from B.
        """
        scale = self.opt.get('scale')

        ######## Read the images ########
        # Either one combined image that is split into the A/B pair, or two
        # separate image sets read in parallel.
        if self.AB_paths:
            img_A, img_B, A_path, B_path = read_split_single_dataset(
                self.opt, index, self.AB_paths, self.AB_env)
        else:
            img_A, img_B, A_path, B_path = read_imgs_from_path(
                self.opt, index, self.A_paths, self.B_paths, self.A_env,
                self.B_env)

        ######## Modify the images ########

        # for the validation / test phases
        if self.opt['phase'] != 'train':
            img_type = image_type(img_B)
            # B/HR modcrop (crop so dimensions are compatible with scale)
            img_B = modcrop(img_B, scale=scale, img_type=img_type)
            # modcrop and downscale A/LR if enabled.
            # Use .get() like every other optional option read in this file,
            # so a missing 'lr_downscale' key means "disabled" instead of
            # raising KeyError.
            if self.opt.get('lr_downscale'):
                img_A = modcrop(img_A, scale=scale, img_type=img_type)
                # TODO: 'pil' images will use default method for scaling
                # NOTE(review): 777 appears to be a project sentinel that
                # selects the default/random downscale algorithm — confirm.
                img_A, _ = Scale(img_A,
                                 scale,
                                 algo=self.opt.get('lr_downscale_types', 777),
                                 img_type=img_type)

        # change color space if necessary ('color' applies to both images,
        # 'color_HR'/'color_LR' override per image)
        # TODO: move to get_transform()
        color_B = self.opt.get('color', None) or self.opt.get('color_HR', None)
        if color_B:
            img_B = channel_convert(image_channels(img_B), color_B, [img_B])[0]
        color_A = self.opt.get('color', None) or self.opt.get('color_LR', None)
        if color_A:
            img_A = channel_convert(image_channels(img_A), color_A, [img_A])[0]

        ######## Augmentations ########

        # Augmentations during training
        if self.opt['phase'] == 'train':

            default_int_method = get_default_imethod(image_type(img_A))

            # random HR downscale
            img_A, img_B = random_downscale_B(img_A=img_A,
                                              img_B=img_B,
                                              opt=self.opt)

            # validate there's an img_A, if not, use img_B
            if img_A is None:
                img_A = img_B
                print(
                    f"Image A: {A_path} was not loaded correctly, using B pair to downscale on the fly."
                )

            # validate proper dimensions between paired images, generate A if needed
            img_A, img_B = paired_imgs_check(img_A,
                                             img_B,
                                             opt=self.opt,
                                             ds_kernels=self.ds_kernels)

            # get and apply the paired transformations below: the same random
            # params are used for both images, rescaled so the A/B crops stay
            # aligned across the scale factor.
            transform_params = get_params(scale_opt(self.opt, scale),
                                          image_size(img_A))
            A_transform = get_transform(
                scale_opt(self.opt, scale),
                transform_params,
                # grayscale=(input_nc == 1),
                method=default_int_method)
            B_transform = get_transform(
                self.opt,
                scale_params(transform_params, scale),
                # grayscale=(output_nc == 1),
                method=default_int_method)
            img_A = A_transform(img_A)
            img_B = B_transform(img_B)

            # Below are the On The Fly augmentations

            # get and apply the unpaired transformations below (each image
            # gets its own independent augmentation pipeline)
            a_aug_params, b_aug_params = get_unpaired_params(self.opt)

            a_augmentations = get_augmentations(
                self.opt,
                params=a_aug_params,
                noise_patches=self.noise_patches,
            )
            b_augmentations = get_augmentations(
                self.opt,
                params=b_aug_params,
                noise_patches=self.noise_patches,
            )

            img_A = a_augmentations(img_A)
            img_B = b_augmentations(img_B)

        ######## Convert images to PyTorch Tensors ########

        tensor_transform = get_totensor(self.opt,
                                        params=self.totensor_params,
                                        toTensor=True,
                                        grayscale=False)
        img_A = tensor_transform(img_A)
        img_B = tensor_transform(img_B)

        # A may have been synthesized from B; report B's path in that case
        if A_path is None:
            A_path = B_path
        if self.vars == 'AB':
            return {'A': img_A, 'B': img_B, 'A_path': A_path, 'B_path': B_path}
        else:
            return {
                'LR': img_A,
                'HR': img_B,
                'LR_path': A_path,
                'HR_path': B_path
            }
# Example no. 3
    def __getitem__(self, index):
        """Return an unpaired (A, B) data point and its metadata.

        Parameters:
            index (int): a random integer for data indexing.
        Returns:
            dict: ``{'A', 'B', 'A_path', 'B_path'}`` when ``self.vars`` is
            ``'AB'``, otherwise ``{'LR', 'HR', 'LR_path', 'HR_path'}``.
            A (tensor): an image in the input domain.
            B (tensor): its corresponding image in the target domain.
            A_path / B_path (str): paths of the A and B images.
        """
        scale = self.opt.get('scale')

        ######## Read the images ########
        img_A, A_path = read_single_dataset(opt=self.opt,
                                            index=index,
                                            paths=self.A_paths,
                                            env=self.A_env,
                                            idx_case='inrange',
                                            d_size=self.A_size)
        img_B, B_path = read_single_dataset(opt=self.opt,
                                            index=index,
                                            paths=self.B_paths,
                                            env=self.B_env,
                                            idx_case=self.idx_case,
                                            d_size=self.B_size)

        ######## Modify the images ########
        # change color space if necessary ('color' applies to both images,
        # 'color_A'/'color_B' override per image)
        # TODO: move to get_transform()
        def convert_color(img, per_image_key):
            target = self.opt.get('color', None) or self.opt.get(
                per_image_key, None)
            if target:
                return channel_convert(image_channels(img), target, [img])[0]
            return img

        img_B = convert_color(img_B, 'color_B')
        img_A = convert_color(img_A, 'color_A')

        # apply image transformations: each image gets its own set of random
        # params since the pair is unpaired
        interp_method = get_default_imethod(image_type(img_A))
        params_a = get_params(scale_opt(self.opt, scale), image_size(img_A))
        params_b = get_params(self.opt, image_size(img_B))

        transform_a = get_transform(
            scale_opt(self.opt, scale),
            params_a,
            # grayscale=(input_nc == 1),
            method=interp_method)
        transform_b = get_transform(
            self.opt,
            params_b,
            # grayscale=(output_nc == 1),
            method=interp_method)
        img_A = transform_a(img_A)
        img_B = transform_b(img_B)

        # TODO: unpaired on-the-fly augmentations (get_unpaired_params /
        # get_augmentations) are not needed initially, but available.

        ######## Convert images to PyTorch Tensors ########
        to_tensor = get_totensor(self.opt,
                                 params=self.totensor_params,
                                 toTensor=True,
                                 grayscale=False)
        img_A = to_tensor(img_A)
        img_B = to_tensor(img_B)

        if self.vars == 'AB':
            return {'A': img_A, 'B': img_B, 'A_path': A_path, 'B_path': B_path}
        return {
            'LR': img_A,
            'HR': img_B,
            'LR_path': A_path,
            'HR_path': B_path
        }