Example #1
0
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary with keys A, B, A_path and B_path:
            A (tensor)   -- an image in the input domain
            B (tensor)   -- its corresponding image in the target domain
            A_path (str) -- relative path of the input image
            B_path (str) -- relative path of the target image
        """
        # Each index entry is a line holding two whitespace-separated
        # relative paths: input image first, target image second.
        tokens = self.AB_paths[index].split()
        a_rel = tokens[0]
        b_rel = tokens[1]

        img_a = Image.open(os.path.join(self.dataroot, a_rel)).convert('RGB')
        img_b = Image.open(os.path.join(self.dataroot, b_rel)).convert('RGB')

        # Apply the same random transform parameters to both domains.
        params = get_params(self.opt, img_a.size)
        to_a = get_transform(self.opt, params, grayscale=(self.input_nc == 1))
        to_b = get_transform(self.opt, params, grayscale=(self.output_nc == 1))

        return {'A': to_a(img_a), 'B': to_b(img_b), 'A_path': a_rel, 'B_path': b_rel}
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor)    -- an image in the input domain
            B (tensor)    -- its corresponding image in the target domain
            A_paths (str) -- image paths
            B_paths (str) -- image paths
        """
        # Wrap the A index into range.
        # NOTE(review): B is indexed directly, not wrapped by self.B_size --
        # assumes B_paths is at least as long as the dataset; confirm upstream.
        path_a = self.A_paths[index % self.A_size]
        path_b = self.B_paths[index]

        img_a = Image.open(path_a).convert('RGB')
        img_b = Image.open(path_b).convert('RGB')

        # Same random transform parameters for both images.
        params = get_params(self.opt, img_a.size)
        tf_a = get_transform(self.opt, params, grayscale=(self.input_nc == 1))
        tf_b = get_transform(self.opt, params, grayscale=(self.output_nc == 1))

        return {'A': tf_a(img_a), 'B': tf_b(img_b), 'A_paths': path_a, 'B_paths': path_b}
Example #3
0
def open_image(path, opt):
    """Load the image at *path*, apply the dataset transform, return a tensor."""
    img = Image.open(path)
    params = get_params(opt, img.size)
    transform = get_transform(opt, params)
    return transform(img.convert('RGB'))
def get_paper_result(old_result_path):
    """Load a previously produced result image and return it as a tensor.

    NOTE(review): relies on a module-level ``opt`` object -- confirm it is
    defined before this function is called.
    """
    img = Image.open(old_result_path).convert('RGB')
    transform = get_transform(opt, get_params(opt, img.size))
    return transform(img)
Example #5
0
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor)    -- an image in the input (smaller) domain
            B (tensor)    -- its corresponding image in the target domain
            A_paths (str) -- image paths
            B_paths (str) -- image paths
        """
        path_a = self.A_paths[index]
        path_b = self.B_paths[index]
        img_a = Image.open(path_a).convert('RGB')
        img_b = Image.open(path_b).convert('RGB')

        # The target image must be at least as large as the source image.
        assert (
            img_b.size[0] >= img_a.size[0] and img_b.size[1] >= img_a.size[1]
        ), 'By default, we think that in general tasks, the image size of target domain B is greater than or equal to source domain A'

        params = get_params(self.opt, img_a.size)
        tf_a = get_transform(self.opt, params,
                             grayscale=(self.input_nc == 1))
        # B is cropped at a scaled size so the super-resolution factor holds.
        tf_b = get_transform(self.opt, params,
                             grayscale=(self.output_nc == 1),
                             crop_size_scale=self.opt.SR_factor)

        return {'A': tf_a(img_a), 'B': tf_b(img_b), 'A_paths': path_a, 'B_paths': path_b}
    def __getitem__(self, index):
        """Return a video-clip data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) - - a video in the input domain, shaped [frames, C, H, W]
            B (tensor) - - its corresponding video in the target domain
            A_paths (str) - - video paths
            B_paths (str) - - video paths
        """
        # read a video given a random integer index
        A_path = self.A_paths[index]
        B_path = self.B_paths[index]
        A, B = self.get_image_list(A_path, B_path, index)

        # sanity checks: equal frame counts, B frames at least as large as A
        assert (len(A) == len(B))
        for i in range(len(A)):
            assert (
                B[i].size[0] >= A[i].size[0] and B[i].size[1] >= A[i].size[1]
            ), 'By default, we think that in general tasks, the image size of target domain B is greater than or equal to source domain A'
        # every B frame must be exactly SR_factor times the matching A frame
        for i in range(len(A)):
            assert (B[i].size[0] == self.opt.SR_factor * A[i].size[0]
                    and B[i].size[1] == self.opt.SR_factor * A[i].size[1]
                    ), 'the dataset should satisfy the sr_factor {}'.format(
                        self.opt.SR_factor)

        # by default, we add an black image to the start of list A, B
        # black_img_A = Image.fromarray(np.zeros((A[0].size[1], A[0].size[0], self.input_nc), dtype=np.uint8))  # h w c
        # black_img_B = Image.fromarray(np.zeros((B[0].size[1], B[0].size[0], self.input_nc), dtype=np.uint8))  # h w c
        # A.insert(0, black_img_A)
        # B.insert(0, black_img_B)

        # one set of random parameters shared by both domains; the per-domain
        # transforms differ only via the "domain" keyword
        transform_params = get_params(self.opt, A[0].size)
        A_transform = get_transform(self.opt,
                                    transform_params,
                                    grayscale=(self.input_nc == 1),
                                    domain="A")
        B_transform = get_transform(self.opt,
                                    transform_params,
                                    grayscale=(self.output_nc == 1),
                                    domain="B")

        # transform every frame in place (PIL images become tensors)
        for i in range(len(A)):
            # print("doing transform..the {}th frame of {}th video".format(i, index))
            A[i] = A_transform(A[i])
            B[i] = B_transform(B[i])

        # list of 3dim to 4dim  e.g. ... [3,128,128] ... to [11,3,128,128]
        A = torch.stack(A, 0)
        B = torch.stack(B, 0)

        return {
            'A': A,
            'B': B,
            'A_paths': os.path.join(self.dir_A, A_path),
            'B_paths': os.path.join(self.dir_B, B_path)
        }
    def __getitem__(self, index):
        """Return the label map (A) and real image (B) for *index*.

        Both images may be loaded as RGB or as single-channel 32-bit
        ("gray") according to Color_Input / Color_Output; any other mode
        raises NotImplementedError.
        """
        ### input A (label maps)
        path_a = self.A_paths[index]
        img_a = Image.open(path_a)
        params = get_params(self.opt, img_a.size)

        tf_a = get_transform(self.opt, params, Color_Input=self.Color_Input)
        if self.Color_Input == "RGB":
            A_tensor = tf_a(img_a.convert('RGB'))
        elif self.Color_Input == "gray":
            A_tensor = tf_a(img_a.convert('I'))  # 'I' = 32-bit integer pixels
        else:
            raise NotImplementedError("This Color Channel not implemented")

        B_tensor = inst_tensor = feat_tensor = 0

        ### input B (real images)
        path_b = self.B_paths[index]
        tf_b = get_transform(self.opt, params, Color_Input=self.Color_Output)
        img_b = Image.open(path_b)
        if self.Color_Output == "RGB":
            B_tensor = tf_b(img_b.convert("RGB"))
        elif self.Color_Output == "gray":
            B_tensor = tf_b(img_b.convert("I"))
        else:
            raise NotImplementedError("This Color Channel not implemented")

        return {'label': A_tensor, 'image': B_tensor, 'path': path_a, 'index': index}
Example #8
0
    def __getitem__(self, index):
        """Return an A/B image pair (plus the label map L when training)."""
        path_a = self.A_paths[index]
        path_b = self.B_paths[index]
        img_a = Image.open(path_a).convert('RGB')
        img_b = Image.open(path_b).convert('RGB')

        # One set of transform parameters shared by A, B and L.
        params = get_params(self.opt, img_a.size, test=self.istest)
        shared_tf = get_transform(self.opt, params, test=self.istest)

        A = shared_tf(img_a)
        B = shared_tf(img_b)

        # At test time there is no label map to load.
        if self.istest:
            return {'A': A, 'A_paths': path_a, 'B': B, 'B_paths': path_b}

        path_l = self.L_paths[index]
        # Rescale the label image to [0, 1] before wrapping it back into PIL.
        scaled = np.array(Image.open(path_l), dtype=np.uint32) / 255
        img_l = Image.fromarray(scaled)
        # Nearest-neighbour keeps label values intact; no normalisation.
        label_tf = get_transform(self.opt, params, method=Image.NEAREST,
                                 normalize=False, test=self.istest)
        L = label_tf(img_l)

        return {'A': A, 'A_paths': path_a, 'B': B, 'B_paths': path_b,
                'L': L, 'L_paths': path_l}
    def __getitem__(self, index):
        """Return the pre-loaded image pair as batched tensors.

        The images live on the dataset itself (self.imgIn / self.imgOut);
        *index* only satisfies the Dataset protocol.

        Returns a dictionary with A, B (each with a leading batch dim) and
        empty A_paths / B_paths strings.
        """
        src = self.imgIn
        tgt = self.imgOut

        # Same random transform parameters for both images.
        params = get_params(self.opt, src.size)
        src_tf = get_transform(self.opt, params,
                               grayscale=(self.input_nc == 1),
                               add_noise=self.opt.add_noise)
        tgt_tf = get_transform(self.opt, params,
                               grayscale=(self.output_nc == 1))

        src = src_tf(src)
        tgt = tgt_tf(tgt)

        return {'A': src.unsqueeze(0), 'B': tgt.unsqueeze(0),
                'A_paths': "", 'B_paths': ""}
    def get_input_by_names(self, image_path, image, label_img):
        """Build the model input dict from an in-memory image and label map.

        Parameters:
            image_path (str)    -- path stored as metadata in the output
            image               -- the real image, already opened/converted
            label_img (ndarray) -- label map as a uint8 array

        Returns a dict with keys 'label', 'instance', 'image' and 'path';
        the tensors carry a leading batch dimension.  Subclasses may adjust
        the dict via self.postprocess.
        """
        label = Image.fromarray(label_img)
        params = get_params(self.opt, label.size)
        # Nearest-neighbour, unnormalised so label ids survive resizing.
        transform_label = get_transform(self.opt,
                                        params,
                                        method=Image.NEAREST,
                                        normalize=False)
        label_tensor = transform_label(label) * 255.0
        label_tensor[label_tensor ==
                     255] = self.opt.label_nc  # 'unknown' is opt.label_nc
        label_tensor.unsqueeze_(0)

        transform_image = get_transform(self.opt, params)
        image_tensor = transform_image(image)
        image_tensor.unsqueeze_(0)

        # BUG FIX: instance_tensor used to be bound only when
        # opt.no_instance was set, raising UnboundLocalError otherwise.
        # Instance maps are not produced by this path, so always provide
        # the placeholder.
        instance_tensor = torch.Tensor([0])

        input_dict = {
            'label': label_tensor,
            'instance': instance_tensor,
            'image': image_tensor,
            'path': image_path,
        }

        # Give subclasses a chance to modify the final output
        self.postprocess(input_dict)

        return input_dict
Example #11
0
def get_image_tensor(jpg_path, opt):
    """Open *jpg_path* as RGB, apply the dataset transform, return a tensor."""
    img = Image.open(jpg_path).convert('RGB')
    transform = get_transform(opt, get_params(opt, img.size))
    return transform(img)
Example #12
0
def get_label_tensor_from_kpts(kpts, jpg_path, opt):
    """Render keypoints *kpts* to an image and return it as a tensor.

    ``jpg_path`` is accepted for interface compatibility but is not used.
    """
    params = get_params(opt, np.array([256, 256]))  # fixed 256x256 canvas
    transform = get_transform(opt, params)
    return transform(draw_points(kpts))
Example #13
0
    def __getitem__(self, index):
        """Return a stacked pair of x-ray images with their labels.

        Returns a dictionary:
            A (tensor)  -- two transformed x-rays stacked on dim 0, each
                           replicated to 3 channels
            B (ndarray) -- the pair of labels
            info        -- extra metadata from self.info_list
        """
        (path_1, path_2) = self.image_paths[index]
        img_1 = Image.open(path_1).convert('L')
        img_2 = Image.open(path_2).convert('L')

        (label_1, label_2) = self.label_list[index]
        extra = self.info_list[index]

        # Same transform parameters for both images of the pair.
        params = get_params(self.opt, img_1.size)
        transform = get_transform(self.opt, params, grayscale=True, run_type=self.run_type)

        # Replicate the single gray channel into a 3-channel tensor.
        t_1 = transform(img_1)
        t_1 = torch.cat((t_1, t_1, t_1), 0)
        t_2 = transform(img_2)
        t_2 = torch.cat((t_2, t_2, t_2), 0)
        pair = torch.stack([t_1, t_2], dim=0)

        return {'A': pair, 'B': np.array([label_1, label_2]), 'info': extra}
Example #14
0
 def get_label_tensor(self, path):
     """Load the label map at *path* and return (label_tensor, params1).

     Nearest-neighbour resampling without normalisation keeps label ids
     intact through resizing; values are rescaled back to [0, 255] and
     the 255 'unknown' value is remapped to opt.label_nc.
     """
     label = Image.open(path)
     params1 = get_params(self.opt, label.size)
     transform_label = get_transform(self.opt, params1, method=Image.NEAREST, normalize=False)
     label_tensor = transform_label(label) * 255.0
     label_tensor[label_tensor == 255] = self.opt.label_nc  # 'unknown' is opt.label_nc
     return label_tensor, params1
Example #15
0
    def __getitem__(self, index):
        """Return a hazy/clear image pair with its mask and metadata.

        Loads the hazy image at self.imagePaths[index], derives the city
        name from the file name, and loads the matching clear ground-truth
        image and mask from sibling 'gt' and 'mask' directories.
        """
        I_path = self.imagePaths[index]
        I_img = Image.open(I_path).convert('RGB')
        # NOTE(review): the result is unused, but get_params may consume
        # random state, so the call is kept to preserve behaviour.
        params = get_params(self.opt, I_img.size)

        I_name = os.path.splitext(ntpath.basename(I_path))[0]
        cityName = I_name.split('_')[0]

        I_dir = ntpath.dirname(I_path)
        base_dir = ntpath.dirname(I_dir)
        # (removed: a second identical base_dir computation and an unused
        # J_root variable)

        J_path = os.path.join(base_dir, 'gt', '%s_clear.png' % cityName)
        J_img = Image.open(J_path).convert('RGB')

        mask_path = os.path.join(base_dir, 'mask', '%s_mask.mat' % I_name)
        mask_info = sio.loadmat(mask_path)

        # apply image transformation; J is normalised to [-1, 1] manually
        real_I = self.transform(I_img)
        real_J = (self.toTensor(J_img) - 0.5) / 0.5

        return {
            'haze': real_I,
            'clear': real_J,
            'mask': mask_info['mask'],
            'city': cityName,
            'paths': I_path
        }
Example #16
0
    def __getitem__(self, index):
        """Grab one frame from the capture device and return it as tensors.

        A fixed 500x800 window starting at (250, 150) is cropped from the
        frame; the same transformed tensor is returned as A, B and mask.
        """
        crop_origin = [
            250, 150
        ]  # we crop a view of 500*800 size and start position is cap_pos
        # NOTE(review): passing index to cap.read() -- confirm the capture
        # backend actually honours it.
        ret, frame = self.cap.read(index)
        frame = frame[crop_origin[0]:crop_origin[0] + 500,
                      crop_origin[1]:crop_origin[1] + 800, :]

        # apply transform to frame
        params = get_params(self.opt, frame.shape[:2])
        transform = get_transform(self.opt, params, grayscale=(True))

        tensor = transform(Image.fromarray(frame))
        img_path = 'E:/ela_reconstruction/script/AAA/test_ruijin'

        return {
            'A': tensor,
            'B': tensor,
            'mask': tensor,
            'A_paths': img_path,
            'B_paths': img_path,
            'mask_path': img_path
        }
Example #17
0
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor)    -- an image in the input domain
            B (tensor)    -- its corresponding image in the target domain
            A_paths (str) -- image paths
            B_paths (str) -- image paths
        """
        path_a = self.data_paths_a[index]
        path_b = self.data_paths_b[index]

        img_a = Image.open(path_a).convert('RGB')
        img_b = Image.open(path_b).convert('RGB')

        # apply the same transform to both A and B
        params = get_params(self.opt, img_a.size)
        tf_a = get_transform(self.opt, params, grayscale=(self.input_nc == 1))
        tf_b = get_transform(self.opt, params, grayscale=(self.output_nc == 1))

        return {'A': tf_a(img_a), 'B': tf_b(img_b), 'A_paths': path_a, 'B_paths': path_b}
Example #18
0
    def __getitem__(self, index):
        """Return one x-ray image and its integer class label.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary:
            A (tensor) -- the transformed grayscale x-ray
            B (int)    -- class id looked up from the file name
        """
        xray_path = self.data_paths[index]
        xray = Image.open(xray_path).convert('L')
        transform_params = get_params(self.opt, xray.size)
        xray_transform = get_transform(self.opt,
                                       transform_params,
                                       grayscale=True)
        xray = xray_transform(xray)
        # Handle both '/' and '\\' separators when extracting the file name.
        filename = xray_path.split('/')[-1].split('\\')[-1]
        label = self.label_sets[filename]
        id_label = self.label_dict[label]
        # Dead code removed: a one-hot vector was built here but never
        # used; the integer id is what gets returned.
        return {'A': xray, 'B': id_label}
Example #19
0
 def generate_face(self, sketch):
     """Run the sketch-to-photo model on *sketch*; return an image array."""
     params = get_params(self.opt, sketch.size)
     sketch_tf = get_transform_sketch(self.opt, params)
     batch = sketch_tf(sketch.convert('RGB')).unsqueeze(0)  # values in [-1, 1]
     prediction = self.model.inference(batch)
     return util.tensor2im(prediction['fake_image'][0])
Example #20
0
    def __getitem__(self, index):
        """Return a single input image and its path.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary:
            A (tensor)    -- the transformed input image
            A_paths (str) -- path of the image
        """
        ab_path = self.AB_paths[index]
        fileName = Path(ab_path).name  # kept for the cell-count lookup below
        #viability = self.cellCount[str(fileName)]

        img = Image.open(ab_path).convert('RGB')

        params = get_params(self.opt, img.size)
        transform = get_transform(self.opt, params,
                                  grayscale=(self.input_nc == 1))

        return {'A': transform(img), 'A_paths': ab_path}
Example #21
0
    def __getitem__(self, index):
        """Return the label map A (and the real image B when training).

        Optionally inverts A's colours, and converts either image to a
        single luminance channel when the matching channel count is 1.
        """
        assert (self.opt.loadSize >= self.opt.fineSize)
        ### input A (label maps)
        A_path = self.A_paths[index]
        src = Image.open(A_path).convert('RGB')
        # inverse color: (boundary)255->0 && (crystal)0->255
        if self.inverse:
            src = ImageOps.invert(src)
        params = get_params(self.opt, src.size)
        transform = get_transform(self.opt, params)
        input_nc = self.opt.input_nc
        output_nc = self.opt.output_nc
        A = transform(src)
        if input_nc == 1:  # RGB to gray (ITU-R 601 luma weights)
            A = (A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114).unsqueeze(0)
        B = 0
        if self.opt.isTrain:
            ### input B (real images)
            B_path = self.B_paths[index]
            tgt = Image.open(B_path).convert('RGB')
            transform = get_transform(self.opt, params)
            B = transform(tgt)
            if output_nc == 1:  # RGB to gray
                B = (B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114).unsqueeze(0)

        return {'A': A, 'B': B, 'A_path': A_path}
Example #22
0
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor)    -- an image in the input domain
            B (tensor)    -- its corresponding image in the target domain
            A_paths (str) -- image paths
            B_paths (str) -- image paths (same as A_paths)
        """
        A_path = self.A_paths[index]
        B_path = self.B_paths[index]
        A = Image.open(A_path).convert('RGB')
        B = Image.open(B_path).convert('RGB')
        assert A.size == B.size, "%s and %s are not the same size, A and B must have the same image size" % (
            A_path, B_path)
        # Apply the same transform on A and B
        transform_params = get_params(self.opt, A.size)
        A_transform = get_transform(self.opt,
                                    transform_params,
                                    grayscale=(self.input_nc == 1))
        # BUG FIX: the target-domain transform previously keyed grayscale
        # on input_nc; B is the output image, so use output_nc.
        B_transform = get_transform(self.opt,
                                    transform_params,
                                    grayscale=(self.output_nc == 1))
        A = A_transform(A)
        B = B_transform(B)
        return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
Example #23
0
    def __getitem__(self, index):
        """Return one x-ray image with its label and metadata.

        Returns a dictionary:
            A (tensor) -- transformed grayscale x-ray replicated to 3 channels
            B          -- the label from self.label_list
            info       -- extra metadata from self.info_list
        """
        img_path = self.image_paths[index]
        img = Image.open(img_path).convert('L')
        label = self.label_list[index]
        info = self.info_list[index]

        # (A label-conditional augmentation path used to live here; the
        # plain transform is now applied to every sample.)
        params = get_params(self.opt, img.size)
        transform = get_transform(self.opt, params, grayscale=True,
                                  run_type=self.run_type)
        img = transform(img)

        # Replicate the single gray channel to three.
        img = torch.cat((img, img, img), 0)

        return {'A': img, 'B': label, 'info': info}
    def __getitem__(self, index):
        """Return a data point built from a triple-wide A|D|B image.

        The source image is split into thirds: A and D are concatenated on
        the channel axis as the input, B is the target.  Decoded images are
        cached when opt.load_in_memory is set.
        """
        ADB_path = self.ADB_paths[index]
        cached = self.cache.get(index) if self.opt.load_in_memory else None
        if cached is None:
            ADB = Image.open(ADB_path).convert('RGB')
            if self.opt.load_in_memory:
                self.cache[index] = ADB
        else:
            ADB = cached

        # split ADB image into A , D and B
        w, h = ADB.size
        third = int(w / 3)
        img_a = ADB.crop((0, 0, third, h))
        img_d = ADB.crop((third, 0, 2 * third, h))
        img_b = ADB.crop((2 * third, 0, w, h))

        # apply the same transform parameters to all three crops
        params = get_params(self.opt, img_a.size)
        tf_a = get_transform(self.opt, params,
                             grayscale=(self.input_nc // 2 == 1))
        tf_d = get_transform(self.opt, params,
                             grayscale=(self.input_nc // 2 == 1))
        tf_b = get_transform(self.opt, params,
                             grayscale=(self.output_nc == 1))

        A = tf_a(img_a)
        D = tf_d(img_d)
        B = tf_b(img_b)

        # stack A and D on the channel dimension as the network input
        return {'A': torch.cat([A, D], dim=0), 'B': B,
                'A_paths': ADB_path, 'B_paths': ADB_path}
Example #25
0
    def get_single_input(self):
        """Build a single model-input dict from the pre-loaded GT data.

        Uses channel 0 of self.mat_img as the label map; the image tensor
        is a zero placeholder of shape [1, 3, 256, 256].
        """
        image_path = self.GT_img_path
        label_img = self.mat_img[:, :, 0]

        label = Image.fromarray(label_img)
        params = get_params(self.opt, label.size)
        # Nearest-neighbour, unnormalised so label ids survive resizing.
        transform_label = get_transform(self.opt,
                                        params,
                                        method=Image.NEAREST,
                                        normalize=False)
        label_tensor = transform_label(label) * 255.0
        label_tensor[label_tensor ==
                     255] = self.opt.label_nc  # 'unknown' is opt.label_nc
        label_tensor.unsqueeze_(0)

        image_tensor = torch.zeros([1, 3, 256, 256])

        # BUG FIX: instance_tensor used to be bound only when
        # opt.no_instance was set, raising UnboundLocalError otherwise.
        # Always provide the placeholder; instance maps are not produced
        # here.
        instance_tensor = torch.Tensor([0])

        input_dict = {
            'label': label_tensor,
            'instance': instance_tensor,
            'image': image_tensor,
            'path': image_path,
        }

        return input_dict
Example #26
0
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor)    -- an image in the input domain
            B (tensor)    -- its corresponding image in the target domain
            A_paths (str) -- image paths
            B_paths (str) -- image paths (same as A_paths)
        """
        combined_path = self.AB_paths[index]
        combined = Image.open(combined_path).convert('RGB')

        # The file stores A and B side by side; split down the middle.
        w, h = combined.size
        half = int(w / 2)
        img_a = combined.crop((0, 0, half, h))
        img_b = combined.crop((half, 0, w, h))

        # apply the same transform to both A and B
        params = get_params(self.opt, img_a.size)
        tf_a = get_transform(self.opt, params, grayscale=(self.input_nc == 1))
        tf_b = get_transform(self.opt, params, grayscale=(self.output_nc == 1))

        return {'A': tf_a(img_a), 'B': tf_b(img_b),
                'A_paths': combined_path, 'B_paths': combined_path}
Example #27
0
    def __getitem__(self, index):
        """Return an A/B pair split from a side-by-side image, with caching.

        Decoded images are kept in self.cache when opt.load_in_memory is
        set, so each file is decoded at most once.
        """
        AB_path = self.AB_paths[index]
        cached = self.cache.get(index) if self.opt.load_in_memory else None
        if cached is None:
            AB = Image.open(AB_path).convert('RGB')
            if self.opt.load_in_memory:
                self.cache[index] = AB
        else:
            AB = cached

        # split AB image into A and B halves
        w, h = AB.size
        half = int(w / 2)
        img_a = AB.crop((0, 0, half, h))
        img_b = AB.crop((half, 0, w, h))

        # apply the same transform to both A and B
        params = get_params(self.opt, img_a.size)
        tf_a = get_transform(self.opt, params, grayscale=(self.input_nc == 1))
        tf_b = get_transform(self.opt, params, grayscale=(self.output_nc == 1))

        return {'A': tf_a(img_a), 'B': tf_b(img_b),
                'A_paths': AB_path, 'B_paths': AB_path}
Example #28
0
def getnewface(img,mask,mask_m):
    """Run the mask-editing face model and return the edited face image.

    Parameters:
        img    -- source face image as an H x W x C uint8 array
        mask   -- the original segmentation mask
        mask_m -- the user-modified segmentation mask

    Returns the generated face as a uint8 ndarray resized back to the
    original (w, h).
    """
    h,w,d = img.shape
    img = Image.fromarray(img)
    # Convert the raw masks into the label maps the generator expects.
    lmask = labelMask(mask)
    lmask_m = labelMask(mask_m)

    # NOTE(review): building the options and model on every call is very
    # expensive -- consider hoisting them to module or caller scope.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(0)
    opt = getOptions()

    model = create_model(opt)

    # All inputs are transformed on a fixed 512x512 canvas.
    params = get_params(opt, (512,512))
    transform_mask = get_transform(opt, params, method=Image.NEAREST, normalize=False, normalize_mask=True)
    transform_image = get_transform(opt, params)
    mask = transform_mask(Image.fromarray(np.uint8(lmask)))
    mask_m = transform_mask(Image.fromarray(np.uint8(lmask_m)))
    img = transform_image(img)

    generated = model.inference(torch.FloatTensor([mask_m.numpy()]), torch.FloatTensor([mask.numpy()]), torch.FloatTensor([img.numpy()]))

    # NCHW -> NHWC for image viewing.
    result = generated.permute(0, 2, 3, 1)
    if torch.cuda.is_available():
        result = result.cpu().numpy()
    else:
        result = result.detach().numpy()

    # Map the output from [-1, 1] back to [0, 255].
    result = (result + 1) * 127.5
    result = np.asarray(result[0,:,:,:], dtype=np.uint8)
    result = Image.fromarray(result)
    result = result.resize([w,h])

    result = np.array(result)
    return result
Example #29
0
    def __getitem__(self, index):
        """Return a sketch/photo training sample, optionally mixing sketches.

        Returns a dict with keys 'sketch', 'photo', 'sketch_deform' and
        'path'.  When opt.mix_sketch is set, a coin flip decides whether
        the original sketch (A) or a random deformed sketch (C) plays the
        'sketch' role, with the other as 'sketch_deform'.
        """
        ### input A (sketch)
        A_path = self.A_paths[index]
        sketch_img = Image.open(A_path)
        params = get_params(self.opt, sketch_img.size)
        A_tensor = get_transform_sketch(self.opt, params)(sketch_img.convert('RGB'))  # A_tensor: [-1, 1]

        ### input B (real images)
        B_tensor = inst_tensor = feat_tensor = C_tensor = 0
        if self.opt.isTrain or self.opt.use_encoded_image:
            B_img = Image.open(self.B_paths[index]).convert('RGB')
            B_tensor = get_transform(self.opt, params)(B_img)

        ### input C (a random deformed sketch for this index)
        rand_index = np.random.randint(len(self.C_list_paths[index]))
        C_img = Image.open(self.C_list_paths[index][rand_index]).convert('RGB')
        C_tensor = get_transform_sketch(self.opt, params)(C_img)

        # Coin flip only when mixing is enabled (preserves RNG call order).
        swap = False
        if self.opt.mix_sketch:
            swap = bool(np.random.randint(2))

        return {
            'sketch': C_tensor if swap else A_tensor,
            'photo': B_tensor,
            'sketch_deform': A_tensor if swap else C_tensor,
            'path': A_path,
        }
Example #30
0
    def __getitem__(self, index):
        """Return a gray/colour pair built from a single RGB image.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor)    -- the gray image
            B (tensor)    -- the corresponding RGB image
            A_paths (str) -- image paths
            B_paths (str) -- image paths (same as A_paths)
        """
        AB_path = self.AB_paths[index]
        rgb = Image.open(AB_path).convert('RGB')
        w, h = rgb.size

        # Luminance conversion with Rec. 709 weights.
        gray = toimage(np.array(rgb) @ np.array([0.2125, 0.7154, 0.0721]),
                       mode="L")

        # Same transform parameters for both images.
        params = get_params(self.opt, (w, h))
        gray_tf = get_transform_one_channel(self.opt, params, grayscale=False)
        rgb_tf = get_transform(self.opt, params, grayscale=False)

        return {'A': gray_tf(gray), 'B': rgb_tf(rgb),
                'A_paths': AB_path, 'B_paths': AB_path}
    def __getitem__(self, index):
        """Return a pix2pixHD-style sample: label map, real image, and
        optional instance/feature maps."""
        ### input A (label maps)
        A_path = self.A_paths[index]
        label_img = Image.open(A_path)
        params = get_params(self.opt, label_img.size)
        if self.opt.label_nc == 0:
            # Labels are plain RGB images.
            transform_A = get_transform(self.opt, params)
            A_tensor = transform_A(label_img.convert('RGB'))
        else:
            # Integer label ids: nearest resampling, no normalisation.
            transform_A = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
            A_tensor = transform_A(label_img) * 255.0

        B_tensor = inst_tensor = feat_tensor = 0
        ### input B (real images)
        if self.opt.isTrain:
            B_img = Image.open(self.B_paths[index]).convert('RGB')
            B_tensor = get_transform(self.opt, params)(B_img)

        ### if using instance maps
        if not self.opt.no_instance:
            # The label transform is reused so the maps stay aligned.
            inst_tensor = transform_A(Image.open(self.inst_paths[index]))

            if self.opt.load_features:
                feat_img = Image.open(self.feat_paths[index]).convert('RGB')
                feat_tensor = normalize()(transform_A(feat_img))

        return {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
                'feat': feat_tensor, 'path': A_path}