# Example no. 1 (score: 0)
def get_corresponding_file(original, target_dir, target_ext=None):
    """Map a file path into ``target_dir``, swapping its extension.

    Given an original file such as
        dataroot/subject/body/SAMPLE_ID.jpg
    and a target directory
        dataroot/subject/cloth
    return the corresponding path
        dataroot/subject/cloth/SAMPLE_ID.npz

    Args:
        original: path of the source file.
        target_dir: directory holding the corresponding files; its depth
            determines how many leading components of ``original`` get
            replaced.
        target_ext: extension for the result; when falsy, the extension
            of the files already inside ``target_dir`` is detected.

    Returns:
        Path of the corresponding file inside ``target_dir``.
    """
    # Depth of target_dir = number of leading path components to swap out.
    depth = len(target_dir.split(os.path.sep))
    relative_part = remove_top_dir(original, depth)
    candidate = os.path.join(target_dir, relative_part)
    if not target_ext:
        # Infer the extension from files already present in target_dir.
        target_ext = get_dir_file_extension(target_dir)
    # Swap the original extension for the target one.
    return remove_extension(candidate) + target_ext
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
    """Save images to disk and register them on an HTML page.

    Parameters:
        webpage (the HTML class) -- HTML page object that stores the images (see html.py)
        visuals (OrderedDict)    -- maps label -> image (either tensor or numpy)
        image_path (str)         -- used to derive the saved image names
        aspect_ratio (float)     -- aspect ratio of the saved images
        width (int)              -- display width used on the HTML page

    Each entry of ``visuals`` is written as a PNG inside the webpage's
    image directory, then all entries are added to ``webpage`` as one row.
    """
    out_dir = webpage.get_image_dir()
    # Derive a row name such as "a_to_b" from the source image paths.
    row_name = "_to_".join(
        remove_extension(ntpath.basename(p)) for p in image_path[0])

    webpage.add_header(row_name)
    file_names, labels, targets = [], [], []

    for label, tensor in visuals.items():
        array = util.tensor2im(tensor)
        file_name = f'{row_name}_{label}.png'
        destination = os.path.join(out_dir, file_name)
        util.save_image(array, destination, aspect_ratio=aspect_ratio)
        file_names.append(file_name)
        labels.append(label)
        # Each image on the page links to its own file.
        targets.append(file_name)
    webpage.add_images(file_names, labels, targets, width=width)
# Example no. 3 (score: 0)
 def save_cloths_npz(local):
     """Persist the intermediate warped cloth as a compressed file.

     Builds an "a_to_b" style name from the sample's image paths and
     writes the first fake produced by the model under ``warp_out``.
     """
     stems = [remove_extension(os.path.basename(p))
              for p in local["image_paths"][0]]
     out_path = os.path.join(warp_out, "_to_".join(stems))
     # save the warped cloths
     compress_and_save_cloth(local["model"].fakes[0], out_path)
    def __getitem__(self, index: int) -> dict:
        """Build one sample: textures, cloth segmentation, and ROIs.

        Args:
            index: dataset index into ``self.texture_files`` (and, at
                inference time, into ``self.cloth_files``).

        Returns:
            Dict with the (possibly flipped) input texture tensor, the
            target texture tensor, the cloth segmentation tensor, the
            scaled ROI tensor, and the source file paths.
        """
        # (1) Get target texture.
        target_texture_file = self.texture_files[index]
        target_texture_img = Image.open(target_texture_file).convert("RGB")

        # NOTE(review): tf is presumably torchvision.transforms.functional
        # — resize to load_size, convert to tensor, normalize. Confirm.
        target_texture_tensor = self._normalize_texture(
            tf.to_tensor(tf.resize(target_texture_img, self.opt.load_size)))

        # file id for matching cloth and matching ROI
        file_id = remove_prefix(remove_extension(target_texture_file),
                                self.texture_dir + "/")

        # (2) Get corresponding cloth if train, else cloth at index if inference.
        cloth_file = (os.path.join(self.cloth_dir, file_id + self.cloth_ext)
                      if self.is_train else self.cloth_files[index])
        # n_labels=19 assumes a 19-class segmentation scheme — TODO confirm
        # against the dataset's label map.
        cloth_tensor = decompress_cloth_segment(cloth_file, n_labels=19)
        # resize cloth tensor
        # We have to unsqueeze because interpolate expects batch in dim1
        cloth_tensor = nn.functional.interpolate(
            cloth_tensor.unsqueeze(0), size=self.opt.load_size).squeeze()

        # (3) Get and scale corresponding roi.
        original_size = target_texture_img.size[0]  # PIL width
        # ROIs are stored at the original resolution; rescale to load_size
        # and round to the nearest integer pixel coordinate.
        scale = float(self.opt.load_size) / original_size
        rois = np.rint(self.rois_df.loc[file_id].values * scale)
        rois_tensor = torch.from_numpy(rois)

        # (4) Get randomly flipped input.
        # input will be randomly flipped of target; if we flip input, we must flip rois
        # Flip probability is 0.5 when the matching transform (or "all")
        # is listed in opt.input_transforms, otherwise 0 (never flip).
        hflip = (0.5 if any(t in self.opt.input_transforms
                            for t in ("hflip", "all")) else 0)
        vflip = (0.5 if any(t in self.opt.input_transforms
                            for t in ("vflip", "all")) else 0)
        input_texture_image, rois_tensor = random_image_roi_flip(
            target_texture_img, rois_tensor, vp=vflip, hp=hflip)
        input_texture_tensor = self._normalize_texture(
            tf.to_tensor(tf.resize(input_texture_image, self.opt.load_size)))

        # do cropping if needed
        if self.crop_bounds:
            input_texture_tensor, cloth_tensor, target_texture_tensor = crop_tensors(
                input_texture_tensor,
                cloth_tensor,
                target_texture_tensor,
                crop_bounds=self.crop_bounds,
            )
            # ROIs must be cropped with the same bounds to stay aligned
            # with the cropped tensors.
            rois_tensor = crop_rois(rois_tensor, self.crop_bounds)

        # assert shapes
        # Sanity check: all three tensors must share spatial dims (H, W).
        assert (
            input_texture_tensor.shape[-2:] == target_texture_tensor.shape[-2:]
            == cloth_tensor.shape[-2:]
        ), f"input {input_texture_tensor.shape}; target {target_texture_tensor.shape}; cloth {cloth_tensor.shape}"

        return {
            "texture_paths": target_texture_file,
            "input_textures": input_texture_tensor,
            "rois": rois_tensor,
            "cloth_paths": cloth_file,
            "cloths": cloth_tensor,
            "target_textures": target_texture_tensor,
        }