Exemplo n.º 1
0
    def run(self) -> None:
        """Batch-upscale every image found under ``self.input`` into ``self.output``.

        The model-chain string ``self.model_str`` is parsed first: models are
        chained with ``+`` (or ``>``); a single chain link may interpolate
        several models joined by ``|`` (or ``&``), each written as
        ``model@amount`` (or ``model:amount``). Every model path is resolved
        through ``self.__check_model_path`` before use.

        Exits the process with status 1 when the input folder is missing/not a
        directory, or when the output path exists as a file.
        """
        # "+" and ">" are interchangeable chain separators.
        model_chain = (self.model_str.split("+")
                       if "+" in self.model_str else self.model_str.split(">"))

        for idx, model in enumerate(model_chain):

            # NOTE(review): this condition checks self.model_str, not the
            # current chain link `model`, so mixing "|" and "&" separators in
            # one chain string can mis-split an interpolation — confirm intended.
            interpolations = (model.split("|")
                              if "|" in self.model_str else model.split("&"))

            if len(interpolations) > 1:
                # Normalize each interpolation entry to "resolved_path@amount".
                for i, interpolation in enumerate(interpolations):
                    interp_model, interp_amount = (interpolation.split("@")
                                                   if "@" in interpolation else
                                                   interpolation.split(":"))
                    interp_model = self.__check_model_path(interp_model)
                    interpolations[i] = f"{interp_model}@{interp_amount}"
                model_chain[idx] = "&".join(interpolations)
            else:
                model_chain[idx] = self.__check_model_path(model)

        # Validate the input/output locations before doing any work.
        if not self.input.exists():
            self.log.error(f'Folder "{self.input}" does not exist.')
            sys.exit(1)
        elif self.input.is_file():
            self.log.error(f'Folder "{self.input}" is a file.')
            sys.exit(1)
        elif self.output.is_file():
            self.log.error(f'Folder "{self.output}" is a file.')
            sys.exit(1)
        elif not self.output.exists():
            self.output.mkdir(parents=True)

        print('Model{:s}: "{:s}"'.format(
            "s" if len(model_chain) > 1 else "",
            # ", ".join([Path(x).stem for x in model_chain]),
            ", ".join([x for x in model_chain]),
        ))

        # Collect images recursively for every supported extension.
        images: List[Path] = []
        for ext in ["png", "jpg", "jpeg", "gif", "bmp", "tiff", "tga"]:
            images.extend(self.input.glob(f"**/*.{ext}"))

        # Store the maximum split depths for each model in the chain
        # TODO: there might be a better way of doing this but it's good enough for now
        split_depths = {}

        with Progress(
                # SpinnerColumn(),
                "[progress.description]{task.description}",
                BarColumn(),
                "[progress.percentage]{task.percentage:>3.0f}%",
                TimeRemainingColumn(),
        ) as progress:
            task_upscaling = progress.add_task("Upscaling", total=len(images))
            for idx, img_path in enumerate(images, 1):
                # Mirror the input's relative layout under the output folder;
                # results are always written as PNG.
                img_input_path_rel = img_path.relative_to(self.input)
                output_dir = self.output.joinpath(img_input_path_rel).parent
                img_output_path_rel = output_dir.joinpath(
                    f"{img_path.stem}.png")
                output_dir.mkdir(parents=True, exist_ok=True)
                if len(model_chain) == 1:
                    self.log.info(
                        f'Processing {str(idx).zfill(len(str(len(images))))}: "{img_input_path_rel}"'
                    )
                if self.skip_existing and img_output_path_rel.is_file():
                    self.log.warning("Already exists, skipping")
                    if self.delete_input:
                        img_path.unlink(missing_ok=True)
                    progress.advance(task_upscaling)
                    continue
                # read image
                img = cv2.imread(str(img_path.absolute()),
                                 cv2.IMREAD_UNCHANGED)
                # Promote grayscale to 3-channel so the models get BGR input.
                if len(img.shape) < 3:
                    img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

                # Seamless modes: pad 16 px on every side; the padding is
                # cropped back off (scaled) after upscaling.
                if self.seamless == SeamlessOptions.TILE:
                    img = cv2.copyMakeBorder(img, 16, 16, 16, 16,
                                             cv2.BORDER_WRAP)
                elif self.seamless == SeamlessOptions.MIRROR:
                    img = cv2.copyMakeBorder(img, 16, 16, 16, 16,
                                             cv2.BORDER_REFLECT_101)
                elif self.seamless == SeamlessOptions.REPLICATE:
                    img = cv2.copyMakeBorder(img, 16, 16, 16, 16,
                                             cv2.BORDER_REPLICATE)
                elif self.seamless == SeamlessOptions.ALPHA_PAD:
                    img = cv2.copyMakeBorder(img,
                                             16,
                                             16,
                                             16,
                                             16,
                                             cv2.BORDER_CONSTANT,
                                             value=[0, 0, 0, 0])
                # Accumulated scale factor across the whole chain.
                final_scale: int = 1

                # Per-image sub-task, only shown when chaining several models.
                task_model_chain: TaskID = None
                if len(model_chain) > 1:
                    task_model_chain = progress.add_task(
                        f'{str(idx).zfill(len(str(len(images))))} - "{img_input_path_rel}"',
                        total=len(model_chain),
                    )
                for i, model_path in enumerate(model_chain):

                    img_height, img_width = img.shape[:2]

                    # Load the model so we can access the scale
                    self.load_model(model_path)

                    # NOTE(review): split_depths is keyed by chain index, but
                    # the guard only checks non-emptiness — with
                    # cache_max_split_depth and a multi-model chain, the first
                    # image reaches i=1 with only key 0 populated, which would
                    # raise KeyError. Confirm against real usage.
                    if self.cache_max_split_depth and len(
                            split_depths.keys()) > 0:
                        rlt, depth = ops.auto_split_upscale(
                            img,
                            self.upscale,
                            self.last_scale,
                            max_depth=split_depths[i],
                        )
                    else:
                        rlt, depth = ops.auto_split_upscale(
                            img, self.upscale, self.last_scale)
                        split_depths[i] = depth

                    final_scale *= self.last_scale

                    # This is for model chaining
                    img = rlt.astype("uint8")
                    if len(model_chain) > 1:
                        progress.advance(task_model_chain)

                # NOTE(review): crop/write operate on `rlt` (the last model's
                # raw result) rather than the uint8-cast `img` — presumably
                # deliberate to avoid double casting; verify output dtype.
                if self.seamless:
                    rlt = self.crop_seamless(rlt, final_scale)

                cv2.imwrite(str(img_output_path_rel.absolute()), rlt)

                if self.delete_input:
                    img_path.unlink(missing_ok=True)

                progress.advance(task_upscaling)
Exemplo n.º 2
0
    def image(
        self,
        img: np.ndarray,
        device: torch.device = None,
        multi_gpu_release_device=True,
    ) -> np.ndarray:
        """Run the configured model chain over a single image array.

        Args:
            img: Input image, ``(H, W)`` or ``(H, W, C)``. Grayscale input is
                promoted to 3-channel BGR before upscaling.
            device: Device to run inference on. When ``None``, one is chosen
                automatically: a free device in multi-GPU mode, otherwise the
                first configured device.
            multi_gpu_release_device: In multi-GPU mode, release the chosen
                device's lock when done. Pass ``False`` if the caller manages
                the device lifetime itself.

        Returns:
            The upscaled image as a uint8 ``np.ndarray``, with any seamless
            padding cropped back off.
        """
        self.in_nc = None
        self.out_nc = None

        # Maximum auto-split depth observed for each model in the chain, so
        # later links can skip the trial-and-error splitting phase.
        split_depths = {}

        # Promote grayscale to 3-channel so the models get BGR input.
        if len(img.shape) < 3:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        # Seamless modes: pad 16 px on every side; cropped off again below.
        if self.seamless == SeamlessOptions.tile:
            img = cv2.copyMakeBorder(img, 16, 16, 16, 16, cv2.BORDER_WRAP)
        elif self.seamless == SeamlessOptions.mirror:
            img = cv2.copyMakeBorder(img, 16, 16, 16, 16,
                                     cv2.BORDER_REFLECT_101)
        elif self.seamless == SeamlessOptions.replicate:
            img = cv2.copyMakeBorder(img, 16, 16, 16, 16, cv2.BORDER_REPLICATE)
        elif self.seamless == SeamlessOptions.alpha_pad:
            img = cv2.copyMakeBorder(img,
                                     16,
                                     16,
                                     16,
                                     16,
                                     cv2.BORDER_CONSTANT,
                                     value=[0, 0, 0, 0])
        # Accumulated scale factor across the whole chain.
        final_scale: int = 1

        # Fix: identity comparison with None (`is`), not `==` (PEP 8).
        if device is None:
            if self.multi_gpu:
                device, _ = self.get_available_device()
            else:
                device = list(self.devices.keys())[0]

        for i, model_path in enumerate(self.model_chain):
            # Load the model so we can access its scale.
            self.load_model(model_path)

            # Fix: guard on the specific key, not dict non-emptiness — the old
            # `len(split_depths.keys()) > 0` check raised KeyError for the
            # second model of a chain, since only key 0 existed at that point.
            if self.cache_max_split_depth and i in split_depths:
                rlt, depth = ops.auto_split_upscale(
                    img,
                    device,
                    self.upscale,
                    self.last_scale,
                    max_depth=split_depths[i],
                )
            else:
                rlt, depth = ops.auto_split_upscale(img, device, self.upscale,
                                                    self.last_scale)
                split_depths[i] = depth

            final_scale *= self.last_scale

            # Feed the result to the next model in the chain.
            img = rlt.astype("uint8")

        if self.seamless:
            # Crop away the (now upscaled) seamless padding.
            img = self.crop_seamless(img, final_scale)

        if self.multi_gpu and multi_gpu_release_device:
            self.devices[device][0].release()

        return img