Example no. 1
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        configfile = self.args.configfile if hasattr(self.args,
                                                     "configfile") else None
        normalization = None if self.args.normalization == "none" else self.args.normalization
        self.extractor = Extractor(self.args.detector,
                                   self.args.aligner,
                                   self.args.loglevel,
                                   configfile=configfile,
                                   multiprocess=not self.args.singleprocess,
                                   rotate_images=self.args.rotate_images,
                                   min_size=self.args.min_size,
                                   normalize_method=normalization)
        self.save_queue = queue_manager.get_queue("extract_save")
        self.verify_output = False
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)
Example no. 2
    def __init__(self, arguments, sample_size, display, lock, trigger_patch):
        logger.debug(
            "Initializing %s: (arguments: '%s', sample_size: %s, display: %s, lock: %s, "
            "trigger_patch: %s)", self.__class__.__name__, arguments,
            sample_size, display, lock, trigger_patch)
        self.sample_size = sample_size
        self.display = display
        self.lock = lock
        self.trigger_patch = trigger_patch
        self.input_images = list()
        self.predicted_images = list()

        self.images = Images(arguments)
        self.alignments = Alignments(arguments,
                                     is_extract=False,
                                     input_is_video=self.images.is_video)
        if not self.alignments.have_alignments_file:
            logger.error("Alignments file not found at: '%s'",
                         self.alignments.file)
            exit(1)
        self.filelist = self.get_filelist()
        self.indices = self.get_indices()

        self.predictor = Predict(queue_manager.get_queue("preview_predict_in"),
                                 sample_size, arguments)
        self.generate()

        logger.debug("Initialized %s", self.__class__.__name__)
Example no. 3
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.opts = OptionalActions(self.args, self.images.input_images)
Example no. 4
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        print("Output Directory: {}".format(self.args.output_dir))
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True)
        self.plugins = Plugins(self.args)

        self.post_process = PostProcess(arguments)

        self.export_face = True
        self.verify_output = False
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
Example no. 5
File: convert.py Project: aejot/fs
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        self.extract_faces = False
        self.faces_count = 0

        self.images = Images(self.args)
        self.alignments = Alignments(self.args, False)

        # Update Legacy alignments
        Legacy(self.alignments, self.images.input_images)

        self.post_process = PostProcess(arguments)
        self.verify_output = False

        self.opts = OptionalActions(self.args, self.images.input_images)
Example no. 6
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s", self.__class__.__name__, arguments)
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.plugins = Plugins(self.args)

        self.post_process = PostProcess(arguments)

        self.verify_output = False
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)
Example no. 7
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)

        self.patch_threads = None
        self.images = Images(self.args)
        self.validate()
        self.alignments = Alignments(self.args, False, self.images.is_video)
        self.opts = OptionalActions(self.args, self.images.input_images,
                                    self.alignments)

        self.add_queues()
        self.disk_io = DiskIO(self.alignments, self.images, arguments)
        self.predictor = Predict(self.disk_io.load_queue, self.queue_size,
                                 arguments)

        configfile = self.args.configfile if hasattr(self.args,
                                                     "configfile") else None
        self.converter = Converter(get_folder(self.args.output_dir),
                                   self.predictor.output_size,
                                   self.predictor.has_predicted_mask,
                                   self.disk_io.draw_transparent,
                                   self.disk_io.pre_encode,
                                   arguments,
                                   configfile=configfile)

        logger.debug("Initialized %s", self.__class__.__name__)
Example no. 8
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self._args = arguments

        self._patch_threads = None
        self._images = Images(self._args)
        self._alignments = Alignments(self._args, False, self._images.is_video)

        self._opts = OptionalActions(self._args, self._images.input_images,
                                     self._alignments)

        self._add_queues()
        self._disk_io = DiskIO(self._alignments, self._images, arguments)
        self._predictor = Predict(self._disk_io.load_queue, self._queue_size,
                                  arguments)
        self._validate()
        get_folder(self._args.output_dir)

        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        self._converter = Converter(self._predictor.output_size,
                                    self._predictor.coverage_ratio,
                                    self._disk_io.draw_transparent,
                                    self._disk_io.pre_encode,
                                    arguments,
                                    configfile=configfile)

        logger.debug("Initialized %s", self.__class__.__name__)
Example no. 9
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.opts = OptionalActions(self.args, self.images.input_images)
Example no. 10
    def __init__(self, arguments):
        self.args = arguments

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.output_dir = self.faces.output_dir

        self.export_face = True
Example no. 11
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        self.extract_faces = False
        self.faces_count = 0

        self.images = Images(self.args)
        self.alignments = Alignments(self.args, False, self.images.is_video)

        # Update Legacy alignments
        Legacy(self.alignments, self.images.input_images,
               arguments.input_aligned_dir)

        self.post_process = PostProcess(arguments)
        self.verify_output = False

        self.opts = OptionalActions(self.args, self.images.input_images,
                                    self.alignments)
        logger.debug("Initialized %s", self.__class__.__name__)
Example no. 12
    def __init__(self, arguments):
        self.args = arguments

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.output_dir = self.faces.output_dir

        self.export_face = True
        self.save_interval = self.args.save_interval if hasattr(
            self.args, "save_interval") else None
Example no. 13
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s", self.__class__.__name__, arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector,
                                   self.args.aligner,
                                   self.args.loglevel,
                                   self.args.multiprocess,
                                   self.args.rotate_images,
                                   self.args.min_size)

        self.save_queue = queue_manager.get_queue("extract_save")
        self.verify_output = False
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)
Example no. 14
class Extract():
    """ The extract process. """

    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        print("Output Directory: {}".format(self.args.output_dir))
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True)
        self.plugins = Plugins(self.args)

        self.post_process = PostProcess(arguments)

        self.export_face = True
        self.verify_output = False
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval

    def process(self):
        """ Perform the extraction process """
        print('Starting, this may take a while...')
        Utils.set_verbosity(self.args.verbose)
#        queue_manager.debug_monitor(1)
        self.threaded_io("load")
        save_thread = self.threaded_io("save")
        self.run_extraction(save_thread)
        self.alignments.save()
        Utils.finalize(self.images.images_found,
                       self.alignments.faces_count,
                       self.verify_output)
        self.plugins.process_detect.join()
        self.plugins.process_align.join()

    def threaded_io(self, task, io_args=None):
        """ Load images in a background thread """
        io_args = tuple() if io_args is None else (io_args, )
        if task == "load":
            func = self.load_images
        elif task == "save":
            func = self.save_faces
        elif task == "reload":
            func = self.reload_images
        io_thread = MultiThread(thread_count=1)
        io_thread.in_thread(func, *io_args)
        return io_thread

    def load_images(self):
        """ Load the images """
        load_queue = queue_manager.get_queue("load")
        for filename, image in self.images.load():
            imagename = os.path.basename(filename)
            if imagename in self.alignments.data.keys():
                continue
            load_queue.put((filename, image))
        load_queue.put("EOF")

    def reload_images(self, detected_faces):
        """ Reload the images and pair to detected face """
        load_queue = queue_manager.get_queue("detect")
        for filename, image in self.images.load():
            detect_item = detected_faces.pop(filename, None)
            if not detect_item:
                continue
            detect_item["image"] = image
            load_queue.put(detect_item)
        load_queue.put("EOF")

    def save_faces(self):
        """ Save the generated faces """
        if not self.export_face:
            return

        save_queue = queue_manager.get_queue("save")
        while True:
            item = save_queue.get()
            if item == "EOF":
                break
            filename, output_file, resized_face, idx = item
            out_filename = "{}_{}{}".format(str(output_file),
                                            str(idx),
                                            Path(filename).suffix)
            # pylint: disable=no-member
            cv2.imwrite(out_filename, resized_face)

    def run_extraction(self, save_thread):
        """ Run Face Detection """
        to_process = self.process_item_count()
        frame_no = 0
        size = self.args.size if hasattr(self.args, "size") else 256
        align_eyes = self.args.align_eyes if hasattr(self.args, "align_eyes") else False

        if self.plugins.is_parallel:
            self.plugins.launch_aligner()
            self.plugins.launch_detector()
        if not self.plugins.is_parallel:
            self.run_detection(to_process)
            self.plugins.launch_aligner()

        for faces in tqdm(self.plugins.detect_faces(extract_pass="******"),
                          total=to_process,
                          file=sys.stdout,
                          desc="Extracting faces"):

            exception = faces.get("exception", False)
            if exception:
                exit(1)
            filename = faces["filename"]
            faces["output_file"] = self.output_dir / Path(filename).stem

            self.align_face(faces, align_eyes, size)
            self.post_process.do_actions(faces)

            faces_count = len(faces["detected_faces"])
            if self.args.verbose and faces_count == 0:
                print("Warning: No faces were detected in image: "
                      "{}".format(os.path.basename(filename)))

            if not self.verify_output and faces_count > 1:
                self.verify_output = True

            self.process_faces(filename, faces)

            frame_no += 1
            if frame_no == self.save_interval:
                self.alignments.save()
                frame_no = 0

        if self.export_face:
            queue_manager.get_queue("save").put("EOF")
        save_thread.join_threads()

    def process_item_count(self):
        """ Return the number of items to be processedd """
        processed = sum(os.path.basename(frame) in self.alignments.data.keys()
                        for frame in self.images.input_images)

        if processed != 0 and self.args.skip_existing:
            print("Skipping {} previously extracted frames".format(processed))
        if processed != 0 and self.args.skip_faces:
            print("Skipping {} frames with detected faces".format(processed))

        to_process = self.images.images_found - processed
        if to_process == 0:
            print("No frames to process. Exiting")
            queue_manager.terminate_queues()
            exit(0)
        return to_process

    def run_detection(self, to_process):
        """ Run detection only """
        self.plugins.launch_detector()
        detected_faces = dict()
        for detected in tqdm(self.plugins.detect_faces(extract_pass="******"),
                             total=to_process,
                             file=sys.stdout,
                             desc="Detecting faces"):
            exception = detected.get("exception", False)
            if exception:
                break

            del detected["image"]
            filename = detected["filename"]

            detected_faces[filename] = detected

        self.threaded_io("reload", detected_faces)

    @staticmethod
    def align_face(faces, align_eyes, size, padding=48):
        """ Align the detected face """
        final_faces = list()
        image = faces["image"]
        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_dlib_rect(face, image)
            detected_face.landmarksXY = landmarks[idx]
            detected_face.frame_dims = image.shape[:2]
            detected_face.load_aligned(image,
                                       size=size,
                                       padding=padding,
                                       align_eyes=align_eyes)
            final_faces.append(detected_face)
        faces["detected_faces"] = final_faces

    def process_faces(self, filename, faces):
        """ Perform processing on found faces """
        final_faces = list()
        save_queue = queue_manager.get_queue("save")
        filename = faces["filename"]
        output_file = faces["output_file"]

        for idx, face in enumerate(faces["detected_faces"]):
            if self.export_face:
                save_queue.put((filename,
                                output_file,
                                face.aligned_face,
                                idx))

            final_faces.append(face.to_alignment())
        self.alignments.data[os.path.basename(filename)] = final_faces
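
The threaded_io/load_images/save_faces trio above is a producer-consumer pipeline: a loader thread pushes work items onto a queue, a saver thread drains it, and the string "EOF" acts as the shutdown sentinel. MultiThread and queue_manager are project-specific wrappers around this idea; a minimal stdlib-only sketch of the same pattern (the filenames are hypothetical and print stands in for cv2.imwrite):

import queue
import threading

def loader(out_queue):
    # Producer: enqueue work items, then the sentinel, as load_images() does.
    for filename in ("frame_0001.png", "frame_0002.png"):
        out_queue.put(filename)
    out_queue.put("EOF")

def saver(in_queue):
    # Consumer: drain the queue until the sentinel arrives, as save_faces() does.
    while True:
        item = in_queue.get()
        if item == "EOF":
            break
        print("saving", item)

work_queue = queue.Queue()
save_thread = threading.Thread(target=saver, args=(work_queue,))
save_thread.start()
loader(work_queue)
save_thread.join()
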
Example no. 15
class Extract():
    """ The extract process. """
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s", self.__class__.__name__,
                     arguments)
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True)
        self.plugins = Plugins(self.args)

        self.post_process = PostProcess(arguments)

        self.export_face = True
        self.verify_output = False
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)

    def process(self):
        """ Perform the extraction process """
        logger.info('Starting, this may take a while...')
        Utils.set_verbosity()
        #        queue_manager.debug_monitor(1)
        self.threaded_io("load")
        save_thread = self.threaded_io("save")
        self.run_extraction(save_thread)
        self.alignments.save()
        Utils.finalize(self.images.images_found, self.alignments.faces_count,
                       self.verify_output)

    def threaded_io(self, task, io_args=None):
        """ Load images in a background thread """
        logger.debug("Threading task: (Task: '%s')", task)
        io_args = tuple() if io_args is None else (io_args, )
        if task == "load":
            func = self.load_images
        elif task == "save":
            func = self.save_faces
        elif task == "reload":
            func = self.reload_images
        io_thread = MultiThread(func, *io_args, thread_count=1)
        io_thread.start()
        return io_thread

    def load_images(self):
        """ Load the images """
        logger.debug("Load Images: Start")
        load_queue = queue_manager.get_queue("load")
        for filename, image in self.images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            imagename = os.path.basename(filename)
            if imagename in self.alignments.data.keys():
                logger.trace("Skipping image: '%s'", filename)
                continue
            item = {"filename": filename, "image": image}
            load_queue.put(item)
        load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def reload_images(self, detected_faces):
        """ Reload the images and pair to detected face """
        logger.debug("Reload Images: Start. Detected Faces Count: %s",
                     len(detected_faces))
        load_queue = queue_manager.get_queue("detect")
        for filename, image in self.images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Reload Queue: Stop signal received. Terminating")
                break
            logger.trace("Reloading image: '%s'", filename)
            detect_item = detected_faces.pop(filename, None)
            if not detect_item:
                logger.warning("Couldn't find faces for: %s", filename)
                continue
            detect_item["image"] = image
            load_queue.put(detect_item)
        load_queue.put("EOF")
        logger.debug("Reload Images: Complete")

    def save_faces(self):
        """ Save the generated faces """
        logger.debug("Save Faces: Start")
        if not self.export_face:
            logger.debug("Not exporting faces")
            logger.debug("Save Faces: Complete")
            return

        save_queue = queue_manager.get_queue("save")
        while True:
            if save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = save_queue.get()
            if item == "EOF":
                break
            filename, output_file, resized_face, idx = item
            out_filename = "{}_{}{}".format(str(output_file), str(idx),
                                            Path(filename).suffix)
            logger.trace("Saving face: '%s'", out_filename)
            try:
                cv2.imwrite(out_filename, resized_face)  # pylint: disable=no-member
            except Exception as err:  # pylint: disable=broad-except
                logger.error("Failed to save image '%s'. Original Error: %s",
                             out_filename, err)
                continue
        logger.debug("Save Faces: Complete")

    def run_extraction(self, save_thread):
        """ Run Face Detection """
        save_queue = queue_manager.get_queue("save")
        to_process = self.process_item_count()
        frame_no = 0
        size = self.args.size if hasattr(self.args, "size") else 256
        align_eyes = self.args.align_eyes if hasattr(self.args,
                                                     "align_eyes") else False

        if self.plugins.is_parallel:
            logger.debug("Using parallel processing")
            self.plugins.launch_aligner()
            self.plugins.launch_detector()
        if not self.plugins.is_parallel:
            logger.debug("Using serial processing")
            self.run_detection(to_process)
            self.plugins.launch_aligner()

        for faces in tqdm(self.plugins.detect_faces(extract_pass="******"),
                          total=to_process,
                          file=sys.stdout,
                          desc="Extracting faces"):

            filename = faces["filename"]

            self.align_face(faces, align_eyes, size, filename)
            self.post_process.do_actions(faces)

            faces_count = len(faces["detected_faces"])
            if faces_count == 0:
                logger.verbose("No faces were detected in image: %s",
                               os.path.basename(filename))

            if not self.verify_output and faces_count > 1:
                self.verify_output = True

            self.process_faces(filename, faces, save_queue)

            frame_no += 1
            if frame_no == self.save_interval:
                self.alignments.save()
                frame_no = 0

        if self.export_face:
            save_queue.put("EOF")
        save_thread.join()

    def process_item_count(self):
        """ Return the number of items to be processedd """
        processed = sum(
            os.path.basename(frame) in self.alignments.data.keys()
            for frame in self.images.input_images)
        logger.debug("Items already processed: %s", processed)

        if processed != 0 and self.args.skip_existing:
            logger.info("Skipping previously extracted frames: %s", processed)
        if processed != 0 and self.args.skip_faces:
            logger.info("Skipping frames with detected faces: %s", processed)

        to_process = self.images.images_found - processed
        logger.debug("Items to be Processed: %s", to_process)
        if to_process == 0:
            logger.error("No frames to process. Exiting")
            queue_manager.terminate_queues()
            exit(0)
        return to_process

    def run_detection(self, to_process):
        """ Run detection only """
        self.plugins.launch_detector()
        detected_faces = dict()
        for detected in tqdm(self.plugins.detect_faces(extract_pass="******"),
                             total=to_process,
                             file=sys.stdout,
                             desc="Detecting faces"):
            exception = detected.get("exception", False)
            if exception:
                break

            del detected["image"]
            filename = detected["filename"]

            detected_faces[filename] = detected

        self.threaded_io("reload", detected_faces)

    def align_face(self, faces, align_eyes, size, filename, padding=48):
        """ Align the detected face and add the destination file path """
        final_faces = list()
        image = faces["image"]
        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_dlib_rect(face, image)
            detected_face.landmarksXY = landmarks[idx]
            detected_face.frame_dims = image.shape[:2]
            detected_face.load_aligned(image,
                                       size=size,
                                       padding=padding,
                                       align_eyes=align_eyes)
            final_faces.append({"file_location": self.output_dir / Path(filename).stem,
                                "face": detected_face})
        faces["detected_faces"] = final_faces

    def process_faces(self, filename, faces, save_queue):
        """ Perform processing on found faces """
        final_faces = list()
        filename = faces["filename"]

        for idx, detected_face in enumerate(faces["detected_faces"]):
            if self.export_face:
                save_queue.put((filename, detected_face["file_location"],
                                detected_face["face"].aligned_face, idx))

            final_faces.append(detected_face["face"].to_alignment())
        self.alignments.data[os.path.basename(filename)] = final_faces
Example no. 16
class Convert():
    """ The convert process. """
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        self.extract_faces = False
        self.faces_count = 0

        self.images = Images(self.args)
        self.alignments = Alignments(self.args, False, self.images.is_video)

        # Update Legacy alignments
        Legacy(self.alignments, self.images.input_images,
               arguments.input_aligned_dir)

        self.post_process = PostProcess(arguments)
        self.verify_output = False

        self.opts = OptionalActions(self.args, self.images.input_images,
                                    self.alignments)
        logger.debug("Initialized %s", self.__class__.__name__)

    def process(self):
        """ Original & LowMem models go with Adjust or Masked converter

            Note: GAN prediction outputs a mask + an image, while other
            models predict only an image. """
        Utils.set_verbosity(self.args.loglevel)

        if not self.alignments.have_alignments_file:
            self.load_extractor()

        model = self.load_model()
        converter = self.load_converter(model)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        for item in batch.iterator():
            self.convert(converter, item)

        if self.extract_faces:
            queue_manager.terminate_queues()

        Utils.finalize(self.images.images_found, self.faces_count,
                       self.verify_output)

    def load_extractor(self):
        """ Set on the fly extraction """
        logger.warning("No Alignments file found. Extracting on the fly.")
        logger.warning(
            "NB: This will use the inferior dlib-hog for extraction "
            "and dlib pose predictor for landmarks. It is recommended "
            "to perfom Extract first for superior results")
        for task in ("load", "detect", "align"):
            queue_manager.add_queue(task, maxsize=0)

        detector = PluginLoader.get_detector("dlib_hog")(
            loglevel=self.args.loglevel)
        aligner = PluginLoader.get_aligner("dlib")(loglevel=self.args.loglevel)

        d_kwargs = {
            "in_queue": queue_manager.get_queue("load"),
            "out_queue": queue_manager.get_queue("detect")
        }
        a_kwargs = {
            "in_queue": queue_manager.get_queue("detect"),
            "out_queue": queue_manager.get_queue("align")
        }

        d_process = SpawnProcess(detector.run, **d_kwargs)
        d_event = d_process.event
        d_process.start()

        a_process = SpawnProcess(aligner.run, **a_kwargs)
        a_event = a_process.event
        a_process.start()

        d_event.wait(10)
        if not d_event.is_set():
            raise ValueError("Error inititalizing Detector")
        a_event.wait(10)
        if not a_event.is_set():
            raise ValueError("Error inititalizing Aligner")

        self.extract_faces = True

    def load_model(self):
        """ Load the model requested for conversion """
        model_name = self.args.trainer
        model_dir = get_folder(self.args.model_dir)
        num_gpus = self.args.gpus

        model = PluginLoader.get_model(model_name)(model_dir, num_gpus)

        if not model.load(self.args.swap_model):
            logger.error("Model Not Found! A valid model "
                         "must be provided to continue!")
            exit(1)

        return model

    def load_converter(self, model):
        """ Load the requested converter for conversion """
        args = self.args
        conv = args.converter

        converter = PluginLoader.get_converter(conv)(
            model.converter(False),
            trainer=args.trainer,
            blur_size=args.blur_size,
            seamless_clone=args.seamless_clone,
            sharpen_image=args.sharpen_image,
            mask_type=args.mask_type,
            erosion_kernel_size=args.erosion_kernel_size,
            match_histogram=args.match_histogram,
            smooth_mask=args.smooth_mask,
            avg_color_adjust=args.avg_color_adjust,
            draw_transparent=args.draw_transparent)

        return converter

    def prepare_images(self):
        """ Prepare the images for conversion """
        filename = ""
        for filename, image in tqdm(self.images.load(),
                                    total=self.images.images_found,
                                    file=sys.stdout):

            if (self.args.discard_frames
                    and self.opts.check_skipframe(filename) == "discard"):
                continue

            frame = os.path.basename(filename)
            if self.extract_faces:
                detected_faces = self.detect_faces(filename, image)
            else:
                detected_faces = self.alignments_faces(frame, image)

            faces_count = len(detected_faces)
            if faces_count != 0:
                # Post processing requires a dict with "detected_faces" key
                self.post_process.do_actions(
                    {"detected_faces": detected_faces})
                self.faces_count += faces_count

            if faces_count > 1:
                self.verify_output = True
                logger.verbose("Found more than one face in "
                               "an image! '%s'", frame)

            yield filename, image, detected_faces

    @staticmethod
    def detect_faces(filename, image):
        """ Extract the face from a frame (If not alignments file found) """
        queue_manager.get_queue("load").put((filename, image))
        item = queue_manager.get_queue("align").get()
        detected_faces = item["detected_faces"]
        return detected_faces

    def alignments_faces(self, frame, image):
        """ Get the face from alignments file """
        if not self.check_alignments(frame):
            return None

        faces = self.alignments.get_faces_in_frame(frame)
        detected_faces = list()

        for rawface in faces:
            face = DetectedFace()
            face.from_alignment(rawface, image=image)
            detected_faces.append(face)
        return detected_faces

    def check_alignments(self, frame):
        """ If we have no alignments for this image, skip it """
        have_alignments = self.alignments.frame_exists(frame)
        if not have_alignments:
            tqdm.write("No alignment found for {}, " "skipping".format(frame))
        return have_alignments

    def convert(self, converter, item):
        """ Apply the conversion transferring faces onto frames """
        try:
            filename, image, faces = item
            skip = self.opts.check_skipframe(filename)

            if not skip:
                for face in faces:
                    image = self.convert_one_face(converter, image, face)
                filename = str(self.output_dir / Path(filename).name)
                cv2.imwrite(filename, image)  # pylint: disable=no-member
        except Exception as err:
            logger.error("Failed to convert image: '%s'. Reason: %s", filename,
                         err)
            raise

    def convert_one_face(self, converter, image, face):
        """ Perform the conversion on the given frame for a single face """
        # TODO: This switch between 64 and 128 is a hack for now.
        # We should have a separate cli option for size
        size = 128 if (self.args.trainer.strip().lower()
                       in ('gan128', 'originalhighres')) else 64

        image = converter.patch_image(image, face, size)
        return image
Example no. 17
class Samples():
    """ Holds 5 random test faces """

    def __init__(self, arguments, sample_size, display, lock, trigger_patch):
        logger.debug("Initializing %s: (arguments: '%s', sample_size: %s, display: %s, lock: %s, "
                     "trigger_patch: %s)", self.__class__.__name__, arguments, sample_size,
                     display, lock, trigger_patch)
        self.sample_size = sample_size
        self.display = display
        self.lock = lock
        self.trigger_patch = trigger_patch
        self.input_images = list()
        self.predicted_images = list()

        self.images = Images(arguments)
        self.alignments = Alignments(arguments,
                                     is_extract=False,
                                     input_is_video=self.images.is_video)
        self.filelist = self.get_filelist()
        self.indices = self.get_indices()

        self.predictor = Predict(queue_manager.get_queue("preview_predict_in"),
                                 sample_size,
                                 arguments)
        self.generate()

        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def random_choice(self):
        """ Return for random indices from the indices group """
        retval = [random.choice(indices) for indices in self.indices]
        logger.debug(retval)
        return retval

    def get_filelist(self):
        """ Return a list of files, filtering out those frames which do not contain faces """
        logger.debug("Filtering file list to frames with faces")
        if self.images.is_video:
            filelist = ["{}_{:06d}.png".format(os.path.splitext(self.images.input_images)[0],
                                               frame_no)
                        for frame_no in range(1, self.images.images_found + 1)]
        else:
            filelist = self.images.input_images

        retval = [filename for filename in filelist
                  if self.alignments.frame_has_faces(os.path.basename(filename))]
        logger.debug("Filtered out frames: %s", self.images.images_found - len(retval))
        return retval

    def get_indices(self):
        """ Returns a list of 'self.sample_size' evenly sized partition indices
            pertaining to the filtered file list """
        # Remove start and end values to get a list divisible by self.sample_size
        no_files = len(self.filelist)
        crop = no_files % self.sample_size
        top_tail = list(range(no_files))[
            crop // 2:no_files - (crop - (crop // 2))]
        # Partition the indices
        size = len(top_tail)
        retval = [top_tail[start:start + size // self.sample_size]
                  for start in range(0, size, size // self.sample_size)]
        logger.debug("Indices pools: %s", ["{}: (start: {}, end: {}, size: {})".format(idx,
                                                                                       min(pool),
                                                                                       max(pool),
                                                                                       len(pool))
                                           for idx, pool in enumerate(retval)])
        return retval

    def generate(self):
        """ Generate a random test set """
        self.load_frames()
        self.predict()
        self.trigger_patch.set()

    def load_frames(self):
        """ Load a sample of random frames """
        self.input_images = list()
        for selection in self.random_choice:
            filename = os.path.basename(self.filelist[selection])
            image = self.images.load_one_image(self.filelist[selection])
            # Get first face only
            face = self.alignments.get_faces_in_frame(filename)[0]
            detected_face = DetectedFace()
            detected_face.from_alignment(face, image=image)
            self.input_images.append({"filename": filename,
                                      "image": image,
                                      "detected_faces": [detected_face]})
        self.display.source = self.input_images
        self.display.update_source = True
        logger.debug("Selected frames: %s", [frame["filename"] for frame in self.input_images])

    def predict(self):
        """ Predict from the loaded frames """
        with self.lock:
            self.predicted_images = list()
            for frame in self.input_images:
                self.predictor.in_queue.put(frame)
            idx = 0
            while idx < self.sample_size:
                logger.debug("Predicting face %s of %s", idx + 1, self.sample_size)
                item = self.predictor.out_queue.get()
                if item == "EOF":
                    logger.debug("Received EOF")
                    break
                self.predicted_images.append(item)
                logger.debug("Predicted face %s of %s", idx + 1, self.sample_size)
                idx += 1
        logger.debug("Predicted faces")
Example no. 18
class Extract():
    """ The extract process. """
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s", self.__class__.__name__, arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector,
                                   self.args.aligner,
                                   self.args.loglevel,
                                   self.args.multiprocess,
                                   self.args.rotate_images,
                                   self.args.min_size)

        self.save_queue = queue_manager.get_queue("extract_save")
        self.verify_output = False
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def skip_num(self):
        """ Number of frames to skip if extract_every_n is passed """
        return self.args.extract_every_n if hasattr(self.args, "extract_every_n") else 1

    def process(self):
        """ Perform the extraction process """
        logger.info('Starting, this may take a while...')
        # queue_manager.debug_monitor(3)
        self.threaded_io("load")
        save_thread = self.threaded_io("save")
        self.run_extraction()
        save_thread.join()
        self.alignments.save()
        Utils.finalize(self.images.images_found // self.skip_num,
                       self.alignments.faces_count,
                       self.verify_output)

    def threaded_io(self, task, io_args=None):
        """ Perform I/O task in a background thread """
        logger.debug("Threading task: (Task: '%s')", task)
        io_args = tuple() if io_args is None else (io_args, )
        if task == "load":
            func = self.load_images
        elif task == "save":
            func = self.save_faces
        elif task == "reload":
            func = self.reload_images
        io_thread = MultiThread(func, *io_args, thread_count=1)
        io_thread.start()
        return io_thread

    def load_images(self):
        """ Load the images """
        logger.debug("Load Images: Start")
        load_queue = self.extractor.input_queue
        idx = 0
        for filename, image in self.images.load():
            idx += 1
            if load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            if idx % self.skip_num != 0:
                logger.trace("Skipping image '%s' due to extract_every_n = %s",
                             filename, self.skip_num)
                continue
            if image is None or not image.any():
                logger.warning("Unable to open image. Skipping: '%s'", filename)
                continue
            imagename = os.path.basename(filename)
            if imagename in self.alignments.data.keys():
                logger.trace("Skipping image: '%s'", filename)
                continue
            item = {"filename": filename,
                    "image": image}
            load_queue.put(item)
        load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def reload_images(self, detected_faces):
        """ Reload the images and pair to detected face """
        logger.debug("Reload Images: Start. Detected Faces Count: %s", len(detected_faces))
        load_queue = self.extractor.input_queue
        for filename, image in self.images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Reload Queue: Stop signal received. Terminating")
                break
            logger.trace("Reloading image: '%s'", filename)
            detect_item = detected_faces.pop(filename, None)
            if not detect_item:
                logger.warning("Couldn't find faces for: %s", filename)
                continue
            detect_item["image"] = image
            load_queue.put(detect_item)
        load_queue.put("EOF")
        logger.debug("Reload Images: Complete")

    def save_faces(self):
        """ Save the generated faces """
        logger.debug("Save Faces: Start")
        while True:
            if self.save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = self.save_queue.get()
            logger.trace(item)
            if item == "EOF":
                break
            filename, face = item

            logger.trace("Saving face: '%s'", filename)
            try:
                with open(filename, "wb") as out_file:
                    out_file.write(face)
            except Exception as err:  # pylint: disable=broad-except
                logger.error("Failed to save image '%s'. Original Error: %s", filename, err)
                continue
        logger.debug("Save Faces: Complete")

    def process_item_count(self):
        """ Return the number of items to be processedd """
        processed = sum(os.path.basename(frame) in self.alignments.data.keys()
                        for frame in self.images.input_images)
        logger.debug("Items already processed: %s", processed)

        if processed != 0 and self.args.skip_existing:
            logger.info("Skipping previously extracted frames: %s", processed)
        if processed != 0 and self.args.skip_faces:
            logger.info("Skipping frames with detected faces: %s", processed)

        to_process = (self.images.images_found - processed) // self.skip_num
        logger.debug("Items to be Processed: %s", to_process)
        if to_process == 0:
            logger.error("No frames to process. Exiting")
            queue_manager.terminate_queues()
            exit(0)
        return to_process

    def run_extraction(self):
        """ Run Face Detection """
        to_process = self.process_item_count()
        size = self.args.size if hasattr(self.args, "size") else 256
        align_eyes = self.args.align_eyes if hasattr(self.args, "align_eyes") else False
        exception = False

        for phase in range(self.extractor.passes):
            if exception:
                break
            is_final = self.extractor.final_pass
            detected_faces = dict()
            self.extractor.launch()
            for idx, faces in enumerate(tqdm(self.extractor.detected_faces(),
                                             total=to_process,
                                             file=sys.stdout,
                                             desc="Running pass {} of {}: {}".format(
                                                 phase + 1,
                                                 self.extractor.passes,
                                                 self.extractor.phase.title()))):

                exception = faces.get("exception", False)
                if exception:
                    break
                filename = faces["filename"]

                if self.extractor.final_pass:
                    self.output_processing(faces, align_eyes, size, filename)
                    self.output_faces(filename, faces)
                    if self.save_interval and (idx + 1) % self.save_interval == 0:
                        self.alignments.save()
                else:
                    del faces["image"]
                    detected_faces[filename] = faces

            if is_final:
                logger.debug("Putting EOF to save")
                self.save_queue.put("EOF")
            else:
                logger.debug("Reloading images")
                self.threaded_io("reload", detected_faces)

    def output_processing(self, faces, align_eyes, size, filename):
        """ Prepare faces for output """
        self.align_face(faces, align_eyes, size, filename)
        self.post_process.do_actions(faces)

        faces_count = len(faces["detected_faces"])
        if faces_count == 0:
            logger.verbose("No faces were detected in image: %s",
                           os.path.basename(filename))

        if not self.verify_output and faces_count > 1:
            self.verify_output = True

    def align_face(self, faces, align_eyes, size, filename):
        """ Align the detected face and add the destination file path """
        final_faces = list()
        image = faces["image"]
        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_bounding_box(face, image)
            detected_face.landmarksXY = landmarks[idx]
            detected_face.load_aligned(image, size=size, align_eyes=align_eyes)
            final_faces.append({"file_location": self.output_dir / Path(filename).stem,
                                "face": detected_face})
        faces["detected_faces"] = final_faces

    def output_faces(self, filename, faces):
        """ Output faces to save thread """
        final_faces = list()
        for idx, detected_face in enumerate(faces["detected_faces"]):
            output_file = detected_face["file_location"]
            extension = Path(filename).suffix
            out_filename = "{}_{}{}".format(str(output_file), str(idx), extension)

            face = detected_face["face"]
            resized_face = face.aligned_face

            face.hash, img = hash_encode_image(resized_face, extension)
            self.save_queue.put((out_filename, img))
            final_faces.append(face.to_alignment())
        self.alignments.data[os.path.basename(filename)] = final_faces
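
hash_encode_image in output_faces() above is a project helper not shown in these snippets; the aligned face is encoded to the target extension and a hash is returned alongside the encoded bytes, which save_faces() then writes straight to disk. A rough stand-in to illustrate that flow (hash_and_encode is a hypothetical name, and hashing the encoded buffer rather than the decoded pixels is an assumption):

import hashlib

import cv2
import numpy as np

def hash_and_encode(face, extension=".png"):
    # Encode the aligned face to the requested container format and hash the
    # result so duplicate faces can be identified later.
    success, encoded = cv2.imencode(extension, face)
    if not success:
        raise ValueError("Could not encode face")
    return hashlib.sha1(encoded.tobytes()).hexdigest(), encoded

face = np.zeros((256, 256, 3), dtype=np.uint8)  # stand-in for an aligned face
face_hash, img = hash_and_encode(face)
with open("face_0.png", "wb") as out_file:      # mirrors save_faces() above
    out_file.write(img.tobytes())
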
Example no. 19
class Extract():
    """ The extract process. """
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s", self.__class__.__name__,
                     arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector, self.args.aligner,
                                   self.args.loglevel, self.args.multiprocess,
                                   self.args.rotate_images, self.args.min_size)

        self.save_queue = queue_manager.get_queue("extract_save")
        self.verify_output = False
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def skip_num(self):
        """ Number of frames to skip if extract_every_n is passed """
        return self.args.extract_every_n if hasattr(self.args,
                                                    "extract_every_n") else 1

    def process(self):
        """ Perform the extraction process """
        logger.info('Starting, this may take a while...')
        # queue_manager.debug_monitor(3)
        self.threaded_io("load")
        save_thread = self.threaded_io("save")
        self.run_extraction()
        save_thread.join()
        self.alignments.save()
        Utils.finalize(self.images.images_found // self.skip_num,
                       self.alignments.faces_count, self.verify_output)

    def threaded_io(self, task, io_args=None):
        """ Perform I/O task in a background thread """
        logger.debug("Threading task: (Task: '%s')", task)
        io_args = tuple() if io_args is None else (io_args, )
        if task == "load":
            func = self.load_images
        elif task == "save":
            func = self.save_faces
        elif task == "reload":
            func = self.reload_images
        io_thread = MultiThread(func, *io_args, thread_count=1)
        io_thread.start()
        return io_thread

    def load_images(self):
        """ Load the images """
        logger.debug("Load Images: Start")
        load_queue = self.extractor.input_queue
        idx = 0
        for filename, image in self.images.load():
            idx += 1
            if load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            if idx % self.skip_num != 0:
                logger.trace("Skipping image '%s' due to extract_every_n = %s",
                             filename, self.skip_num)
                continue
            if image is None or not image.any():
                logger.warning("Unable to open image. Skipping: '%s'",
                               filename)
                continue
            imagename = os.path.basename(filename)
            if imagename in self.alignments.data.keys():
                logger.trace("Skipping image: '%s'", filename)
                continue
            item = {"filename": filename, "image": image}
            load_queue.put(item)
        load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def reload_images(self, detected_faces):
        """ Reload the images and pair to detected face """
        logger.debug("Reload Images: Start. Detected Faces Count: %s",
                     len(detected_faces))
        load_queue = self.extractor.input_queue
        for filename, image in self.images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Reload Queue: Stop signal received. Terminating")
                break
            logger.trace("Reloading image: '%s'", filename)
            detect_item = detected_faces.pop(filename, None)
            if not detect_item:
                logger.warning("Couldn't find faces for: %s", filename)
                continue
            detect_item["image"] = image
            load_queue.put(detect_item)
        load_queue.put("EOF")
        logger.debug("Reload Images: Complete")

    def save_faces(self):
        """ Save the generated faces """
        logger.debug("Save Faces: Start")
        while True:
            if self.save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = self.save_queue.get()
            logger.trace(item)
            if item == "EOF":
                break
            filename, face = item

            logger.trace("Saving face: '%s'", filename)
            try:
                with open(filename, "wb") as out_file:
                    out_file.write(face)
            except Exception as err:  # pylint: disable=broad-except
                logger.error("Failed to save image '%s'. Original Error: %s",
                             filename, err)
                continue
        logger.debug("Save Faces: Complete")

    def process_item_count(self):
        """ Return the number of items to be processedd """
        processed = sum(
            os.path.basename(frame) in self.alignments.data.keys()
            for frame in self.images.input_images)
        logger.debug("Items already processed: %s", processed)

        if processed != 0 and self.args.skip_existing:
            logger.info("Skipping previously extracted frames: %s", processed)
        if processed != 0 and self.args.skip_faces:
            logger.info("Skipping frames with detected faces: %s", processed)

        to_process = (self.images.images_found - processed) // self.skip_num
        logger.debug("Items to be Processed: %s", to_process)
        if to_process == 0:
            logger.error("No frames to process. Exiting")
            queue_manager.terminate_queues()
            exit(0)
        return to_process

    def run_extraction(self):
        """ Run Face Detection """
        to_process = self.process_item_count()
        size = getattr(self.args, "size", 256)
        align_eyes = getattr(self.args, "align_eyes", False)
        exception = False

        for phase in range(self.extractor.passes):
            if exception:
                break
            is_final = self.extractor.final_pass
            detected_faces = dict()
            self.extractor.launch()
            for idx, faces in enumerate(
                    tqdm(self.extractor.detected_faces(),
                         total=to_process,
                         file=sys.stdout,
                         desc="Running pass {} of {}: {}".format(
                             phase + 1, self.extractor.passes,
                             self.extractor.phase.title()))):

                exception = faces.get("exception", False)
                if exception:
                    break
                filename = faces["filename"]

                if self.extractor.final_pass:
                    self.output_processing(faces, align_eyes, size, filename)
                    self.output_faces(filename, faces)
                    if (self.save_interval
                            and (idx + 1) % self.save_interval == 0):
                        self.alignments.save()
                else:
                    del faces["image"]
                    detected_faces[filename] = faces

            if is_final:
                logger.debug("Putting EOF to save")
                self.save_queue.put("EOF")
            else:
                logger.debug("Reloading images")
                self.threaded_io("reload", detected_faces)
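
# Standalone sketch of the progress reporting in run_extraction(): tqdm
# wraps a generator of per-frame results with a known total and a per-pass
# description. The generator below is a stand-in for the extractor output.
import sys
from tqdm import tqdm

def fake_detected_faces(count):
    for i in range(count):
        yield {"filename": "frame_{:04d}.png".format(i)}

to_process, passes = 5, 2
for phase in range(passes):
    desc = "Running pass {} of {}".format(phase + 1, passes)
    for faces in tqdm(fake_detected_faces(to_process), total=to_process,
                      file=sys.stdout, desc=desc):
        pass  # faces would be aligned/saved or carried to the next pass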

    def output_processing(self, faces, align_eyes, size, filename):
        """ Prepare faces for output """
        self.align_face(faces, align_eyes, size, filename)
        self.post_process.do_actions(faces)

        faces_count = len(faces["detected_faces"])
        if faces_count == 0:
            logger.verbose("No faces were detected in image: %s",
                           os.path.basename(filename))

        if not self.verify_output and faces_count > 1:
            self.verify_output = True

    def align_face(self, faces, align_eyes, size, filename):
        """ Align the detected face and add the destination file path """
        final_faces = list()
        image = faces["image"]
        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_bounding_box(face, image)
            detected_face.landmarksXY = landmarks[idx]
            detected_face.load_aligned(image, size=size, align_eyes=align_eyes)
            final_faces.append(
                {"file_location": self.output_dir / Path(filename).stem,
                 "face": detected_face})
        faces["detected_faces"] = final_faces

    def output_faces(self, filename, faces):
        """ Output faces to save thread """
        final_faces = list()
        for idx, detected_face in enumerate(faces["detected_faces"]):
            output_file = detected_face["file_location"]
            extension = Path(filename).suffix
            out_filename = "{}_{}{}".format(str(output_file), str(idx),
                                            extension)

            face = detected_face["face"]
            resized_face = face.aligned_face

            face.hash, img = hash_encode_image(resized_face, extension)
            self.save_queue.put((out_filename, img))
            final_faces.append(face.to_alignment())
        self.alignments.data[os.path.basename(filename)] = final_faces
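
# Standalone sketch of how output_faces() builds each face's filename: the
# frame's stem becomes the base path and the face index plus the original
# extension is appended. Paths here are hypothetical.
from pathlib import Path

output_dir = Path("/tmp/faces")          # hypothetical output folder
filename = "/data/video/frame_0042.png"  # hypothetical source frame

file_location = output_dir / Path(filename).stem
extension = Path(filename).suffix
for idx in range(2):  # e.g. two detected faces in the frame
    out_filename = "{}_{}{}".format(str(file_location), str(idx), extension)
    # -> /tmp/faces/frame_0042_0.png, then /tmp/faces/frame_0042_1.png
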
Example no. 20
0
class Convert(object):
    """ The convert process. """
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.opts = OptionalActions(self.args, self.images.input_images)

    def process(self):
        """ Original & LowMem models go with Adjust or Masked converter

            Note: GAN prediction outputs a mask + an image, while other
            predicts only an image. """
        Utils.set_verbosity(self.args.verbose)

        if not self.alignments.have_alignments_file:
            self.generate_alignments()

        self.faces.faces_detected = self.alignments.read_alignments()

        model = self.load_model()
        converter = self.load_converter(model)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        for item in batch.iterator():
            self.convert(converter, item)

        Utils.finalize(self.images.images_found, self.faces.num_faces_detected,
                       self.faces.verify_output)

    def generate_alignments(self):
        """ Generate an alignments file if one does not already
        exist. Does not save extracted faces """
        print('Alignments file not found. Generating at default values...')
        extract = Extract(self.args)
        extract.export_face = False
        extract.process()

    def load_model(self):
        """ Load the model requested for conversion """
        model_name = self.args.trainer
        model_dir = get_folder(self.args.model_dir)
        num_gpus = self.args.gpus

        model = PluginLoader.get_model(model_name)(model_dir, num_gpus)

        if not model.load(self.args.swap_model):
            print(
                "Model Not Found! A valid model must be provided to continue!")
            exit(1)

        return model

    def load_converter(self, model):
        """ Load the requested converter for conversion """
        args = self.args
        conv = args.converter

        converter = PluginLoader.get_converter(conv)(
            model.converter(False),
            trainer=args.trainer,
            blur_size=args.blur_size,
            seamless_clone=args.seamless_clone,
            sharpen_image=args.sharpen_image,
            mask_type=args.mask_type,
            erosion_kernel_size=args.erosion_kernel_size,
            match_histogram=args.match_histogram,
            smooth_mask=args.smooth_mask,
            avg_color_adjust=args.avg_color_adjust)
        return converter

    def prepare_images(self):
        """ Prepare the images for conversion """
        filename = ""
        for filename in tqdm(self.images.input_images, file=sys.stdout):
            if not self.check_alignments(filename):
                continue
            image = Utils.cv2_read_write('read', filename)
            faces = self.faces.get_faces_alignments(filename, image)
            if not faces:
                continue

            yield filename, image, faces

    def check_alignments(self, filename):
        """ If we have no alignments for this image, skip it """
        have_alignments = self.faces.have_face(filename)
        if not have_alignments:
            tqdm.write("No alignment found for {}, skipping".format(
                os.path.basename(filename)))
        return have_alignments

    def convert(self, converter, item):
        """ Apply the conversion transferring faces onto frames """
        try:
            filename, image, faces = item
            skip = self.opts.check_skipframe(filename)

            if not skip:
                for idx, face in faces:
                    image = self.convert_one_face(converter,
                                                  (filename, image, idx, face))
            if skip != "discard":
                filename = str(self.output_dir / Path(filename).name)
                Utils.cv2_read_write('write', filename, image)
        except Exception as err:
            print("Failed to convert image: {}. Reason: {}".format(
                filename, err))

    def convert_one_face(self, converter, imagevars):
        """ Perform the conversion on the given frame for a single face """
        filename, image, idx, face = imagevars

        if self.opts.check_skipface(filename, idx):
            return image

        image = self.images.rotate_image(image, face.r)
        # TODO: This switch between 64 and 128 is a hack for now.
        # We should have a separate cli option for size
        image = converter.patch_image(
            image, face, 64 if "128" not in self.args.trainer else 128)
        image = self.images.rotate_image(image, face.r, reverse=True)
        return image
Example no. 21
0
class Convert(object):
    """ The convert process. """
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.opts = OptionalActions(self.args, self.images.input_images)

    def process(self):
        """ Original & LowMem models go with Adjust or Masked converter

            Note: GAN prediction outputs a mask + an image, while other
            predicts only an image. """
        Utils.set_verbosity(self.args.verbose)

        if not self.alignments.have_alignments_file:
            self.generate_alignments()

        self.faces.faces_detected = self.alignments.read_alignments()

        model = self.load_model()
        converter = self.load_converter(model)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        for item in batch.iterator():
            self.convert(converter, item)

        Utils.finalize(self.images.images_found,
                       self.faces.num_faces_detected,
                       self.faces.verify_output)

    def generate_alignments(self):
        """ Generate an alignments file if one does not already
        exist. Does not save extracted faces """
        print('Alignments file not found. Generating at default values...')
        extract = Extract(self.args)
        extract.export_face = False
        extract.process()

    def load_model(self):
        """ Load the model requested for conversion """
        model_name = self.args.trainer
        model_dir = get_folder(self.args.model_dir)
        num_gpus = self.args.gpus

        model = PluginLoader.get_model(model_name)(model_dir, num_gpus)

        if not model.load(self.args.swap_model):
            print("Model Not Found! A valid model "
                  "must be provided to continue!")
            exit(1)

        return model

    def load_converter(self, model):
        """ Load the requested converter for conversion """
        args = self.args
        conv = args.converter

        converter = PluginLoader.get_converter(conv)(
            model.converter(False),
            trainer=args.trainer,
            blur_size=args.blur_size,
            seamless_clone=args.seamless_clone,
            sharpen_image=args.sharpen_image,
            mask_type=args.mask_type,
            erosion_kernel_size=args.erosion_kernel_size,
            match_histogram=args.match_histogram,
            smooth_mask=args.smooth_mask,
            avg_color_adjust=args.avg_color_adjust)

        return converter

    def prepare_images(self):
        """ Prepare the images for conversion """
        filename = ""
        for filename in tqdm(self.images.input_images, file=sys.stdout):
            if not self.check_alignments(filename):
                continue
            image = Utils.cv2_read_write('read', filename)
            faces = self.faces.get_faces_alignments(filename, image)
            if not faces:
                continue

            yield filename, image, faces

    def check_alignments(self, filename):
        """ If we have no alignments for this image, skip it """
        have_alignments = self.faces.have_face(filename)
        if not have_alignments:
            tqdm.write("No alignment found for {}, "
                       "skipping".format(os.path.basename(filename)))
        return have_alignments

    def convert(self, converter, item):
        """ Apply the conversion transferring faces onto frames """
        try:
            filename, image, faces = item
            skip = self.opts.check_skipframe(filename)

            if not skip:
                for idx, face in faces:
                    image = self.convert_one_face(converter,
                                                  (filename, image, idx, face))
            if skip != "discard":
                filename = str(self.output_dir / Path(filename).name)
                Utils.cv2_read_write('write', filename, image)
        except Exception as err:
            print("Failed to convert image: {}. "
                  "Reason: {}".format(filename, err))

    def convert_one_face(self, converter, imagevars):
        """ Perform the conversion on the given frame for a single face """
        filename, image, idx, face = imagevars

        if self.opts.check_skipface(filename, idx):
            return image

        image = self.images.rotate_image(image, face.r)
        # TODO: This switch between 64 and 128 is a hack for now.
        # We should have a separate cli option for size

        size = 128 if (self.args.trainer.strip().lower()
                       in ('gan128', 'originalhighres')) else 64

        image = converter.patch_image(image, face, size)
        image = self.images.rotate_image(image, face.r, reverse=True)
        return image
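
# Standalone sketch of the patch-size selection used above: the earlier
# example keys off "128" appearing in the trainer name, while this one
# whitelists specific trainers. Shown here as a small helper for clarity.
def patch_size(trainer_name):
    """ Return the conversion patch size for a given trainer name """
    if trainer_name.strip().lower() in ("gan128", "originalhighres"):
        return 128
    return 64

assert patch_size("GAN128") == 128
assert patch_size("Original") == 64
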
Example no. 22
0
class Convert():
    """ The convert process. """
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        self.extractor = None
        self.faces_count = 0

        self.images = Images(self.args)
        self.alignments = Alignments(self.args, False, self.images.is_video)

        # Update Legacy alignments
        Legacy(self.alignments, self.images.input_images,
               arguments.input_aligned_dir)

        self.post_process = PostProcess(arguments)
        self.verify_output = False

        self.opts = OptionalActions(self.args, self.images.input_images,
                                    self.alignments)
        logger.debug("Initialized %s", self.__class__.__name__)

    def process(self):
        """ Original & LowMem models go with converter

            Note: GAN prediction outputs a mask + an image, while other
            predicts only an image. """
        Utils.set_verbosity(self.args.loglevel)

        if not self.alignments.have_alignments_file:
            self.load_extractor()

        model = self.load_model()
        converter = self.load_converter(model)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        for item in batch.iterator():
            self.convert(converter, item)

        if self.extractor:
            queue_manager.terminate_queues()

        Utils.finalize(self.images.images_found, self.faces_count,
                       self.verify_output)

    def load_extractor(self):
        """ Set on the fly extraction """
        logger.warning("No Alignments file found. Extracting on the fly.")
        logger.warning(
            "NB: This will use the inferior dlib-hog for extraction "
            "and dlib pose predictor for landmarks. It is recommended "
            "to perfom Extract first for superior results")
        extract_args = {
            "detector": "dlib-hog",
            "aligner": "dlib",
            "loglevel": self.args.loglevel
        }
        self.extractor = Extractor(None, extract_args)
        self.extractor.launch_detector()
        self.extractor.launch_aligner()

    def load_model(self):
        """ Load the model requested for conversion """
        logger.debug("Loading Model")
        model_dir = get_folder(self.args.model_dir)
        model = PluginLoader.get_model(self.args.trainer)(model_dir,
                                                          self.args.gpus,
                                                          predict=True)
        logger.debug("Loaded Model")
        return model

    def load_converter(self, model):
        """ Load the requested converter for conversion """
        conv = self.args.converter
        converter = PluginLoader.get_converter(conv)(
            model.converter(self.args.swap_model),
            model=model,
            arguments=self.args)
        return converter

    def prepare_images(self):
        """ Prepare the images for conversion """
        filename = ""
        if self.extractor:
            load_queue = queue_manager.get_queue("load")
        for filename, image in tqdm(self.images.load(),
                                    total=self.images.images_found,
                                    file=sys.stdout):

            if (self.args.discard_frames
                    and self.opts.check_skipframe(filename) == "discard"):
                continue

            frame = os.path.basename(filename)
            if self.extractor:
                detected_faces = self.detect_faces(load_queue, filename, image)
            else:
                detected_faces = self.alignments_faces(frame, image)

            faces_count = len(detected_faces)
            if faces_count != 0:
                # Post processing requires a dict with "detected_faces" key
                self.post_process.do_actions(
                    {"detected_faces": detected_faces})
                self.faces_count += faces_count

            if faces_count > 1:
                self.verify_output = True
                logger.verbose("Found more than one face in "
                               "an image! '%s'", frame)

            yield filename, image, detected_faces

    def detect_faces(self, load_queue, filename, image):
        """ Extract the face from a frame (If alignments file not found) """
        inp = {"filename": filename, "image": image}
        load_queue.put(inp)
        faces = next(self.extractor.detect_faces())

        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        final_faces = list()

        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_dlib_rect(face)
            detected_face.landmarksXY = landmarks[idx]
            final_faces.append(detected_face)
        return final_faces

    def alignments_faces(self, frame, image):
        """ Get the face from alignments file """
        if not self.check_alignments(frame):
            return list()

        faces = self.alignments.get_faces_in_frame(frame)
        detected_faces = list()

        for rawface in faces:
            face = DetectedFace()
            face.from_alignment(rawface, image=image)
            detected_faces.append(face)
        return detected_faces

    def check_alignments(self, frame):
        """ If we have no alignments for this image, skip it """
        have_alignments = self.alignments.frame_exists(frame)
        if not have_alignments:
            tqdm.write("No alignment found for {}, " "skipping".format(frame))
        return have_alignments

    def convert(self, converter, item):
        """ Apply the conversion transferring faces onto frames """
        try:
            filename, image, faces = item
            skip = self.opts.check_skipframe(filename)

            if not skip:
                for face in faces:
                    image = converter.patch_image(image, face)
                filename = str(self.output_dir / Path(filename).name)

                if self.args.draw_transparent:
                    filename = "{}.png".format(os.path.splitext(filename)[0])
                    logger.trace("Set extension to png: `%s`", filename)

                cv2.imwrite(filename, image)  # pylint: disable=no-member
        except Exception as err:
            logger.error("Failed to convert image: '%s'. Reason: %s", filename,
                         err)
            raise
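
# Standalone sketch of the output-filename handling in convert() above:
# when draw_transparent is set, the extension is forced to .png so the
# alpha channel can be written. Paths and the flag value are made up.
import os
from pathlib import Path

output_dir = Path("/tmp/converted")      # hypothetical output folder
draw_transparent = True                  # hypothetical cli flag value
filename = "/data/video/frame_0042.jpg"  # hypothetical source frame

out_name = str(output_dir / Path(filename).name)
if draw_transparent:
    out_name = "{}.png".format(os.path.splitext(out_name)[0])
# out_name -> /tmp/converted/frame_0042.png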