Example #1
0
    def __init__(self, alignments, images, arguments):
        """ Initialize disk I/O: writer, frame ranges, optional extractor and threads. """
        logger.debug("Initializing %s: (alignments: %s, images: %s, arguments: %s)",
                     self.__class__.__name__, alignments, images, arguments)
        self.alignments = alignments
        self.images = images
        self.args = arguments
        self.pre_process = PostProcess(arguments)
        # Set by the save thread when it has finished writing (see init_threads)
        self.completion_event = Event()
        self.frame_ranges = self.get_frame_ranges()
        self.writer = self.get_writer()

        # For frame skipping
        self.imageidxre = re.compile(r"(\d+)(?!.*\d\.)(?=\.\w+$)")

        # Extractor for on the fly detection (only when no alignments file exists)
        self.extractor = None
        if not self.alignments.have_alignments_file:
            self.load_extractor()

        # Queues and threads are populated by init_threads()
        self.load_queue = None
        self.save_queue = None
        self.load_thread = None
        self.save_thread = None
        self.init_threads()
        logger.debug("Initialized %s", self.__class__.__name__)
Example #2
0
 def __init__(self, arguments):
     """ Initialize the extraction process from the given command line arguments. """
     # Fixed debug format string: it was missing the closing ")"
     logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                  arguments)
     self.args = arguments
     Utils.set_verbosity(self.args.loglevel)
     self.output_dir = get_folder(self.args.output_dir)
     logger.info("Output Directory: %s", self.args.output_dir)
     self.images = Images(self.args)
     self.alignments = Alignments(self.args, True, self.images.is_video)
     self.post_process = PostProcess(arguments)
     # configfile is only present for some commands, so guard the lookup
     configfile = self.args.configfile if hasattr(self.args,
                                                  "configfile") else None
     # "none" means no face normalization method is applied
     normalization = None if self.args.normalization == "none" else self.args.normalization
     self.extractor = Extractor(self.args.detector,
                                self.args.aligner,
                                self.args.loglevel,
                                configfile=configfile,
                                multiprocess=not self.args.singleprocess,
                                rotate_images=self.args.rotate_images,
                                min_size=self.args.min_size,
                                normalize_method=normalization)
     self.save_queue = queue_manager.get_queue("extract_save")
     self.verify_output = False
     # save_interval is only present for some commands, so guard the lookup
     self.save_interval = None
     if hasattr(self.args, "save_interval"):
         self.save_interval = self.args.save_interval
     logger.debug("Initialized %s", self.__class__.__name__)
Example #3
0
    def __init__(self, arguments):
        """ Initialize the extraction process from the given command line arguments. """
        # Fixed debug format string: it was missing the closing ")"
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self._args = arguments
        # No output folder is required when faces are not being saved
        self._output_dir = None if self._args.skip_saving_faces else get_folder(
            self._args.output_dir)

        logger.info("Output Directory: %s", self._args.output_dir)
        self._images = ImagesLoader(self._args.input_dir, fast_count=True)
        self._alignments = Alignments(self._args, True, self._images.is_video)

        self._existing_count = 0
        self._set_skip_list()

        self._post_process = PostProcess(arguments)
        # configfile is only present for some commands, so guard the lookup
        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        # "none" means no face normalization method is applied
        normalization = None if self._args.normalization == "none" else self._args.normalization

        # components and extended masks are always generated on top of any user-requested ones
        maskers = ["components", "extended"]
        maskers += self._args.masker if self._args.masker else []
        self._extractor = Extractor(self._args.detector,
                                    self._args.aligner,
                                    maskers,
                                    configfile=configfile,
                                    multiprocess=not self._args.singleprocess,
                                    exclude_gpus=self._args.exclude_gpus,
                                    rotate_images=self._args.rotate_images,
                                    min_size=self._args.min_size,
                                    normalize_method=normalization,
                                    re_feed=self._args.re_feed)
        self._threads = list()
        self._verify_output = False
        logger.debug("Initialized %s", self.__class__.__name__)
Example #4
0
    def __init__(self, arguments):
        """ Initialize the extraction process from the given command line arguments. """
        # Fixed debug format string: it was missing the closing ")"
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self._args = arguments

        self._output_dir = str(get_folder(self._args.output_dir))

        logger.info("Output Directory: %s", self._args.output_dir)
        self._images = ImagesLoader(self._args.input_dir,
                                    load_with_hash=False,
                                    fast_count=True)
        self._alignments = Alignments(self._args, True, self._images.is_video)

        self._existing_count = 0
        self._set_skip_list()

        self._post_process = PostProcess(arguments)
        # configfile is only present for some commands, so guard the lookup
        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        # "none" means no face normalization method is applied
        normalization = None if self._args.normalization == "none" else self._args.normalization
        self._extractor = Extractor(self._args.detector,
                                    self._args.aligner,
                                    self._args.masker,
                                    configfile=configfile,
                                    multiprocess=not self._args.singleprocess,
                                    rotate_images=self._args.rotate_images,
                                    min_size=self._args.min_size,
                                    normalize_method=normalization)
        self._threads = list()
        self._verify_output = False
        logger.debug("Initialized %s", self.__class__.__name__)
Example #5
0
    def __init__(self, arguments):
        """ Initialize the convert process: writer, extractor, model and converter. """
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
        self._args = arguments

        # Batch accumulator (populated outside of __init__)
        self.batch = list()

        self._serializer = get_serializer("json")
        self._pre_process = PostProcess(arguments)
        self._writer = self._get_writer()
        self._extractor = self._load_extractor()

        self._batchsize = self._get_batchsize(self._queue_size)
        self._model = self._load_model()
        # Indices into the model outputs for the largest face and mask outputs
        self._output_indices = {"face": self._model.largest_face_index,
                                "mask": self._model.largest_mask_index}

        self._predictor = self._model.converter(False)

        # configfile is only present for some commands, so guard the lookup
        configfile = self._args.configfile if hasattr(self._args, "configfile") else None
        self._converter = Converter(self.output_size,
                                    self.coverage_ratio,
                                    self.draw_transparent,
                                    self.pre_encode,
                                    arguments,
                                    configfile=configfile)

        logger.debug("Initialized %s", self.__class__.__name__)
Example #6
0
    def __init__(self, arguments):
        """ Store the command line arguments and set up I/O helpers. """
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        print("Output Directory: {}".format(self.args.output_dir))
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True)
        self.plugins = Plugins(self.args)

        self.post_process = PostProcess(arguments)

        self.export_face = True
        self.verify_output = False
        # save_interval is only present for some commands, so guard the lookup
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
Example #7
0
File: convert.py Project: aejot/fs
    def __init__(self, arguments):
        """ Set up the convert process from the given command line arguments. """
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        self.extract_faces = False
        self.faces_count = 0

        self.images = Images(self.args)
        self.alignments = Alignments(self.args, False)

        # Update Legacy alignments to the current format
        Legacy(self.alignments, self.images.input_images)

        self.post_process = PostProcess(arguments)
        self.verify_output = False

        self.opts = OptionalActions(self.args, self.images.input_images)
Example #8
0
    def __init__(self, arguments):
        """ Initialize the process from the given command line arguments. """
        # Fixed debug format string: it was missing the closing ")"
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.plugins = Plugins(self.args)

        self.post_process = PostProcess(arguments)

        self.verify_output = False
        # save_interval is only present for some commands, so guard the lookup
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)
Example #9
0
    def __init__(self, in_queue, queue_size, arguments):
        """ Set up the predictor: queues, serializer, model and worker thread. """
        logger.debug(
            "Initializing %s: (args: %s, queue_size: %s, in_queue: %s)",
            self.__class__.__name__, arguments, queue_size, in_queue)
        # Cap the batch size at 16 regardless of queue size
        self.batchsize = min(queue_size, 16)
        self.args = arguments
        self.in_queue = in_queue
        self.out_queue = queue_manager.get_queue("patch")
        self.serializer = Serializer.get_serializer("json")
        self.faces_count = 0
        self.verify_output = False
        self.pre_process = PostProcess(arguments)
        self.model = self.load_model()
        self.predictor = self.model.converter(self.args.swap_model)
        self.queues = dict()

        # Single background thread runs the face prediction loop
        self.thread = MultiThread(self.predict_faces, thread_count=1)
        self.thread.start()
        logger.debug("Initialized %s: (out_queue: %s)",
                     self.__class__.__name__, self.out_queue)
Example #10
0
    def __init__(self, alignments, images, arguments):
        """ Initialize disk I/O: writer, frame ranges, optional extractor and threads. """
        logger.debug("Initializing %s: (alignments: %s, images: %s, arguments: %s)",
                     self.__class__.__name__, alignments, images, arguments)
        self._alignments = alignments
        self._images = images
        self._args = arguments
        self._pre_process = PostProcess(arguments)
        self._completion_event = Event()

        # For frame skipping
        self._imageidxre = re.compile(r"(\d+)(?!.*\d\.)(?=\.\w+$)")
        self._frame_ranges = self._get_frame_ranges()
        self._writer = self._get_writer()

        # Extractor for on the fly detection
        self._extractor = self._load_extractor()

        self._queues = dict(load=None, save=None)
        # Fixed typo: the key was "oad" instead of "load", leaving the load thread
        # key missing and creating a stray "oad" entry (cf. the _queues dict above)
        self._threads = dict(load=None, save=None)
        self._init_threads()
        logger.debug("Initialized %s", self.__class__.__name__)
Example #11
0
    def __init__(self, arguments):
        """ Set up the convert process from the given command line arguments. """
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        self.extract_faces = False
        self.faces_count = 0

        self.images = Images(self.args)
        self.alignments = Alignments(self.args, False, self.images.is_video)

        # Update Legacy alignments to the current format
        Legacy(self.alignments, self.images.input_images,
               arguments.input_aligned_dir)

        self.post_process = PostProcess(arguments)
        self.verify_output = False

        self.opts = OptionalActions(self.args, self.images.input_images,
                                    self.alignments)
        logger.debug("Initialized %s", self.__class__.__name__)
Example #12
0
    def __init__(self, arguments):
        """ Initialize the extraction process from the given command line arguments. """
        # Fixed debug format string: it was missing the closing ")"
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector,
                                   self.args.aligner,
                                   self.args.loglevel,
                                   self.args.multiprocess,
                                   self.args.rotate_images,
                                   self.args.min_size)

        self.save_queue = queue_manager.get_queue("extract_save")
        self.verify_output = False
        # save_interval is only present for some commands, so guard the lookup
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)
Example #13
0
    def __init__(self, alignments, images, arguments):
        """ Initialize disk I/O: writer, frame ranges, optional extractor and threads. """
        logger.debug("Initializing %s: (alignments: %s, images: %s, arguments: %s)",
                     self.__class__.__name__, alignments, images, arguments)
        self.alignments = alignments
        self.images = images
        self.args = arguments
        self.pre_process = PostProcess(arguments)
        # Set by the save thread when it has finished writing (see init_threads)
        self.completion_event = Event()
        self.frame_ranges = self.get_frame_ranges()
        self.writer = self.get_writer()

        # For frame skipping
        self.imageidxre = re.compile(r"(\d+)(?!.*\d\.)(?=\.\w+$)")

        # Extractor for on the fly detection
        self.extractor = self.load_extractor()

        # Queues and threads are populated by init_threads()
        self.load_queue = None
        self.save_queue = None
        self.load_thread = None
        self.save_thread = None
        self.init_threads()
        logger.debug("Initialized %s", self.__class__.__name__)
Example #14
0
class DiskIO():
    """ Background threads to:
            Load images from disk and get the detected faces
            Save images back to disk """
    def __init__(self, alignments, images, arguments):
        logger.debug(
            "Initializing %s: (alignments: %s, images: %s, arguments: %s)",
            self.__class__.__name__, alignments, images, arguments)
        self.alignments = alignments
        self.images = images
        self.args = arguments
        self.pre_process = PostProcess(arguments)
        # Set by the save thread when it has finished writing
        self.completion_event = Event()

        # For frame skipping
        self.imageidxre = re.compile(r"(\d+)(?!.*\d\.)(?=\.\w+$)")
        self.frame_ranges = self.get_frame_ranges()
        self.writer = self.get_writer()

        # Extractor for on the fly detection
        self.extractor = self.load_extractor()

        # Queues and threads are populated by init_threads()
        self.load_queue = None
        self.save_queue = None
        self.load_thread = None
        self.save_thread = None
        self.init_threads()
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def draw_transparent(self):
        """ Draw transparent is an image writer only parameter.
            Return the value here for easy access for predictor """
        return self.writer.config.get("draw_transparent", False)

    @property
    def pre_encode(self):
        """ Return the writer's pre-encoder """
        # Probe the writer with a dummy image; None means no pre-encoding is done
        dummy = np.zeros((20, 20, 3), dtype="uint8")
        test = self.writer.pre_encode(dummy)
        retval = None if test is None else self.writer.pre_encode
        logger.debug("Writer pre_encode function: %s", retval)
        return retval

    @property
    def total_count(self):
        """ Return the total number of frames to be converted """
        if self.frame_ranges and not self.args.keep_unchanged:
            retval = sum([fr[1] - fr[0] + 1 for fr in self.frame_ranges])
        else:
            retval = self.images.images_found
        logger.debug(retval)
        return retval

    # Initialization
    def get_writer(self):
        """ Return the writer plugin """
        args = [self.args.output_dir]
        if self.args.writer in ("ffmpeg", "gif"):
            args.extend([self.total_count, self.frame_ranges])
        if self.args.writer == "ffmpeg":
            if self.images.is_video:
                args.append(self.args.input_dir)
            else:
                args.append(self.args.reference_video)
        logger.debug("Writer args: %s", args)
        configfile = self.args.configfile if hasattr(self.args,
                                                     "configfile") else None
        return PluginLoader.get_converter("writer", self.args.writer)(
            *args, configfile=configfile)

    def get_frame_ranges(self):
        """ split out the frame ranges and parse out 'min' and 'max' values """
        if not self.args.frame_ranges:
            logger.debug("No frame range set")
            return None

        minframe, maxframe = None, None
        if self.images.is_video:
            minframe, maxframe = 1, self.images.images_found
        else:
            indices = [
                int(self.imageidxre.findall(os.path.basename(filename))[0])
                for filename in self.images.input_images
            ]
            if indices:
                minframe, maxframe = min(indices), max(indices)
        logger.debug("minframe: %s, maxframe: %s", minframe, maxframe)

        if minframe is None or maxframe is None:
            raise FaceswapError(
                "Frame Ranges specified, but could not determine frame numbering "
                "from filenames")

        # Clamp each requested range to the frames that actually exist
        retval = list()
        for rng in self.args.frame_ranges:
            if "-" not in rng:
                raise FaceswapError(
                    "Frame Ranges not specified in the correct format")
            start, end = rng.split("-")
            retval.append((max(int(start), minframe), min(int(end), maxframe)))
        logger.debug("frame ranges: %s", retval)
        return retval

    def load_extractor(self):
        """ Set on the fly extraction """
        if self.alignments.have_alignments_file:
            return None

        logger.debug("Loading extractor")
        logger.warning("No Alignments file found. Extracting on the fly.")
        # Fixed typos in the user facing warning ("and  landmarks", "perfom")
        logger.warning(
            "NB: This will use the inferior cv2-dnn for extraction "
            "and landmarks. It is recommended to perform Extract first for "
            "superior results")
        extractor = Extractor(detector="cv2-dnn",
                              aligner="cv2-dnn",
                              masker="none",
                              multiprocess=True,
                              rotate_images=None,
                              min_size=20)
        extractor.launch()
        logger.debug("Loaded extractor")
        return extractor

    def init_threads(self):
        """ Initialize queues and threads """
        logger.debug("Initializing DiskIO Threads")
        for task in ("load", "save"):
            self.add_queue(task)
            self.start_thread(task)
        logger.debug("Initialized DiskIO Threads")

    def add_queue(self, task):
        """ Add the queue to queue_manager and set queue attribute """
        logger.debug("Adding queue for task: '%s'", task)
        if task == "load":
            q_name = "convert_in"
        elif task == "save":
            q_name = "convert_out"
        else:
            q_name = task
        setattr(self, "{}_queue".format(task), queue_manager.get_queue(q_name))
        logger.debug("Added queue for task: '%s'", task)

    def start_thread(self, task):
        """ Start the DiskIO thread """
        logger.debug("Starting thread: '%s'", task)
        # Only the save thread receives the completion event to signal when done
        args = self.completion_event if task == "save" else None
        func = getattr(self, task)
        io_thread = MultiThread(func, args, thread_count=1)
        io_thread.start()
        setattr(self, "{}_thread".format(task), io_thread)
        logger.debug("Started thread: '%s'", task)

    # Loading tasks
    def load(self, *args):  # pylint: disable=unused-argument
        """ Load the images with detected_faces"""
        logger.debug("Load Images: Start")
        # Removed an unused frame counter that was incremented but never read
        for filename, image in self.images.load():
            if self.load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            if image is None or (not image.any() and image.ndim not in (2, 3)):
                # All black frames will return not np.any() so check dims too
                logger.warning("Unable to open image. Skipping: '%s'",
                               filename)
                continue
            if self.check_skipframe(filename):
                if self.args.keep_unchanged:
                    logger.trace("Saving unchanged frame: %s", filename)
                    out_file = os.path.join(self.args.output_dir,
                                            os.path.basename(filename))
                    self.save_queue.put((out_file, image))
                else:
                    logger.trace("Discarding frame: '%s'", filename)
                continue

            detected_faces = self.get_detected_faces(filename, image)
            item = dict(filename=filename,
                        image=image,
                        detected_faces=detected_faces)
            self.pre_process.do_actions(item)
            self.load_queue.put(item)

        logger.debug("Putting EOF")
        self.load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def check_skipframe(self, filename):
        """ Check whether frame is to be skipped """
        if not self.frame_ranges:
            return None
        indices = self.imageidxre.findall(filename)
        if not indices:
            logger.warning(
                "Could not determine frame number. Frame will be converted: '%s'",
                filename)
            return False
        # indices is known to be non-empty here, so index directly
        idx = int(indices[0])
        skipframe = not any(
            map(lambda b: b[0] <= idx <= b[1], self.frame_ranges))
        logger.trace("idx: %s, skipframe: %s", idx, skipframe)
        return skipframe

    def get_detected_faces(self, filename, image):
        """ Return detected faces from alignments or detector """
        logger.trace("Getting faces for: '%s'", filename)
        if not self.extractor:
            detected_faces = self.alignments_faces(os.path.basename(filename),
                                                   image)
        else:
            detected_faces = self.detect_faces(filename, image)
        logger.trace("Got %s faces for: '%s'", len(detected_faces), filename)
        return detected_faces

    def alignments_faces(self, frame, image):
        """ Get the face from alignments file """
        if not self.check_alignments(frame):
            return list()

        faces = self.alignments.get_faces_in_frame(frame)
        detected_faces = list()

        for rawface in faces:
            face = DetectedFace()
            face.from_alignment(rawface, image=image)
            detected_faces.append(face)
        return detected_faces

    def check_alignments(self, frame):
        """ If we have no alignments for this image, skip it """
        have_alignments = self.alignments.frame_exists(frame)
        if not have_alignments:
            tqdm.write("No alignment found for {}, " "skipping".format(frame))
        return have_alignments

    def detect_faces(self, filename, image):
        """ Extract the face from a frame (If alignments file not found) """
        inp = {"filename": filename, "image": image}
        self.extractor.input_queue.put(inp)
        faces = next(self.extractor.detected_faces())

        # Simplified a pointless identity comprehension into a plain list() copy
        final_faces = list(faces["detected_faces"])
        return final_faces

    # Saving tasks
    def save(self, completion_event):
        """ Save the converted images """
        logger.debug("Save Images: Start")
        write_preview = self.args.redirect_gui and self.writer.is_stream
        preview_image = os.path.join(self.writer.output_folder,
                                     ".gui_preview.jpg")
        logger.debug("Write preview for gui: %s", write_preview)
        for idx in tqdm(range(self.total_count),
                        desc="Converting",
                        file=sys.stdout):
            if self.save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = self.save_queue.get()
            if item == "EOF":
                logger.debug("EOF Received")
                break
            filename, image = item
            # Write out preview image for the GUI every 10 frames if writing to stream
            if write_preview and idx % 10 == 0 and not os.path.exists(
                    preview_image):
                logger.debug("Writing GUI Preview image: '%s'", preview_image)
                imwrite(preview_image, image)
            self.writer.write(filename, image)
        self.writer.close()
        completion_event.set()
        # Fixed log message to match "Save Images: Start" (was "Save Faces")
        logger.debug("Save Images: Complete")
Example #15
0
class DiskIO():
    """ Background threads to:
            Load images from disk and get the detected faces
            Save images back to disk """
    def __init__(self, alignments, images, arguments):
        logger.debug("Initializing %s: (alignments: %s, images: %s, arguments: %s)",
                     self.__class__.__name__, alignments, images, arguments)
        self.alignments = alignments
        self.images = images
        self.args = arguments
        self.pre_process = PostProcess(arguments)
        # Set by the save thread when it has finished writing
        self.completion_event = Event()
        self.frame_ranges = self.get_frame_ranges()
        self.writer = self.get_writer()

        # For frame skipping
        self.imageidxre = re.compile(r"(\d+)(?!.*\d\.)(?=\.\w+$)")

        # Extractor for on the fly detection
        self.extractor = self.load_extractor()

        # Queues and threads are populated by init_threads()
        self.load_queue = None
        self.save_queue = None
        self.load_thread = None
        self.save_thread = None
        self.init_threads()
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def draw_transparent(self):
        """ Draw transparent is an image writer only parameter.
            Return the value here for easy access for predictor """
        return self.writer.config.get("draw_transparent", False)

    @property
    def pre_encode(self):
        """ Return the writer's pre-encoder """
        # Probe the writer with a dummy image; None means no pre-encoding is done
        dummy = np.zeros((20, 20, 3)).astype("uint8")
        test = self.writer.pre_encode(dummy)
        retval = None if test is None else self.writer.pre_encode
        logger.debug("Writer pre_encode function: %s", retval)
        return retval

    @property
    def total_count(self):
        """ Return the total number of frames to be converted """
        if self.frame_ranges and not self.args.keep_unchanged:
            retval = sum([fr[1] - fr[0] + 1 for fr in self.frame_ranges])
        else:
            retval = self.images.images_found
        logger.debug(retval)
        return retval

    # Initialization
    def get_writer(self):
        """ Return the writer plugin """
        args = [self.args.output_dir]
        if self.args.writer in ("ffmpeg", "gif"):
            args.append(self.total_count)
        if self.args.writer == "ffmpeg":
            if self.images.is_video:
                args.append(self.args.input_dir)
            else:
                args.append(self.args.reference_video)
        logger.debug("Writer args: %s", args)
        return PluginLoader.get_converter("writer", self.args.writer)(*args)

    def get_frame_ranges(self):
        """ split out the frame ranges and parse out 'min' and 'max' values """
        if not self.args.frame_ranges:
            logger.debug("No frame range set")
            return None

        minmax = {"min": 0,  # never any frames less than 0
                  "max": float("inf")}
        retval = [tuple(map(lambda q: minmax[q] if q in minmax.keys() else int(q), v.split("-")))
                  for v in self.args.frame_ranges]
        logger.debug("frame ranges: %s", retval)
        return retval

    def load_extractor(self):
        """ Set on the fly extraction """
        if self.alignments.have_alignments_file:
            return None

        logger.debug("Loading extractor")
        logger.warning("No Alignments file found. Extracting on the fly.")
        # Fixed typos in the user facing warning ("and  landmarks", "perfom")
        logger.warning("NB: This will use the inferior cv2-dnn for extraction "
                       "and landmarks. It is recommended to perform Extract first for "
                       "superior results")
        extractor = Extractor(detector="cv2-dnn",
                              aligner="cv2-dnn",
                              loglevel=self.args.loglevel,
                              multiprocess=False,
                              rotate_images=None,
                              min_size=20)
        extractor.launch()
        logger.debug("Loaded extractor")
        return extractor

    def init_threads(self):
        """ Initialize queues and threads """
        logger.debug("Initializing DiskIO Threads")
        for task in ("load", "save"):
            self.add_queue(task)
            self.start_thread(task)
        logger.debug("Initialized DiskIO Threads")

    def add_queue(self, task):
        """ Add the queue to queue_manager and set queue attribute """
        logger.debug("Adding queue for task: '%s'", task)
        if task == "load":
            q_name = "convert_in"
        elif task == "save":
            q_name = "convert_out"
        else:
            q_name = task
        setattr(self, "{}_queue".format(task), queue_manager.get_queue(q_name))
        logger.debug("Added queue for task: '%s'", task)

    def start_thread(self, task):
        """ Start the DiskIO thread """
        logger.debug("Starting thread: '%s'", task)
        # Only the save thread receives the completion event to signal when done
        args = self.completion_event if task == "save" else None
        func = getattr(self, task)
        io_thread = MultiThread(func, args, thread_count=1)
        io_thread.start()
        setattr(self, "{}_thread".format(task), io_thread)
        logger.debug("Started thread: '%s'", task)

    # Loading tasks
    def load(self, *args):  # pylint: disable=unused-argument
        """ Load the images with detected_faces"""
        logger.debug("Load Images: Start")
        # Removed an unused frame counter that was incremented but never read
        for filename, image in self.images.load():
            if self.load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            if image is None or not image.any():
                logger.warning("Unable to open image. Skipping: '%s'", filename)
                continue
            if self.check_skipframe(filename):
                if self.args.keep_unchanged:
                    logger.trace("Saving unchanged frame: %s", filename)
                    out_file = os.path.join(self.args.output_dir, os.path.basename(filename))
                    self.save_queue.put((out_file, image))
                else:
                    logger.trace("Discarding frame: '%s'", filename)
                continue

            detected_faces = self.get_detected_faces(filename, image)
            item = dict(filename=filename, image=image, detected_faces=detected_faces)
            self.pre_process.do_actions(item)
            self.load_queue.put(item)

        self.load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def check_skipframe(self, filename):
        """ Check whether frame is to be skipped """
        if not self.frame_ranges:
            return None
        indices = self.imageidxre.findall(filename)
        if not indices:
            logger.warning("Could not determine frame number. Frame will be converted: '%s'",
                           filename)
            return False
        # indices is known to be non-empty here, so index directly
        idx = int(indices[0])
        skipframe = not any(map(lambda b: b[0] <= idx <= b[1], self.frame_ranges))
        return skipframe

    def get_detected_faces(self, filename, image):
        """ Return detected faces from alignments or detector """
        logger.trace("Getting faces for: '%s'", filename)
        if not self.extractor:
            detected_faces = self.alignments_faces(os.path.basename(filename), image)
        else:
            detected_faces = self.detect_faces(filename, image)
        logger.trace("Got %s faces for: '%s'", len(detected_faces), filename)
        return detected_faces

    def alignments_faces(self, frame, image):
        """ Get the face from alignments file """
        if not self.check_alignments(frame):
            return list()

        faces = self.alignments.get_faces_in_frame(frame)
        detected_faces = list()

        for rawface in faces:
            face = DetectedFace()
            face.from_alignment(rawface, image=image)
            detected_faces.append(face)
        return detected_faces

    def check_alignments(self, frame):
        """ If we have no alignments for this image, skip it """
        have_alignments = self.alignments.frame_exists(frame)
        if not have_alignments:
            tqdm.write("No alignment found for {}, "
                       "skipping".format(frame))
        return have_alignments

    def detect_faces(self, filename, image):
        """ Extract the face from a frame (If alignments file not found) """
        inp = {"filename": filename,
               "image": image}
        self.extractor.input_queue.put(inp)
        faces = next(self.extractor.detected_faces())

        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        final_faces = list()

        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_bounding_box(face)
            detected_face.landmarksXY = landmarks[idx]
            final_faces.append(detected_face)
        return final_faces

    # Saving tasks
    def save(self, completion_event):
        """ Save the converted images """
        logger.debug("Save Images: Start")
        for _ in tqdm(range(self.total_count), desc="Converting", file=sys.stdout):
            if self.save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = self.save_queue.get()
            if item == "EOF":
                break
            filename, image = item
            self.writer.write(filename, image)
        self.writer.close()
        completion_event.set()
        # Fixed log message to match "Save Images: Start" (was "Save Faces")
        logger.debug("Save Images: Complete")
Example #16
0
class Extract():  # pylint:disable=too-few-public-methods
    """ The Faceswap Face Extraction Process.

    The extraction process is responsible for detecting faces in a series of images/video, aligning
    these faces and then generating a mask.

    It leverages a series of user selected plugins, chained together using
    :mod:`plugins.extract.pipeline`.

    The extract process is self contained and should not be referenced by any other scripts, so it
    contains no public properties.

    Parameters
    ----------
    arguments: argparse.Namespace
        The arguments to be passed to the extraction process as generated from Faceswap's command
        line arguments
    """
    def __init__(self, arguments):
        # Fixed: format string was missing its closing parenthesis
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self._args = arguments

        self._output_dir = str(get_folder(self._args.output_dir))

        logger.info("Output Directory: %s", self._args.output_dir)
        self._images = ImagesLoader(self._args.input_dir,
                                    load_with_hash=False,
                                    fast_count=True)
        self._alignments = Alignments(self._args, True, self._images.is_video)

        # Count of frames already present in the alignments file (populated by
        # _set_skip_list when skip-existing options are in play)
        self._existing_count = 0
        self._set_skip_list()

        self._post_process = PostProcess(arguments)
        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        normalization = None if self._args.normalization == "none" else self._args.normalization
        self._extractor = Extractor(self._args.detector,
                                    self._args.aligner,
                                    self._args.masker,
                                    configfile=configfile,
                                    multiprocess=not self._args.singleprocess,
                                    rotate_images=self._args.rotate_images,
                                    min_size=self._args.min_size,
                                    normalize_method=normalization)
        self._threads = list()
        self._verify_output = False
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def _save_interval(self):
        """ int: The number of frames to be processed between each saving of the alignments file if
        it has been provided, otherwise ``None`` """
        if hasattr(self._args, "save_interval"):
            return self._args.save_interval
        return None

    @property
    def _skip_num(self):
        """ int: Number of frames to skip if extract_every_n has been provided """
        return self._args.extract_every_n if hasattr(self._args,
                                                     "extract_every_n") else 1

    def _set_skip_list(self):
        """ Add the skip list to the image loader

        Checks against `extract_every_n` and the existence of alignments data (can exist if
        `skip_existing` or `skip_existing_faces` has been provided) and compiles a list of frame
        indices that should not be processed, providing these to :class:`lib.image.ImagesLoader`.
        """
        if self._skip_num == 1 and not self._alignments.data:
            logger.debug("No frames to be skipped")
            return
        skip_list = []
        for idx, filename in enumerate(self._images.file_list):
            if idx % self._skip_num != 0:
                logger.trace(
                    "Adding image '%s' to skip list due to extract_every_n = %s",
                    filename, self._skip_num)
                skip_list.append(idx)
            # Items may be in the alignments file if skip-existing[-faces] is selected
            elif os.path.basename(filename) in self._alignments.data:
                self._existing_count += 1
                logger.trace("Removing image: '%s' due to previously existing",
                             filename)
                skip_list.append(idx)
        if self._existing_count != 0:
            logger.info(
                "Skipping %s frames due to skip_existing/skip_existing_faces.",
                self._existing_count)
        logger.debug("Adding skip list: %s", skip_list)
        self._images.add_skip_list(skip_list)

    def process(self):
        """ The entry point for triggering the Extraction Process.

        Should only be called from  :class:`lib.cli.ScriptExecutor`
        """
        logger.info('Starting, this may take a while...')
        # from lib.queue_manager import queue_manager ; queue_manager.debug_monitor(3)
        self._threaded_redirector("load")
        self._run_extraction()
        for thread in self._threads:
            thread.join()
        self._alignments.save()
        finalize(self._images.process_count + self._existing_count,
                 self._alignments.faces_count, self._verify_output)

    def _threaded_redirector(self, task, io_args=None):
        """ Redirect image input/output tasks to relevant queues in background thread

        Parameters
        ----------
        task: str
            The name of the task to be put into a background thread
        io_args: tuple, optional
            Any arguments that need to be provided to the background function
        """
        logger.debug("Threading task: (Task: '%s')", task)
        io_args = tuple() if io_args is None else (io_args, )
        func = getattr(self, "_{}".format(task))
        io_thread = MultiThread(func, *io_args, thread_count=1)
        io_thread.start()
        self._threads.append(io_thread)

    def _load(self):
        """ Load the images

        Loads images from :class:`lib.image.ImagesLoader`, formats them into a dict compatible
        with :class:`plugins.extract.Pipeline.Extractor` and passes them into the extraction queue.
        """
        logger.debug("Load Images: Start")
        load_queue = self._extractor.input_queue
        for filename, image in self._images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            # Strip any alpha channel before feeding the pipeline
            item = ExtractMedia(filename, image[..., :3])
            load_queue.put(item)
        load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def _reload(self, detected_faces):
        """ Reload the images and pair to detected face

        When the extraction pipeline is running in serial mode, images are reloaded from disk,
        paired with their extraction data and passed back into the extraction queue

        Parameters
        ----------
        detected_faces: dict
            Dictionary of :class:`plugins.extract.pipeline.ExtractMedia` with the filename as the
            key for repopulating the image attribute.
        """
        logger.debug("Reload Images: Start. Detected Faces Count: %s",
                     len(detected_faces))
        load_queue = self._extractor.input_queue
        for filename, image in self._images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Reload Queue: Stop signal received. Terminating")
                break
            logger.trace("Reloading image: '%s'", filename)
            extract_media = detected_faces.pop(filename, None)
            if not extract_media:
                logger.warning("Couldn't find faces for: %s", filename)
                continue
            extract_media.set_image(image)
            load_queue.put(extract_media)
        load_queue.put("EOF")
        logger.debug("Reload Images: Complete")

    def _run_extraction(self):
        """ The main Faceswap Extraction process

        Receives items from :class:`plugins.extract.Pipeline.Extractor` and either saves out the
        faces and data (if on the final pass) or reprocesses data through the pipeline for serial
        processing.
        """
        size = self._args.size if hasattr(self._args, "size") else 256
        saver = ImagesSaver(self._output_dir, as_bytes=True)
        exception = False
        phase_desc = "Extraction"

        for phase in range(self._extractor.passes):
            if exception:
                break
            is_final = self._extractor.final_pass
            detected_faces = dict()
            self._extractor.launch()
            self._check_thread_error()
            if self._args.singleprocess:
                phase_desc = self._extractor.phase.title()
            desc = "Running pass {} of {}: {}".format(phase + 1,
                                                      self._extractor.passes,
                                                      phase_desc)
            status_bar = tqdm(self._extractor.detected_faces(),
                              total=self._images.process_count,
                              file=sys.stdout,
                              desc=desc)
            for idx, extract_media in enumerate(status_bar):
                self._check_thread_error()
                if is_final:
                    self._output_processing(extract_media, size)
                    self._output_faces(saver, extract_media)
                    if self._save_interval and (idx +
                                                1) % self._save_interval == 0:
                        self._alignments.save()
                else:
                    extract_media.remove_image()
                    # cache extract_media for next run
                    detected_faces[extract_media.filename] = extract_media
                # NOTE: iterating the tqdm wrapper already advances the
                # progress bar once per item, so the previous explicit
                # status_bar.update(1) call double-counted and was removed.

            if not is_final:
                logger.debug("Reloading images")
                self._threaded_redirector("reload", detected_faces)
        saver.close()

    def _check_thread_error(self):
        """ Check if any errors have occurred in the running threads and their errors """
        for thread in self._threads:
            thread.check_and_raise_error()

    def _output_processing(self, extract_media, size):
        """ Prepare faces for output

        Loads the aligned face, perform any processing actions and verify the output.

        Parameters
        ----------
        extract_media: :class:`plugins.extract.pipeline.ExtractMedia`
            Output from :class:`plugins.extract.pipeline.Extractor`
        size: int
            The size that the aligned face should be created at
        """
        for face in extract_media.detected_faces:
            face.load_aligned(extract_media.image, size=size)

        self._post_process.do_actions(extract_media)
        extract_media.remove_image()

        faces_count = len(extract_media.detected_faces)
        if faces_count == 0:
            logger.verbose("No faces were detected in image: %s",
                           os.path.basename(extract_media.filename))

        if not self._verify_output and faces_count > 1:
            self._verify_output = True

    def _output_faces(self, saver, extract_media):
        """ Output faces to save thread

        Set the face filename based on the frame name and put the face to the
        :class:`~lib.image.ImagesSaver` save queue and add the face information to the alignments
        data.

        Parameters
        ----------
        saver: lib.images.ImagesSaver
            The background saver for saving the image
        extract_media: :class:`~plugins.extract.pipeline.ExtractMedia`
            The output from :class:`~plugins.extract.Pipeline.Extractor`
        """
        logger.trace("Outputting faces for %s", extract_media.filename)
        final_faces = list()
        filename, extension = os.path.splitext(
            os.path.basename(extract_media.filename))
        for idx, face in enumerate(extract_media.detected_faces):
            output_filename = "{}_{}{}".format(filename, str(idx), extension)
            face.hash, image = encode_image_with_hash(face.aligned_face,
                                                      extension)

            saver.save(output_filename, image)
            final_faces.append(face.to_alignment())
        self._alignments.data[os.path.basename(
            extract_media.filename)] = dict(faces=final_faces)
        del extract_media
Example #17
0
class DiskIO():
    """ Disk Input/Output for the converter process.

    Background threads to:
        * Load images from disk and get the detected faces
        * Save images back to disk

    Parameters
    ----------
    alignments: :class:`lib.alignments.Alignments`
        The alignments for the input video
    images: :class:`lib.image.ImagesLoader`
        The input images
    arguments: :class:`argparse.Namespace`
        The arguments that were passed to the convert process as generated from Faceswap's command
        line arguments
    """
    def __init__(self, alignments, images, arguments):
        logger.debug(
            "Initializing %s: (alignments: %s, images: %s, arguments: %s)",
            self.__class__.__name__, alignments, images, arguments)
        self._alignments = alignments
        self._images = images
        self._args = arguments
        self._pre_process = PostProcess(arguments)
        self._completion_event = Event()

        # For frame skipping
        self._imageidxre = re.compile(r"(\d+)(?!.*\d\.)(?=\.\w+$)")
        self._frame_ranges = self._get_frame_ranges()
        self._writer = self._get_writer()

        # Extractor for on the fly detection
        self._extractor = self._load_extractor()

        self._queues = dict(load=None, save=None)
        # Fixed: "load" key was previously misspelled "oad", leaving a stray
        # entry in the dict ("load" only appeared once _start_thread ran)
        self._threads = dict(load=None, save=None)
        self._init_threads()
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def completion_event(self):
        """ :class:`event.Event`: Event is set when the DiskIO Save task is complete """
        return self._completion_event

    @property
    def draw_transparent(self):
        """ bool: ``True`` if the selected writer's Draw_transparent configuration item is set
        otherwise ``False`` """
        return self._writer.config.get("draw_transparent", False)

    @property
    def pre_encode(self):
        """ python function: Selected writer's pre-encode function, if it has one,
        otherwise ``None`` """
        # Probe with a dummy image: writers without pre-encoding return None
        dummy = np.zeros((20, 20, 3), dtype="uint8")
        test = self._writer.pre_encode(dummy)
        retval = None if test is None else self._writer.pre_encode
        logger.debug("Writer pre_encode function: %s", retval)
        return retval

    @property
    def save_thread(self):
        """ :class:`lib.multithreading.MultiThread`: The thread that is running the image writing
        operation. """
        return self._threads["save"]

    @property
    def load_thread(self):
        """ :class:`lib.multithreading.MultiThread`: The thread that is running the image loading
        operation. """
        return self._threads["load"]

    @property
    def load_queue(self):
        """ :class:`queue.Queue()`: The queue that images and detected faces are loaded into. """
        return self._queues["load"]

    @property
    def _total_count(self):
        """ int: The total number of frames to be converted """
        if self._frame_ranges and not self._args.keep_unchanged:
            retval = sum([fr[1] - fr[0] + 1 for fr in self._frame_ranges])
        else:
            retval = self._images.count
        logger.debug(retval)
        return retval

    # Initialization
    def _get_writer(self):
        """ Load the selected writer plugin.

        Returns
        -------
        :mod:`plugins.convert.writer` plugin
            The requested writer plugin
        """
        args = [self._args.output_dir]
        if self._args.writer in ("ffmpeg", "gif"):
            args.extend([self._total_count, self._frame_ranges])
        if self._args.writer == "ffmpeg":
            if self._images.is_video:
                args.append(self._args.input_dir)
            else:
                args.append(self._args.reference_video)
        logger.debug("Writer args: %s", args)
        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        return PluginLoader.get_converter("writer", self._args.writer)(
            *args, configfile=configfile)

    def _get_frame_ranges(self):
        """ Obtain the frame ranges that are to be converted.

        If frame ranges have been specified, then split the command line formatted arguments into
        ranges that can be used.

        Returns
        -------
        list or ``None``
            A list of  frames to be processed, or ``None`` if the command line argument was not
            used
        """
        if not self._args.frame_ranges:
            logger.debug("No frame range set")
            return None

        minframe, maxframe = None, None
        if self._images.is_video:
            minframe, maxframe = 1, self._images.count
        else:
            indices = [
                int(self._imageidxre.findall(os.path.basename(filename))[0])
                for filename in self._images.file_list
            ]
            if indices:
                minframe, maxframe = min(indices), max(indices)
        logger.debug("minframe: %s, maxframe: %s", minframe, maxframe)

        if minframe is None or maxframe is None:
            raise FaceswapError(
                "Frame Ranges specified, but could not determine frame numbering "
                "from filenames")

        retval = list()
        for rng in self._args.frame_ranges:
            if "-" not in rng:
                raise FaceswapError(
                    "Frame Ranges not specified in the correct format")
            start, end = rng.split("-")
            # Clamp requested ranges to the frames that actually exist
            retval.append((max(int(start), minframe), min(int(end), maxframe)))
        logger.debug("frame ranges: %s", retval)
        return retval

    def _load_extractor(self):
        """ Load the CV2-DNN Face Extractor Chain.

        For On-The-Fly conversion we use a CPU based extractor to avoid stacking the GPU.
        Results are poor.

        Returns
        -------
        :class:`plugins.extract.Pipeline.Extractor`
            The face extraction chain to be used for on-the-fly conversion
        """
        if not self._alignments.have_alignments_file and not self._args.on_the_fly:
            logger.error(
                "No alignments file found. Please provide an alignments file for your "
                "destination video (recommended) or enable on-the-fly conversion (not "
                "recommended).")
            sys.exit(1)
        if self._alignments.have_alignments_file:
            if self._args.on_the_fly:
                logger.info(
                    "On-The-Fly conversion selected, but an alignments file was found. "
                    "Using pre-existing alignments file: '%s'",
                    self._alignments.file)
            else:
                logger.debug("Alignments file found: '%s'",
                             self._alignments.file)
            return None

        logger.debug("Loading extractor")
        logger.warning(
            "On-The-Fly conversion selected. This will use the inferior cv2-dnn for "
            "extraction and will produce poor results.")
        logger.warning(
            "It is recommended to generate an alignments file for your destination "
            "video with Extract first for superior results.")
        extractor = Extractor(detector="cv2-dnn",
                              aligner="cv2-dnn",
                              masker=self._args.mask_type,
                              multiprocess=True,
                              rotate_images=None,
                              min_size=20)
        extractor.launch()
        logger.debug("Loaded extractor")
        return extractor

    def _init_threads(self):
        """ Initialize queues and threads.

        Creates the load and save queues and the load and save threads. Starts the threads.
        """
        logger.debug("Initializing DiskIO Threads")
        for task in ("load", "save"):
            self._add_queue(task)
            self._start_thread(task)
        logger.debug("Initialized DiskIO Threads")

    def _add_queue(self, task):
        """ Add the queue to queue_manager and to :attr:`self._queues` for the given task.

        Parameters
        ----------
        task: {"load", "save"}
            The task that the queue is to be added for
        """
        logger.debug("Adding queue for task: '%s'", task)
        if task == "load":
            q_name = "convert_in"
        elif task == "save":
            q_name = "convert_out"
        else:
            q_name = task
        self._queues[task] = queue_manager.get_queue(q_name)
        logger.debug("Added queue for task: '%s'", task)

    def _start_thread(self, task):
        """ Create the thread for the given task, add it to :attr:`self._threads` and start it.

        Parameters
        ----------
        task: {"load", "save"}
            The task that the thread is to be created for
        """
        logger.debug("Starting thread: '%s'", task)
        args = self._completion_event if task == "save" else None
        func = getattr(self, "_{}".format(task))
        io_thread = MultiThread(func, args, thread_count=1)
        io_thread.start()
        self._threads[task] = io_thread
        logger.debug("Started thread: '%s'", task)

    # Loading tasks
    def _load(self, *args):  # pylint: disable=unused-argument
        """ Load frames from disk.

        In a background thread:
            * Loads frames from disk.
            * Discards or passes through cli selected skipped frames
            * Pairs the frame with its :class:`~lib.align.DetectedFace` objects
            * Performs any pre-processing actions
            * Puts the frame and detected faces to the load queue
        """
        logger.debug("Load Images: Start")
        idx = 0
        for filename, image in self._images.load():
            idx += 1
            if self._queues["load"].shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            if image is None or (not image.any() and image.ndim not in (2, 3)):
                # All black frames will return not numpy.any() so check dims too
                logger.warning("Unable to open image. Skipping: '%s'",
                               filename)
                continue
            if self._check_skipframe(filename):
                if self._args.keep_unchanged:
                    logger.trace("Saving unchanged frame: %s", filename)
                    out_file = os.path.join(self._args.output_dir,
                                            os.path.basename(filename))
                    self._queues["save"].put((out_file, image))
                else:
                    logger.trace("Discarding frame: '%s'", filename)
                continue

            detected_faces = self._get_detected_faces(filename, image)
            item = dict(filename=filename,
                        image=image,
                        detected_faces=detected_faces)
            self._pre_process.do_actions(item)
            self._queues["load"].put(item)

        logger.debug("Putting EOF")
        self._queues["load"].put("EOF")
        logger.debug("Load Images: Complete")

    def _check_skipframe(self, filename):
        """ Check whether a frame is to be skipped.

        Parameters
        ----------
        filename: str
            The filename of the frame to check

        Returns
        -------
        bool
            ``True`` if the frame is to be skipped otherwise ``False``
        """
        if not self._frame_ranges:
            # Fixed: previously returned None; docstring promises a bool and
            # callers only use the value in a boolean context
            return False
        indices = self._imageidxre.findall(filename)
        if not indices:
            logger.warning(
                "Could not determine frame number. Frame will be converted: '%s'",
                filename)
            return False
        # indices is known non-empty here; the former `if indices else None`
        # guard was unreachable and has been dropped
        idx = int(indices[0])
        skipframe = not any(
            map(lambda b: b[0] <= idx <= b[1], self._frame_ranges))
        logger.trace("idx: %s, skipframe: %s", idx, skipframe)
        return skipframe

    def _get_detected_faces(self, filename, image):
        """ Return the detected faces for the given image.

        If we have an alignments file, then the detected faces are created from that file. If
        we're running On-The-Fly then they will be extracted from the extractor.

        Parameters
        ----------
        filename: str
            The filename to return the detected faces for
        image: :class:`numpy.ndarray`
            The frame that the detected faces exist in

        Returns
        -------
        list
            List of :class:`lib.align.DetectedFace` objects
        """
        logger.trace("Getting faces for: '%s'", filename)
        if not self._extractor:
            detected_faces = self._alignments_faces(os.path.basename(filename),
                                                    image)
        else:
            detected_faces = self._detect_faces(filename, image)
        logger.trace("Got %s faces for: '%s'", len(detected_faces), filename)
        return detected_faces

    def _alignments_faces(self, frame_name, image):
        """ Return detected faces from an alignments file.

        Parameters
        ----------
        frame_name: str
            The name of the frame to return the detected faces for
        image: :class:`numpy.ndarray`
            The frame that the detected faces exist in

        Returns
        -------
        list
            List of :class:`lib.align.DetectedFace` objects
        """
        if not self._check_alignments(frame_name):
            return list()

        faces = self._alignments.get_faces_in_frame(frame_name)
        detected_faces = list()

        for rawface in faces:
            face = DetectedFace()
            face.from_alignment(rawface, image=image)
            detected_faces.append(face)
        return detected_faces

    def _check_alignments(self, frame_name):
        """ Ensure that we have alignments for the current frame.

        If we have no alignments for this image, skip it and output a message.

        Parameters
        ----------
        frame_name: str
            The name of the frame to check that we have alignments for

        Returns
        -------
        bool
            ``True`` if we have alignments for this face, otherwise ``False``
        """
        have_alignments = self._alignments.frame_exists(frame_name)
        if not have_alignments:
            tqdm.write("No alignment found for {}, "
                       "skipping".format(frame_name))
        return have_alignments

    def _detect_faces(self, filename, image):
        """ Extract the face from a frame for On-The-Fly conversion.

        Pulls detected faces out of the Extraction pipeline.

        Parameters
        ----------
        filename: str
            The filename to return the detected faces for
        image: :class:`numpy.ndarray`
            The frame that the detected faces exist in

        Returns
        -------
        list
            List of :class:`lib.align.DetectedFace` objects
         """
        self._extractor.input_queue.put(ExtractMedia(filename, image))
        faces = next(self._extractor.detected_faces())
        return faces.detected_faces

    # Saving tasks
    def _save(self, completion_event):
        """ Save the converted images.

        Puts the selected writer into a background thread and feeds it from the output of the
        patch queue.

        Parameters
        ----------
        completion_event: :class:`event.Event`
            An even that this process triggers when it has finished saving
        """
        logger.debug("Save Images: Start")
        write_preview = self._args.redirect_gui and self._writer.is_stream
        preview_image = os.path.join(self._writer.output_folder,
                                     ".gui_preview.jpg")
        logger.debug("Write preview for gui: %s", write_preview)
        for idx in tqdm(range(self._total_count),
                        desc="Converting",
                        file=sys.stdout):
            if self._queues["save"].shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = self._queues["save"].get()
            if item == "EOF":
                logger.debug("EOF Received")
                break
            filename, image = item
            # Write out preview image for the GUI every 10 frames if writing to stream
            if write_preview and idx % 10 == 0 and not os.path.exists(
                    preview_image):
                logger.debug("Writing GUI Preview image: '%s'", preview_image)
                cv2.imwrite(preview_image, image)
            self._writer.write(filename, image)
        self._writer.close()
        completion_event.set()
        # Fixed: previously logged "Save Faces: Complete", inconsistent with
        # the "Save Images: Start" opener above
        logger.debug("Save Images: Complete")
Example #18
0
class Extract():
    """ The extract process.

    Loads frames in a background thread, feeds them through the detection and
    alignment pipeline, then saves the extracted face images and writes their
    alignment data to the alignments file.
    """
    def __init__(self, arguments):
        # NOTE(review): the format string below is missing its closing ")";
        # left as-is because it is a runtime log string, not a comment
        logger.debug("Initializing %s: (args: %s", self.__class__.__name__,
                     arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector, self.args.aligner,
                                   self.args.loglevel, self.args.multiprocess,
                                   self.args.rotate_images, self.args.min_size)

        self.save_queue = queue_manager.get_queue("extract_save")
        # Flipped to True when any frame contains more than one face so the
        # user can be told to verify the output (see output_processing)
        self.verify_output = False
        # Frames between periodic alignments-file saves; None disables
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def skip_num(self):
        """ Number of frames to skip if extract_every_n is passed. Defaults
        to 1 (process every frame) when the option is not present """
        return self.args.extract_every_n if hasattr(self.args,
                                                    "extract_every_n") else 1

    def process(self):
        """ Perform the extraction process """
        logger.info('Starting, this may take a while...')
        # queue_manager.debug_monitor(3)
        # Loading and saving run in background threads; extraction runs in
        # this thread and the save thread is joined before finalizing
        self.threaded_io("load")
        save_thread = self.threaded_io("save")
        self.run_extraction()
        save_thread.join()
        self.alignments.save()
        Utils.finalize(self.images.images_found // self.skip_num,
                       self.alignments.faces_count, self.verify_output)

    def threaded_io(self, task, io_args=None):
        """ Perform I/O task ('load', 'save' or 'reload') in a background
        thread and return the started thread """
        logger.debug("Threading task: (Task: '%s')", task)
        io_args = tuple() if io_args is None else (io_args, )
        # NOTE(review): an unrecognized task would leave `func` unbound and
        # raise NameError below; callers only pass the three known tasks
        if task == "load":
            func = self.load_images
        elif task == "save":
            func = self.save_faces
        elif task == "reload":
            func = self.reload_images
        io_thread = MultiThread(func, *io_args, thread_count=1)
        io_thread.start()
        return io_thread

    def load_images(self):
        """ Load the images and feed them to the extractor's input queue """
        logger.debug("Load Images: Start")
        load_queue = self.extractor.input_queue
        idx = 0
        for filename, image in self.images.load():
            idx += 1
            if load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            # Honor extract_every_n: only process every skip_num'th frame
            if idx % self.skip_num != 0:
                logger.trace("Skipping image '%s' due to extract_every_n = %s",
                             filename, self.skip_num)
                continue
            if image is None or not image.any():
                logger.warning("Unable to open image. Skipping: '%s'",
                               filename)
                continue
            # Frames already present in the alignments file are skipped
            imagename = os.path.basename(filename)
            if imagename in self.alignments.data.keys():
                logger.trace("Skipping image: '%s'", filename)
                continue
            item = {"filename": filename, "image": image}
            load_queue.put(item)
        load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def reload_images(self, detected_faces):
        """ Reload the images and pair to detected face

        Used between passes: frames are re-read from disk and matched back to
        the detection results stashed during the previous pass """
        logger.debug("Reload Images: Start. Detected Faces Count: %s",
                     len(detected_faces))
        load_queue = self.extractor.input_queue
        for filename, image in self.images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Reload Queue: Stop signal received. Terminating")
                break
            logger.trace("Reloading image: '%s'", filename)
            detect_item = detected_faces.pop(filename, None)
            if not detect_item:
                logger.warning("Couldn't find faces for: %s", filename)
                continue
            detect_item["image"] = image
            load_queue.put(detect_item)
        load_queue.put("EOF")
        logger.debug("Reload Images: Complete")

    def save_faces(self):
        """ Save the generated faces

        Consumes (filename, encoded image bytes) tuples from the save queue
        until "EOF" or a shutdown signal is received """
        logger.debug("Save Faces: Start")
        while True:
            if self.save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = self.save_queue.get()
            logger.trace(item)
            if item == "EOF":
                break
            filename, face = item

            logger.trace("Saving face: '%s'", filename)
            try:
                with open(filename, "wb") as out_file:
                    out_file.write(face)
            except Exception as err:  # pylint: disable=broad-except
                # Best effort: log and carry on with the remaining faces
                logger.error("Failed to save image '%s'. Original Error: %s",
                             filename, err)
                continue
        logger.debug("Save Faces: Complete")

    def process_item_count(self):
        """ Return the number of items to be processedd """
        # Frames already in the alignments file count as processed
        processed = sum(
            os.path.basename(frame) in self.alignments.data.keys()
            for frame in self.images.input_images)
        logger.debug("Items already processed: %s", processed)

        if processed != 0 and self.args.skip_existing:
            logger.info("Skipping previously extracted frames: %s", processed)
        if processed != 0 and self.args.skip_faces:
            logger.info("Skipping frames with detected faces: %s", processed)

        to_process = (self.images.images_found - processed) // self.skip_num
        logger.debug("Items to be Processed: %s", to_process)
        if to_process == 0:
            logger.error("No frames to process. Exiting")
            queue_manager.terminate_queues()
            exit(0)
        return to_process

    def run_extraction(self):
        """ Run Face Detection

        Runs one loop per extractor pass. Non-final passes stash their results
        (minus the image) and trigger a reload; the final pass aligns, saves
        and periodically flushes the alignments file """
        to_process = self.process_item_count()
        size = self.args.size if hasattr(self.args, "size") else 256
        align_eyes = self.args.align_eyes if hasattr(self.args,
                                                     "align_eyes") else False
        exception = False

        for phase in range(self.extractor.passes):
            if exception:
                break
            is_final = self.extractor.final_pass
            detected_faces = dict()
            self.extractor.launch()
            for idx, faces in enumerate(
                    tqdm(self.extractor.detected_faces(),
                         total=to_process,
                         file=sys.stdout,
                         desc="Running pass {} of {}: {}".format(
                             phase + 1, self.extractor.passes,
                             self.extractor.phase.title()))):

                exception = faces.get("exception", False)
                if exception:
                    break
                filename = faces["filename"]

                if self.extractor.final_pass:
                    self.output_processing(faces, align_eyes, size, filename)
                    self.output_faces(filename, faces)
                    if self.save_interval and (idx +
                                               1) % self.save_interval == 0:
                        self.alignments.save()
                else:
                    # Drop the image to save memory; it is re-read on reload
                    del faces["image"]
                    detected_faces[filename] = faces

            if is_final:
                logger.debug("Putting EOF to save")
                self.save_queue.put("EOF")
            else:
                logger.debug("Reloading images")
                self.threaded_io("reload", detected_faces)

    def output_processing(self, faces, align_eyes, size, filename):
        """ Prepare faces for output: align, post-process and track whether
        any frame produced multiple faces """
        self.align_face(faces, align_eyes, size, filename)
        self.post_process.do_actions(faces)

        faces_count = len(faces["detected_faces"])
        if faces_count == 0:
            logger.verbose("No faces were detected in image: %s",
                           os.path.basename(filename))

        if not self.verify_output and faces_count > 1:
            self.verify_output = True

    def align_face(self, faces, align_eyes, size, filename):
        """ Align the detected face and add the destination file path """
        final_faces = list()
        image = faces["image"]
        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_bounding_box(face, image)
            # Landmarks are paired to bounding boxes by index
            detected_face.landmarksXY = landmarks[idx]
            detected_face.load_aligned(image, size=size, align_eyes=align_eyes)
            final_faces.append({
                "file_location":
                self.output_dir / Path(filename).stem,
                "face":
                detected_face
            })
        faces["detected_faces"] = final_faces

    def output_faces(self, filename, faces):
        """ Output faces to save thread and record their alignments """
        final_faces = list()
        for idx, detected_face in enumerate(faces["detected_faces"]):
            output_file = detected_face["file_location"]
            extension = Path(filename).suffix
            # Multiple faces in a frame are suffixed _0, _1, ...
            out_filename = "{}_{}{}".format(str(output_file), str(idx),
                                            extension)

            face = detected_face["face"]
            resized_face = face.aligned_face

            # hash_encode_image returns the face hash and the encoded bytes
            face.hash, img = hash_encode_image(resized_face, extension)
            self.save_queue.put((out_filename, img))
            final_faces.append(face.to_alignment())
        self.alignments.data[os.path.basename(filename)] = final_faces
Example #19
0
class DiskIO():
    """ Background threads to:
            Load images from disk and get the detected faces
            Save images back to disk

        Faces come from the alignments file when it exists, otherwise from an
        on-the-fly extractor. """
    def __init__(self, alignments, images, arguments):
        logger.debug("Initializing %s: (alignments: %s, images: %s, arguments: %s)",
                     self.__class__.__name__, alignments, images, arguments)
        self.alignments = alignments
        self.images = images
        self.args = arguments
        self.pre_process = PostProcess(arguments)
        # Set by the save thread once all frames have been written
        self.completion_event = Event()
        self.frame_ranges = self.get_frame_ranges()
        self.writer = self.get_writer()

        # For frame skipping: captures the final digit group immediately
        # before the file extension (i.e. the frame number)
        self.imageidxre = re.compile(r"(\d+)(?!.*\d\.)(?=\.\w+$)")

        # Extractor for on the fly detection
        self.extractor = None
        if not self.alignments.have_alignments_file:
            self.load_extractor()

        # Queues and threads are populated by init_threads()
        self.load_queue = None
        self.save_queue = None
        self.load_thread = None
        self.save_thread = None
        self.init_threads()
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def draw_transparent(self):
        """ Draw transparent is an image writer only parameter.
            Return the value here for easy access for predictor """
        return self.writer.config.get("draw_transparent", False)

    @property
    def pre_encode(self):
        """ Return the writer's pre-encoder, or None if the writer's
            pre_encode is a no-op (probed with a dummy image) """
        dummy = np.zeros((20, 20, 3)).astype("uint8")
        test = self.writer.pre_encode(dummy)
        retval = None if test is None else self.writer.pre_encode
        logger.debug("Writer pre_encode function: %s", retval)
        return retval

    @property
    def total_count(self):
        """ Return the total number of frames to be converted """
        if self.frame_ranges and not self.args.keep_unchanged:
            # Ranges are inclusive at both ends, hence the + 1
            retval = sum([fr[1] - fr[0] + 1 for fr in self.frame_ranges])
        else:
            retval = self.images.images_found
        logger.debug(retval)
        return retval

    # Initalization
    def get_writer(self):
        """ Return the writer plugin """
        args = [self.args.output_dir]
        # Stream writers need the frame count; ffmpeg also needs a reference
        # video for its output parameters
        if self.args.writer in ("ffmpeg", "gif"):
            args.append(self.total_count)
        if self.args.writer == "ffmpeg":
            if self.images.is_video:
                args.append(self.args.input_dir)
            else:
                args.append(self.args.reference_video)
        logger.debug("Writer args: %s", args)
        return PluginLoader.get_converter("writer", self.args.writer)(*args)

    def get_frame_ranges(self):
        """ split out the frame ranges and parse out 'min' and 'max' values """
        if not self.args.frame_ranges:
            logger.debug("No frame range set")
            return None

        # 'min'/'max' tokens map to the extreme frame indices; anything else
        # must be an integer. Each range is an inclusive (start, end) tuple
        minmax = {"min": 0,  # never any frames less than 0
                  "max": float("inf")}
        retval = [tuple(map(lambda q: minmax[q] if q in minmax.keys() else int(q), v.split("-")))
                  for v in self.args.frame_ranges]
        logger.debug("frame ranges: %s", retval)
        return retval

    def load_extractor(self):
        """ Set on the fly extraction """
        logger.debug("Loading extractor")
        logger.warning("No Alignments file found. Extracting on the fly.")
        logger.warning("NB: This will use the inferior dlib-hog for extraction "
                       "and dlib pose predictor for landmarks. It is recommended "
                       "to perfom Extract first for superior results")
        extract_args = {"detector": "dlib-hog",
                        "aligner": "dlib",
                        "loglevel": self.args.loglevel}
        self.extractor = Extractor(None, extract_args)
        self.extractor.launch_detector()
        self.extractor.launch_aligner()
        logger.debug("Loaded extractor")

    def init_threads(self):
        """ Initialize queues and threads """
        logger.debug("Initializing DiskIO Threads")
        for task in ("load", "save"):
            self.add_queue(task)
            self.start_thread(task)
        logger.debug("Initialized DiskIO Threads")

    def add_queue(self, task):
        """ Add the queue to queue_manager and set queue attribute """
        logger.debug("Adding queue for task: '%s'", task)
        # The load queue is registered as "convert_in" in the queue manager
        q_name = "convert_in" if task == "load" else task
        setattr(self, "{}_queue".format(task), queue_manager.get_queue(q_name))
        logger.debug("Added queue for task: '%s'", task)

    def start_thread(self, task):
        """ Start the DiskIO thread """
        logger.debug("Starting thread: '%s'", task)
        # Only the save thread signals completion via the event
        args = self.completion_event if task == "save" else None
        func = getattr(self, task)
        io_thread = MultiThread(func, args, thread_count=1)
        io_thread.start()
        setattr(self, "{}_thread".format(task), io_thread)
        logger.debug("Started thread: '%s'", task)

    # Loading tasks
    def load(self, *args):  # pylint: disable=unused-argument
        """ Load the images with detected_faces"""
        logger.debug("Load Images: Start")
        extract_queue = queue_manager.get_queue("extract_in") if self.extractor else None
        idx = 0
        for filename, image in self.images.load():
            idx += 1
            if self.load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            if image is None or not image.any():
                logger.warning("Unable to open image. Skipping: '%s'", filename)
                continue
            if self.check_skipframe(filename):
                # Skipped frames are either passed straight to the writer
                # (keep_unchanged) or dropped entirely
                if self.args.keep_unchanged:
                    logger.trace("Saving unchanged frame: %s", filename)
                    out_file = os.path.join(self.args.output_dir, os.path.basename(filename))
                    self.save_queue.put((out_file, image))
                else:
                    logger.trace("Discarding frame: '%s'", filename)
                continue

            detected_faces = self.get_detected_faces(filename, image, extract_queue)
            item = dict(filename=filename, image=image, detected_faces=detected_faces)
            self.pre_process.do_actions(item)
            self.load_queue.put(item)

        self.load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def check_skipframe(self, filename):
        """ Check whether frame is to be skipped.

            Returns None (falsy) when no frame ranges are configured, False
            when the frame number cannot be determined, otherwise True if the
            frame falls outside every configured range """
        if not self.frame_ranges:
            return None
        indices = self.imageidxre.findall(filename)
        if not indices:
            logger.warning("Could not determine frame number. Frame will be converted: '%s'",
                           filename)
            return False
        # NOTE(review): `indices` is always truthy here (the empty case
        # returned above), so the conditional's else branch is unreachable
        idx = int(indices[0]) if indices else None
        skipframe = not any(map(lambda b: b[0] <= idx <= b[1], self.frame_ranges))
        return skipframe

    def get_detected_faces(self, filename, image, extract_queue):
        """ Return detected faces from alignments or detector """
        logger.trace("Getting faces for: '%s'", filename)
        if not self.extractor:
            detected_faces = self.alignments_faces(os.path.basename(filename), image)
        else:
            detected_faces = self.detect_faces(extract_queue, filename, image)
        logger.trace("Got %s faces for: '%s'", len(detected_faces), filename)
        return detected_faces

    def alignments_faces(self, frame, image):
        """ Get the face from alignments file """
        if not self.check_alignments(frame):
            return list()

        faces = self.alignments.get_faces_in_frame(frame)
        detected_faces = list()

        for rawface in faces:
            face = DetectedFace()
            face.from_alignment(rawface, image=image)
            detected_faces.append(face)
        return detected_faces

    def check_alignments(self, frame):
        """ If we have no alignments for this image, skip it """
        have_alignments = self.alignments.frame_exists(frame)
        if not have_alignments:
            # tqdm.write keeps the message from mangling the progress bar
            tqdm.write("No alignment found for {}, "
                       "skipping".format(frame))
        return have_alignments

    def detect_faces(self, load_queue, filename, image):
        """ Extract the face from a frame (If alignments file not found) """
        inp = {"filename": filename,
               "image": image}
        load_queue.put(inp)
        # Blocks until the extractor yields the result for this frame
        faces = next(self.extractor.detect_faces())

        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        final_faces = list()

        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_dlib_rect(face)
            # Landmarks are paired to detections by index
            detected_face.landmarksXY = landmarks[idx]
            final_faces.append(detected_face)
        return final_faces

    # Saving tasks
    def save(self, completion_event):
        """ Save the converted images

            Consumes (filename, image) tuples from the save queue until "EOF"
            or a shutdown signal, then closes the writer and signals the
            completion event """
        logger.debug("Save Images: Start")
        for _ in tqdm(range(self.total_count), desc="Converting", file=sys.stdout):
            if self.save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = self.save_queue.get()
            if item == "EOF":
                break
            filename, image = item
            self.writer.write(filename, image)
        self.writer.close()
        completion_event.set()
        logger.debug("Save Faces: Complete")
Example #20
0
class Extract():
    """ The extract process.

    Older pipeline variant: loads frames in a background thread, runs the
    detector/aligner plugins and writes the extracted faces with cv2 while
    recording their alignments.
    """

    def __init__(self, arguments):
        self.args = arguments
        # get_folder creates the folder if it doesn't exist
        self.output_dir = get_folder(self.args.output_dir)
        print("Output Directory: {}".format(self.args.output_dir))
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True)
        self.plugins = Plugins(self.args)

        self.post_process = PostProcess(arguments)

        # When False, faces are aligned and recorded but not written to disk
        self.export_face = True
        # Flipped to True when any frame contains more than one face
        self.verify_output = False
        # Frames between periodic alignments-file saves; None disables
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval

    def process(self):
        """ Perform the extraction process """
        print('Starting, this may take a while...')
        Utils.set_verbosity(self.args.verbose)
#        queue_manager.debug_monitor(1)
        # Loading and saving run in background threads; extraction runs here
        self.threaded_io("load")
        save_thread = self.threaded_io("save")
        self.run_extraction(save_thread)
        self.alignments.save()
        Utils.finalize(self.images.images_found,
                       self.alignments.faces_count,
                       self.verify_output)
        self.plugins.process_detect.join()
        self.plugins.process_align.join()

    def threaded_io(self, task, io_args=None):
        """ Load images in a background thread """
        io_args = tuple() if io_args is None else (io_args, )
        # NOTE(review): an unrecognized task would leave `func` unbound and
        # raise NameError below; callers only pass the three known tasks
        if task == "load":
            func = self.load_images
        elif task == "save":
            func = self.save_faces
        elif task == "reload":
            func = self.reload_images
        io_thread = MultiThread(thread_count=1)
        io_thread.in_thread(func, *io_args)
        return io_thread

    def load_images(self):
        """ Load the images """
        load_queue = queue_manager.get_queue("load")
        for filename, image in self.images.load():
            # Frames already in the alignments file are skipped
            imagename = os.path.basename(filename)
            if imagename in self.alignments.data.keys():
                continue
            load_queue.put((filename, image))
        load_queue.put("EOF")

    def reload_images(self, detected_faces):
        """ Reload the images and pair to detected face """
        load_queue = queue_manager.get_queue("detect")
        for filename, image in self.images.load():
            detect_item = detected_faces.pop(filename, None)
            if not detect_item:
                continue
            detect_item["image"] = image
            load_queue.put(detect_item)
        load_queue.put("EOF")

    def save_faces(self):
        """ Save the generated faces """
        if not self.export_face:
            return

        save_queue = queue_manager.get_queue("save")
        while True:
            item = save_queue.get()
            if item == "EOF":
                break
            filename, output_file, resized_face, idx = item
            # Multiple faces in a frame are suffixed _0, _1, ...
            out_filename = "{}_{}{}".format(str(output_file),
                                            str(idx),
                                            Path(filename).suffix)
            # pylint: disable=no-member
            cv2.imwrite(out_filename, resized_face)

    def run_extraction(self, save_thread):
        """ Run Face Detection """
        to_process = self.process_item_count()
        frame_no = 0
        size = self.args.size if hasattr(self.args, "size") else 256
        align_eyes = self.args.align_eyes if hasattr(self.args, "align_eyes") else False

        # Parallel mode runs detector and aligner together; otherwise detect
        # everything first, then launch the aligner for a second pass
        if self.plugins.is_parallel:
            self.plugins.launch_aligner()
            self.plugins.launch_detector()
        if not self.plugins.is_parallel:
            self.run_detection(to_process)
            self.plugins.launch_aligner()

        for faces in tqdm(self.plugins.detect_faces(extract_pass="******"),
                          total=to_process,
                          file=sys.stdout,
                          desc="Extracting faces"):

            exception = faces.get("exception", False)
            if exception:
                exit(1)
            filename = faces["filename"]
            faces["output_file"] = self.output_dir / Path(filename).stem

            self.align_face(faces, align_eyes, size)
            self.post_process.do_actions(faces)

            faces_count = len(faces["detected_faces"])
            if self.args.verbose and faces_count == 0:
                print("Warning: No faces were detected in image: "
                      "{}".format(os.path.basename(filename)))

            if not self.verify_output and faces_count > 1:
                self.verify_output = True

            self.process_faces(filename, faces)

            # Periodically flush the alignments file to disk
            frame_no += 1
            if frame_no == self.save_interval:
                self.alignments.save()
                frame_no = 0

        if self.export_face:
            queue_manager.get_queue("save").put("EOF")
        save_thread.join_threads()

    def process_item_count(self):
        """ Return the number of items to be processedd """
        # Frames already in the alignments file count as processed
        processed = sum(os.path.basename(frame) in self.alignments.data.keys()
                        for frame in self.images.input_images)

        if processed != 0 and self.args.skip_existing:
            print("Skipping {} previously extracted frames".format(processed))
        if processed != 0 and self.args.skip_faces:
            print("Skipping {} frames with detected faces".format(processed))

        to_process = self.images.images_found - processed
        if to_process == 0:
            print("No frames to process. Exiting")
            queue_manager.terminate_queues()
            exit(0)
        return to_process

    def run_detection(self, to_process):
        """ Run detection only """
        self.plugins.launch_detector()
        detected_faces = dict()
        for detected in tqdm(self.plugins.detect_faces(extract_pass="******"),
                             total=to_process,
                             file=sys.stdout,
                             desc="Detecting faces"):
            exception = detected.get("exception", False)
            if exception:
                break

            # Drop the image to save memory; it is re-read on reload
            del detected["image"]
            filename = detected["filename"]

            detected_faces[filename] = detected

        self.threaded_io("reload", detected_faces)

    @staticmethod
    def align_face(faces, align_eyes, size, padding=48):
        """ Align the detected face """
        final_faces = list()
        image = faces["image"]
        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_dlib_rect(face, image)
            # Landmarks are paired to detections by index
            detected_face.landmarksXY = landmarks[idx]
            detected_face.frame_dims = image.shape[:2]
            detected_face.load_aligned(image,
                                       size=size,
                                       padding=padding,
                                       align_eyes=align_eyes)
            final_faces.append(detected_face)
        faces["detected_faces"] = final_faces

    def process_faces(self, filename, faces):
        """ Perform processing on found faces """
        final_faces = list()
        save_queue = queue_manager.get_queue("save")
        filename = faces["filename"]
        output_file = faces["output_file"]

        for idx, face in enumerate(faces["detected_faces"]):
            if self.export_face:
                save_queue.put((filename,
                                output_file,
                                face.aligned_face,
                                idx))

            final_faces.append(face.to_alignment())
        self.alignments.data[os.path.basename(filename)] = final_faces
Example #21
0
class Extract():
    """ The extract process.

    Loads frames in a background thread, runs the face detection and
    alignment plugins over them, and saves the cropped faces plus an
    alignments file to the output folder.
    """
    def __init__(self, arguments):
        """ arguments: the parsed command line options (argparse.Namespace) """
        # Fixed: format string was missing its closing parenthesis
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True)
        self.plugins = Plugins(self.args)

        self.post_process = PostProcess(arguments)

        self.export_face = True     # Write cropped face images to disk
        self.verify_output = False  # Set when any frame holds more than one face
        self.save_interval = None   # Save alignments every N frames (None = end only)
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)

    def process(self):
        """ Perform the extraction process """
        logger.info('Starting, this may take a while...')
        Utils.set_verbosity()
        #        queue_manager.debug_monitor(1)
        # Loading and saving run in background threads; extraction runs here
        self.threaded_io("load")
        save_thread = self.threaded_io("save")
        self.run_extraction(save_thread)
        self.alignments.save()
        Utils.finalize(self.images.images_found, self.alignments.faces_count,
                       self.verify_output)

    def threaded_io(self, task, io_args=None):
        """ Run an I/O task ('load', 'save' or 'reload') in a background
        thread and return the started thread.

        Raises
        ------
        ValueError
            If an unknown task name is passed (previously this fell through
            to an UnboundLocalError).
        """
        logger.debug("Threading task: (Task: '%s')", task)
        io_args = tuple() if io_args is None else (io_args, )
        if task == "load":
            func = self.load_images
        elif task == "save":
            func = self.save_faces
        elif task == "reload":
            func = self.reload_images
        else:
            raise ValueError("Unknown io task: '{}'".format(task))
        io_thread = MultiThread(func, *io_args, thread_count=1)
        io_thread.start()
        return io_thread

    def load_images(self):
        """ Load the images and feed them to the 'load' queue """
        logger.debug("Load Images: Start")
        load_queue = queue_manager.get_queue("load")
        for filename, image in self.images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            imagename = os.path.basename(filename)
            # Skip frames that already exist in the alignments file
            if imagename in self.alignments.data.keys():
                logger.trace("Skipping image: '%s'", filename)
                continue
            item = {"filename": filename, "image": image}
            load_queue.put(item)
        # Sentinel marking end of input for downstream consumers
        load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def reload_images(self, detected_faces):
        """ Reload the images and pair each to its detected faces, feeding
        the combined items to the 'detect' queue (serial processing mode) """
        logger.debug("Reload Images: Start. Detected Faces Count: %s",
                     len(detected_faces))
        load_queue = queue_manager.get_queue("detect")
        for filename, image in self.images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Reload Queue: Stop signal received. Terminating")
                break
            logger.trace("Reloading image: '%s'", filename)
            detect_item = detected_faces.pop(filename, None)
            if not detect_item:
                logger.warning("Couldn't find faces for: %s", filename)
                continue
            # Re-attach the image that run_detection stripped to save memory
            detect_item["image"] = image
            load_queue.put(detect_item)
        load_queue.put("EOF")
        logger.debug("Reload Images: Complete")

    def save_faces(self):
        """ Save the generated faces from the 'save' queue until EOF """
        logger.debug("Save Faces: Start")
        if not self.export_face:
            logger.debug("Not exporting faces")
            logger.debug("Save Faces: Complete")
            return

        save_queue = queue_manager.get_queue("save")
        while True:
            if save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = save_queue.get()
            if item == "EOF":
                break
            filename, output_file, resized_face, idx = item
            # One file per face: <output_file>_<face index><original extension>
            out_filename = "{}_{}{}".format(str(output_file), str(idx),
                                            Path(filename).suffix)
            logger.trace("Saving face: '%s'", out_filename)
            try:
                cv2.imwrite(out_filename, resized_face)  # pylint: disable=no-member
            except Exception as err:  # pylint: disable=broad-except
                # Keep saving remaining faces even if one write fails
                logger.error("Failed to save image '%s'. Original Error: %s",
                             out_filename, err)
                continue
        logger.debug("Save Faces: Complete")

    def run_extraction(self, save_thread):
        """ Run Face Detection, alignment and post-processing over all
        frames, then signal the save thread to finish """
        save_queue = queue_manager.get_queue("save")
        to_process = self.process_item_count()
        frame_no = 0
        size = self.args.size if hasattr(self.args, "size") else 256
        align_eyes = self.args.align_eyes if hasattr(self.args,
                                                     "align_eyes") else False

        if self.plugins.is_parallel:
            logger.debug("Using parallel processing")
            self.plugins.launch_aligner()
            self.plugins.launch_detector()
        if not self.plugins.is_parallel:
            # Serial mode: detect everything first, then align
            logger.debug("Using serial processing")
            self.run_detection(to_process)
            self.plugins.launch_aligner()

        for faces in tqdm(self.plugins.detect_faces(extract_pass="******"),
                          total=to_process,
                          file=sys.stdout,
                          desc="Extracting faces"):

            filename = faces["filename"]

            self.align_face(faces, align_eyes, size, filename)
            self.post_process.do_actions(faces)

            faces_count = len(faces["detected_faces"])
            if faces_count == 0:
                logger.verbose("No faces were detected in image: %s",
                               os.path.basename(filename))

            if not self.verify_output and faces_count > 1:
                self.verify_output = True

            self.process_faces(filename, faces, save_queue)

            # Periodically checkpoint the alignments file if requested
            frame_no += 1
            if frame_no == self.save_interval:
                self.alignments.save()
                frame_no = 0

        if self.export_face:
            save_queue.put("EOF")
        save_thread.join()

    def process_item_count(self):
        """ Return the number of items to be processed """
        processed = sum(
            os.path.basename(frame) in self.alignments.data.keys()
            for frame in self.images.input_images)
        logger.debug("Items already processed: %s", processed)

        if processed != 0 and self.args.skip_existing:
            logger.info("Skipping previously extracted frames: %s", processed)
        if processed != 0 and self.args.skip_faces:
            logger.info("Skipping frames with detected faces: %s", processed)

        to_process = self.images.images_found - processed
        logger.debug("Items to be Processed: %s", to_process)
        if to_process == 0:
            logger.error("No frames to process. Exiting")
            queue_manager.terminate_queues()
            # sys.exit rather than the site-provided exit() builtin
            sys.exit(0)
        return to_process

    def run_detection(self, to_process):
        """ Run detection only (serial mode first pass), collecting the
        detections per filename then kicking off the reload thread """
        self.plugins.launch_detector()
        detected_faces = dict()
        for detected in tqdm(self.plugins.detect_faces(extract_pass="******"),
                             total=to_process,
                             file=sys.stdout,
                             desc="Detecting faces"):
            exception = detected.get("exception", False)
            if exception:
                break

            # Drop the image to keep memory bounded; reload_images re-reads it
            del detected["image"]
            filename = detected["filename"]

            detected_faces[filename] = detected

        self.threaded_io("reload", detected_faces)

    def align_face(self, faces, align_eyes, size, filename, padding=48):
        """ Align the detected face and add the destination file path """
        final_faces = list()
        image = faces["image"]
        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_dlib_rect(face, image)
            detected_face.landmarksXY = landmarks[idx]
            detected_face.frame_dims = image.shape[:2]
            detected_face.load_aligned(image,
                                       size=size,
                                       padding=padding,
                                       align_eyes=align_eyes)
            # Pair each aligned face with its destination path stem
            final_faces.append({
                "file_location":
                self.output_dir / Path(filename).stem,
                "face":
                detected_face
            })
        faces["detected_faces"] = final_faces

    def process_faces(self, filename, faces, save_queue):
        """ Queue each face for saving and record its alignments """
        final_faces = list()
        filename = faces["filename"]

        for idx, detected_face in enumerate(faces["detected_faces"]):
            if self.export_face:
                save_queue.put((filename, detected_face["file_location"],
                                detected_face["face"].aligned_face, idx))

            final_faces.append(detected_face["face"].to_alignment())
        self.alignments.data[os.path.basename(filename)] = final_faces
Example #22
0
class Convert():
    """ The convert process.

    Swaps faces in the source frames using a trained model. Face locations
    come from an alignments file or, when none exists, from on-the-fly
    extraction with dlib.
    """
    def __init__(self, arguments):
        """ arguments: the parsed command line options (argparse.Namespace) """
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        self.extract_faces = False  # Set when falling back to on-the-fly extraction
        self.faces_count = 0

        self.images = Images(self.args)
        self.alignments = Alignments(self.args, False, self.images.is_video)

        # Update Legacy alignments
        Legacy(self.alignments, self.images.input_images,
               arguments.input_aligned_dir)

        self.post_process = PostProcess(arguments)
        self.verify_output = False

        self.opts = OptionalActions(self.args, self.images.input_images,
                                    self.alignments)
        logger.debug("Initialized %s", self.__class__.__name__)

    def process(self):
        """ Original & LowMem models go with Adjust or Masked converter

            Note: GAN prediction outputs a mask + an image, while other
            predicts only an image. """
        Utils.set_verbosity(self.args.loglevel)

        if not self.alignments.have_alignments_file:
            self.load_extractor()

        model = self.load_model()
        converter = self.load_converter(model)

        # Pre-fetch one batch in the background while converting
        batch = BackgroundGenerator(self.prepare_images(), 1)

        for item in batch.iterator():
            self.convert(converter, item)

        if self.extract_faces:
            queue_manager.terminate_queues()

        Utils.finalize(self.images.images_found, self.faces_count,
                       self.verify_output)

    def load_extractor(self):
        """ Set on the fly extraction """
        logger.warning("No Alignments file found. Extracting on the fly.")
        logger.warning(
            "NB: This will use the inferior dlib-hog for extraction "
            "and dlib pose predictor for landmarks. It is recommended "
            "to perform Extract first for superior results")
        for task in ("load", "detect", "align"):
            queue_manager.add_queue(task, maxsize=0)

        detector = PluginLoader.get_detector("dlib_hog")(
            loglevel=self.args.loglevel)
        aligner = PluginLoader.get_aligner("dlib")(loglevel=self.args.loglevel)

        # Detector consumes 'load' and feeds 'detect'; aligner consumes
        # 'detect' and feeds 'align'
        d_kwargs = {
            "in_queue": queue_manager.get_queue("load"),
            "out_queue": queue_manager.get_queue("detect")
        }
        a_kwargs = {
            "in_queue": queue_manager.get_queue("detect"),
            "out_queue": queue_manager.get_queue("align")
        }

        d_process = SpawnProcess(detector.run, **d_kwargs)
        d_event = d_process.event
        d_process.start()

        a_process = SpawnProcess(aligner.run, **a_kwargs)
        a_event = a_process.event
        a_process.start()

        # Give each subprocess 10 seconds to signal successful startup
        d_event.wait(10)
        if not d_event.is_set():
            raise ValueError("Error initializing Detector")
        a_event.wait(10)
        if not a_event.is_set():
            raise ValueError("Error initializing Aligner")

        self.extract_faces = True

    def load_model(self):
        """ Load the model requested for conversion """
        model_name = self.args.trainer
        model_dir = get_folder(self.args.model_dir)
        num_gpus = self.args.gpus

        model = PluginLoader.get_model(model_name)(model_dir, num_gpus)

        if not model.load(self.args.swap_model):
            logger.error("Model Not Found! A valid model "
                         "must be provided to continue!")
            sys.exit(1)

        return model

    def load_converter(self, model):
        """ Load the requested converter for conversion """
        args = self.args
        conv = args.converter

        converter = PluginLoader.get_converter(conv)(
            model.converter(False),
            trainer=args.trainer,
            blur_size=args.blur_size,
            seamless_clone=args.seamless_clone,
            sharpen_image=args.sharpen_image,
            mask_type=args.mask_type,
            erosion_kernel_size=args.erosion_kernel_size,
            match_histogram=args.match_histogram,
            smooth_mask=args.smooth_mask,
            avg_color_adjust=args.avg_color_adjust,
            draw_transparent=args.draw_transparent)

        return converter

    def prepare_images(self):
        """ Generator yielding (filename, image, detected_faces) for each
        frame to be converted """
        filename = ""
        for filename, image in tqdm(self.images.load(),
                                    total=self.images.images_found,
                                    file=sys.stdout):

            if (self.args.discard_frames
                    and self.opts.check_skipframe(filename) == "discard"):
                continue

            frame = os.path.basename(filename)
            if self.extract_faces:
                detected_faces = self.detect_faces(filename, image)
            else:
                detected_faces = self.alignments_faces(frame, image)

            faces_count = len(detected_faces)
            if faces_count != 0:
                # Post processing requires a dict with "detected_faces" key
                self.post_process.do_actions(
                    {"detected_faces": detected_faces})
                self.faces_count += faces_count

            if faces_count > 1:
                self.verify_output = True
                logger.verbose("Found more than one face in "
                               "an image! '%s'", frame)

            yield filename, image, detected_faces

    @staticmethod
    def detect_faces(filename, image):
        """ Extract the face from a frame (If not alignments file found) """
        queue_manager.get_queue("load").put((filename, image))
        item = queue_manager.get_queue("align").get()
        detected_faces = item["detected_faces"]
        return detected_faces

    def alignments_faces(self, frame, image):
        """ Get the faces for this frame from the alignments file.

        Returns an empty list when no alignments exist for the frame.
        """
        if not self.check_alignments(frame):
            # Fixed: previously returned None, which crashed prepare_images
            # at len(detected_faces) with a TypeError
            return list()

        faces = self.alignments.get_faces_in_frame(frame)
        detected_faces = list()

        for rawface in faces:
            face = DetectedFace()
            face.from_alignment(rawface, image=image)
            detected_faces.append(face)
        return detected_faces

    def check_alignments(self, frame):
        """ If we have no alignments for this image, skip it """
        have_alignments = self.alignments.frame_exists(frame)
        if not have_alignments:
            tqdm.write("No alignment found for {}, " "skipping".format(frame))
        return have_alignments

    def convert(self, converter, item):
        """ Apply the conversion transferring faces onto frames """
        try:
            filename, image, faces = item
            skip = self.opts.check_skipframe(filename)

            if not skip:
                for face in faces:
                    image = self.convert_one_face(converter, image, face)
                filename = str(self.output_dir / Path(filename).name)
                cv2.imwrite(filename, image)  # pylint: disable=no-member
        except Exception as err:
            logger.error("Failed to convert image: '%s'. Reason: %s", filename,
                         err)
            raise

    def convert_one_face(self, converter, image, face):
        """ Perform the conversion on the given frame for a single face """
        # TODO: This switch between 64 and 128 is a hack for now.
        # We should have a separate cli option for size
        size = 128 if (self.args.trainer.strip().lower()
                       in ('gan128', 'originalhighres')) else 64

        image = converter.patch_image(image, face, size)
        return image
Example #23
0
class Predict():
    """ Predict faces from incoming queue.

    Batches detected faces from the input queue, runs them through the
    trained model in a background thread and puts the swapped faces on the
    'patch' queue.
    """
    def __init__(self, in_queue, queue_size, arguments):
        """ in_queue: queue of frame items with detected faces
            queue_size: used to derive the prediction batch size (capped at 16)
            arguments: the parsed command line options (argparse.Namespace) """
        logger.debug(
            "Initializing %s: (args: %s, queue_size: %s, in_queue: %s)",
            self.__class__.__name__, arguments, queue_size, in_queue)
        self.batchsize = min(queue_size, 16)
        self.args = arguments
        self.in_queue = in_queue
        self.out_queue = queue_manager.get_queue("patch")
        self.serializer = Serializer.get_serializer("json")
        self.faces_count = 0
        self.verify_output = False  # Set when any frame holds more than one face
        self.pre_process = PostProcess(arguments)
        self.model = self.load_model()
        self.predictor = self.model.converter(self.args.swap_model)
        self.queues = dict()

        # Prediction runs in its own thread, consuming in_queue until EOF
        self.thread = MultiThread(self.predict_faces, thread_count=1)
        self.thread.start()
        logger.debug("Initialized %s: (out_queue: %s)",
                     self.__class__.__name__, self.out_queue)

    @property
    def coverage_ratio(self):
        """ Return coverage ratio from training options """
        return self.model.training_opts["coverage_ratio"]

    @property
    def input_size(self):
        """ Return the model input size """
        return self.model.input_shape[0]

    @property
    def output_size(self):
        """ Return the model output size """
        return self.model.output_shape[0]

    @property
    def input_mask(self):
        """ Return a zero-filled dummy mask batch of the model's mask shape """
        mask = np.zeros(self.model.state.mask_shapes[0], dtype="float32")
        retval = np.expand_dims(mask, 0)
        return retval

    @property
    def has_predicted_mask(self):
        """ Return whether this model has a predicted mask """
        return bool(self.model.state.mask_shapes)

    def load_model(self):
        """ Load the model requested for conversion """
        logger.debug("Loading Model")
        model_dir = get_folder(self.args.model_dir, make_folder=False)
        if not model_dir:
            logger.error("%s does not exist.", self.args.model_dir)
            sys.exit(1)
        trainer = self.get_trainer(model_dir)
        model = PluginLoader.get_model(trainer)(model_dir,
                                                self.args.gpus,
                                                predict=True)
        logger.debug("Loaded Model")
        return model

    def get_trainer(self, model_dir):
        """ Return the trainer name if provided, or read from state file """
        if self.args.trainer:
            logger.debug("Trainer name provided: '%s'", self.args.trainer)
            return self.args.trainer

        statefile = [
            fname for fname in os.listdir(str(model_dir))
            if fname.endswith("_state.json")
        ]
        if len(statefile) != 1:
            # Fixed: the %s placeholder previously had no argument supplied
            logger.error(
                "There should be 1 state file in your model folder. %s were found. "
                "Specify a trainer with the '-t', '--trainer' option.",
                len(statefile))
            sys.exit(1)
        statefile = os.path.join(str(model_dir), statefile[0])

        with open(statefile, "rb") as inp:
            state = self.serializer.unmarshal(inp.read().decode("utf-8"))
            trainer = state.get("name", None)

        if not trainer:
            logger.error(
                "Trainer name could not be read from state file. "
                "Specify a trainer with the '-t', '--trainer' option.")
            sys.exit(1)
        logger.debug("Trainer from state file: '%s'", trainer)
        return trainer

    def predict_faces(self):
        """ Get detected faces from images, batch them and predict """
        faces_seen = 0
        batch = list()
        while True:
            item = self.in_queue.get()
            if item != "EOF":
                logger.trace("Got from queue: '%s'", item["filename"])
                faces_count = len(item["detected_faces"])
                if faces_count != 0:
                    self.pre_process.do_actions(item)
                    self.faces_count += faces_count
                if faces_count > 1:
                    self.verify_output = True
                    logger.verbose(
                        "Found more than one face in an image! '%s'",
                        os.path.basename(item["filename"]))

                self.load_aligned(item)

                faces_seen += faces_count
                batch.append(item)

            # Keep accumulating until we have a full batch or hit EOF
            if faces_seen < self.batchsize and item != "EOF":
                logger.trace("Continuing. Current batchsize: %s", faces_seen)
                continue

            if batch:
                # Flatten all faces across the batched frames for the model
                detected_batch = [
                    detected_face for item in batch
                    for detected_face in item["detected_faces"]
                ]
                feed_faces = self.compile_feed_faces(detected_batch)
                predicted = self.predict(feed_faces)

                self.queue_out_frames(batch, predicted)

            faces_seen = 0
            batch = list()
            if item == "EOF":
                logger.debug("Load queue complete")
                break
        self.out_queue.put("EOF")

    def load_aligned(self, item):
        """ Load the feed faces and reference output faces """
        logger.trace("Loading aligned faces: '%s'", item["filename"])
        for detected_face in item["detected_faces"]:
            detected_face.load_feed_face(item["image"],
                                         size=self.input_size,
                                         coverage_ratio=self.coverage_ratio,
                                         dtype="float32")
            if self.input_size == self.output_size:
                # Reuse the feed face when input and output sizes match
                detected_face.reference = detected_face.feed
            else:
                detected_face.load_reference_face(
                    item["image"],
                    size=self.output_size,
                    coverage_ratio=self.coverage_ratio,
                    dtype="float32")
        logger.trace("Loaded aligned faces: '%s'", item["filename"])

    @staticmethod
    def compile_feed_faces(detected_faces):
        """ Compile the faces for feeding into the predictor """
        logger.trace("Compiling feed face. Batchsize: %s", len(detected_faces))
        feed_faces = np.stack(
            [detected_face.feed_face for detected_face in detected_faces])
        logger.trace("Compiled Feed faces. Shape: %s", feed_faces.shape)
        return feed_faces

    def predict(self, feed_faces):
        """ Perform inference on the feed """
        logger.trace("Predicting: Batchsize: %s", len(feed_faces))
        feed = [feed_faces]
        if self.has_predicted_mask:
            # Repeat the dummy mask across the batch dimension
            feed.append(np.repeat(self.input_mask, feed_faces.shape[0],
                                  axis=0))
        logger.trace("Input shape(s): %s", [item.shape for item in feed])

        predicted = self.predictor(feed)
        predicted = predicted if isinstance(predicted, list) else [predicted]
        logger.trace("Output shape(s): %s",
                     [predict.shape for predict in predicted])

        # Compile masks into alpha channel or keep raw faces
        predicted = np.concatenate(
            predicted, axis=-1) if len(predicted) == 2 else predicted[0]
        predicted = predicted.astype("float32")

        logger.trace("Final shape: %s", predicted.shape)
        return predicted

    def queue_out_frames(self, batch, swapped_faces):
        """ Compile the batch back to original frames and put to out_queue """
        logger.trace("Queueing out batch. Batchsize: %s", len(batch))
        pointer = 0
        for item in batch:
            # Slice this frame's faces back out of the flat prediction batch
            num_faces = len(item["detected_faces"])
            if num_faces == 0:
                item["swapped_faces"] = np.array(list())
            else:
                item["swapped_faces"] = swapped_faces[pointer:pointer +
                                                      num_faces]

            logger.trace(
                "Putting to queue. ('%s', detected_faces: %s, swapped_faces: %s)",
                item["filename"], len(item["detected_faces"]),
                item["swapped_faces"].shape[0])
            self.out_queue.put(item)
            pointer += num_faces
        logger.trace("Queued out batch. Batchsize: %s", len(batch))
Example #24
0
class Convert():
    """ The convert process.

    Swaps faces in the source frames using a trained model, reading face
    locations from an alignments file or extracting them on the fly via an
    Extractor pipeline when no alignments file exists.
    """
    def __init__(self, arguments):
        """ arguments: the parsed command line options (argparse.Namespace) """
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)
        # Set by load_extractor() when falling back to on-the-fly extraction
        self.extractor = None
        self.faces_count = 0

        self.images = Images(self.args)
        self.alignments = Alignments(self.args, False, self.images.is_video)

        # Update Legacy alignments
        Legacy(self.alignments, self.images.input_images,
               arguments.input_aligned_dir)

        self.post_process = PostProcess(arguments)
        self.verify_output = False

        self.opts = OptionalActions(self.args, self.images.input_images,
                                    self.alignments)
        logger.debug("Initialized %s", self.__class__.__name__)

    def process(self):
        """ Original & LowMem models go with converter

            Note: GAN prediction outputs a mask + an image, while other
            predicts only an image. """
        Utils.set_verbosity(self.args.loglevel)

        if not self.alignments.have_alignments_file:
            self.load_extractor()

        model = self.load_model()
        converter = self.load_converter(model)

        # Pre-fetch one batch in the background while converting
        batch = BackgroundGenerator(self.prepare_images(), 1)

        for item in batch.iterator():
            self.convert(converter, item)

        if self.extractor:
            queue_manager.terminate_queues()

        Utils.finalize(self.images.images_found, self.faces_count,
                       self.verify_output)

    def load_extractor(self):
        """ Set on the fly extraction """
        logger.warning("No Alignments file found. Extracting on the fly.")
        logger.warning(
            "NB: This will use the inferior dlib-hog for extraction "
            "and dlib pose predictor for landmarks. It is recommended "
            "to perfom Extract first for superior results")
        extract_args = {
            "detector": "dlib-hog",
            "aligner": "dlib",
            "loglevel": self.args.loglevel
        }
        self.extractor = Extractor(None, extract_args)
        self.extractor.launch_detector()
        self.extractor.launch_aligner()

    def load_model(self):
        """ Load the model requested for conversion """
        logger.debug("Loading Model")
        model_dir = get_folder(self.args.model_dir)
        model = PluginLoader.get_model(self.args.trainer)(model_dir,
                                                          self.args.gpus,
                                                          predict=True)
        logger.debug("Loaded Model")
        return model

    def load_converter(self, model):
        """ Load the requested converter for conversion """
        conv = self.args.converter
        converter = PluginLoader.get_converter(conv)(model.converter(
            self.args.swap_model),
                                                     model=model,
                                                     arguments=self.args)
        return converter

    def prepare_images(self):
        """ Generator yielding (filename, image, detected_faces) for each
        frame to be converted """
        filename = ""
        if self.extractor:
            load_queue = queue_manager.get_queue("load")
        for filename, image in tqdm(self.images.load(),
                                    total=self.images.images_found,
                                    file=sys.stdout):

            # Honour the user's frame-range discard option
            if (self.args.discard_frames
                    and self.opts.check_skipframe(filename) == "discard"):
                continue

            frame = os.path.basename(filename)
            if self.extractor:
                detected_faces = self.detect_faces(load_queue, filename, image)
            else:
                detected_faces = self.alignments_faces(frame, image)

            faces_count = len(detected_faces)
            if faces_count != 0:
                # Post processing requires a dict with "detected_faces" key
                self.post_process.do_actions(
                    {"detected_faces": detected_faces})
                self.faces_count += faces_count

            if faces_count > 1:
                self.verify_output = True
                logger.verbose("Found more than one face in "
                               "an image! '%s'", frame)

            yield filename, image, detected_faces

    def detect_faces(self, load_queue, filename, image):
        """ Extract the face from a frame (If alignments file not found) """
        # Push the frame through the extractor pipeline and collect the result
        inp = {"filename": filename, "image": image}
        load_queue.put(inp)
        faces = next(self.extractor.detect_faces())

        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        final_faces = list()

        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            # NOTE(review): from_dlib_rect is called without the image here,
            # unlike other variants in this file — confirm this is intended
            detected_face.from_dlib_rect(face)
            detected_face.landmarksXY = landmarks[idx]
            final_faces.append(detected_face)
        return final_faces

    def alignments_faces(self, frame, image):
        """ Get the faces for this frame from the alignments file.

        Returns an empty list when no alignments exist for the frame.
        """
        if not self.check_alignments(frame):
            return list()

        faces = self.alignments.get_faces_in_frame(frame)
        detected_faces = list()

        for rawface in faces:
            face = DetectedFace()
            face.from_alignment(rawface, image=image)
            detected_faces.append(face)
        return detected_faces

    def check_alignments(self, frame):
        """ If we have no alignments for this image, skip it """
        have_alignments = self.alignments.frame_exists(frame)
        if not have_alignments:
            tqdm.write("No alignment found for {}, " "skipping".format(frame))
        return have_alignments

    def convert(self, converter, item):
        """ Apply the conversion transferring faces onto frames """
        try:
            filename, image, faces = item
            skip = self.opts.check_skipframe(filename)

            if not skip:
                for face in faces:
                    image = converter.patch_image(image, face)
                filename = str(self.output_dir / Path(filename).name)

                # Transparent output requires a format with an alpha channel
                if self.args.draw_transparent:
                    filename = "{}.png".format(os.path.splitext(filename)[0])
                    logger.trace("Set extension to png: `%s`", filename)

                cv2.imwrite(filename, image)  # pylint: disable=no-member
        except Exception as err:
            logger.error("Failed to convert image: '%s'. Reason: %s", filename,
                         err)
            raise
Example #25
0
class Live():  # pylint:disable=too-few-public-methods
    """ The Faceswap Face Conversion Process.

    The conversion process is responsible for swapping the faces on source frames with the output
    from a trained model.

    It leverages a series of user selected post-processing plugins, executed from
    :class:`lib.convert.Converter`.

    The convert process is self contained and should not be referenced by any other scripts, so it
    contains no public properties.

    Parameters
    ----------
    arguments: :class:`argparse.Namespace`
        The arguments to be passed to the convert process as generated from Faceswap's command
        line arguments
    """
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
        self._args = arguments

        # Re-used single-item batch: every incoming frame is fed through the
        # model as a batch of one
        self.batch = list()

        self._serializer = get_serializer("json")
        self._pre_process = PostProcess(arguments)
        self._writer = self._get_writer()
        self._extractor = self._load_extractor()

        self._batchsize = self._get_batchsize(self._queue_size)
        self._model = self._load_model()
        self._output_indices = {"face": self._model.largest_face_index,
                                "mask": self._model.largest_mask_index}

        self._predictor = self._model.converter(False)

        configfile = self._args.configfile if hasattr(self._args, "configfile") else None
        self._converter = Converter(self.output_size,
                                    self.coverage_ratio,
                                    self.draw_transparent,
                                    self.pre_encode,
                                    arguments,
                                    configfile=configfile)

        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def draw_transparent(self):
        """ bool: ``True`` if the selected writer's Draw_transparent configuration item is set
        otherwise ``False`` """
        return self._writer.config.get("draw_transparent", False)

    @property
    def pre_encode(self):
        """ python function: Selected writer's pre-encode function, if it has one,
        otherwise ``None`` """
        # Probe the writer with a dummy image to discover whether it
        # implements pre-encoding
        dummy = np.zeros((20, 20, 3), dtype="uint8")
        test = self._writer.pre_encode(dummy)
        retval = None if test is None else self._writer.pre_encode
        logger.debug("Writer pre_encode function: %s", retval)
        return retval

    @property
    def coverage_ratio(self):
        """ float: The coverage ratio that the model was trained at. """
        # NB: this property (and output_size) were previously defined twice in
        # this class; the duplicates have been removed.
        return self._model.training_opts["coverage_ratio"]

    @property
    def output_size(self):
        """ int: The size in pixels of the Faceswap model output. """
        return self._model.output_shape[0]

    @property
    def _input_size(self):
        """ int: The size in pixels of the Faceswap model input. """
        return self._model.input_shape[0]

    @property
    def _queue_size(self):
        """ int: Size of the converter queues. 16 for single process otherwise 32 """
        retval = 16 if self._args.singleprocess else 32
        logger.debug(retval)
        return retval

    @property
    def _pool_processes(self):
        """ int: The number of threads to run in parallel. Live conversion always uses a single
        process. """
        return 1

    @staticmethod
    def _get_batchsize(queue_size):
        """ Get the batch size for feeding the model.

        Sets the batch size to 1 if inference is being run on CPU, otherwise the minimum of the
        given ``queue_size`` and 16.

        Parameters
        ----------
        queue_size: int
            Size of the converter queues; upper bound for the batch size

        Returns
        -------
        int
            The batch size that the model is to be fed at.
        """
        logger.debug("Getting batchsize")
        is_cpu = GPUStats().device_count == 0
        batchsize = 1 if is_cpu else 16
        batchsize = min(queue_size, batchsize)
        logger.debug("Got batchsize: %s", batchsize)
        return batchsize

    def _add_queues(self):
        """ Add the queues for in, patch and out. """
        logger.debug("Adding queues. Queue size: %s", self._queue_size)
        for qname in ("convert_in", "convert_out", "patch"):
            queue_manager.add_queue(qname, self._queue_size)

    def process(self):
        """ The entry point for triggering the Conversion Process.

        Should only be called from  :class:`lib.cli.ScriptExecutor`

        Raises
        ------
        FaceswapError
            If the process runs out of system RAM
        """
        logger.debug("Starting Conversion")
        # queue_manager.debug_monitor(5)
        try:
            self._convert_images()

            logger.debug("Completed Conversion")
        except MemoryError as err:
            msg = ("Faceswap ran out of RAM running convert. Conversion is very system RAM "
                   "heavy, so this can happen in certain circumstances when you have a lot of "
                   "cpus but not enough RAM to support them all."
                   "\nYou should lower the number of processes in use by either setting the "
                   "'singleprocess' flag (-sp) or lowering the number of parallel jobs (-j).")
            raise FaceswapError(msg) from err

    def _convert_images(self):
        """ Run the live webcam conversion loop.

        Grabs frames from the default webcam, swaps the faces, and displays the
        result until 'q' is pressed. """
        logger.debug("Converting images")
        video_capture = cv2.VideoCapture(0)
        time.sleep(1)  # give the camera a moment to warm up

        width = video_capture.get(3)  # float
        height = video_capture.get(4)  # float
        print("webcam dimensions = {} x {}".format(width, height))

        while True:
            ret, frame = video_capture.read()
            #frame = cv2.resize(frame, (640, 480))
            # flip image, because webcam inverts it and we trained the model the other way!
            frame = cv2.flip(frame, 1)
            image = self._convert_frame(frame, convert_colors=False)
            # flip it back
            #image = cv2.flip(image, 1)
            #image = cv2.resize(image, (640, 480))
            # NOTE(review): the swapped result is re-read from the writer's output
            # file rather than displayed directly from ``image`` — presumably so the
            # writer's encoding is shown; confirm this round-trip is intended
            img = cv2.imread(self._writer.output_filename("result"))
            img = cv2.resize(img, (1280, 720))
            cv2.imshow('Video', img)
            # print("writing to screen")

            # Hit 'q' on the keyboard to quit!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                video_capture.release()
                break

        cv2.destroyAllWindows()
        sys.exit()  # sys.exit rather than the site-dependent exit() builtin

    def _convert_frame(self, frame, convert_colors=True):
        """ Swap the faces on a single webcam frame and write it out.

        Parameters
        ----------
        frame: :class:`numpy.ndarray`
            The frame captured from the webcam
        convert_colors: bool, optional
            Unused; retained for call compatibility. Default: ``True``

        Returns
        -------
        :class:`numpy.ndarray`
            The patched frame, or the untouched frame when no faces were found
        """
        detected_faces = self._get_detected_faces("camera", frame)
        if len(detected_faces) == 0:
            return frame
        item = dict(filename="camera", image=frame, detected_faces=detected_faces)
        patched = self._swap_faces(item)
        self._writer.write("result", patched)
        return patched

    def _process_image(self, filename, image):
        """ Swap the faces on a single loaded image.

        Parameters
        ----------
        filename: str
            The filename of the image being processed
        image: :class:`numpy.ndarray`
            The frame to convert

        Returns
        -------
        :class:`numpy.ndarray`
            The patched image, or the untouched image when no faces were found
        """
        detected_faces = self._get_detected_faces(filename, image)
        item = dict(filename=filename, image=image, detected_faces=detected_faces)
        self._pre_process.do_actions(item)
        # Guard: previously a frame with no faces crashed np.stack inside
        # _compile_feed_faces; return the unmodified frame instead
        if not item["detected_faces"]:
            return image
        return self._swap_faces(item)

    def _swap_faces(self, item):
        """ Run the shared align -> predict -> patch sequence for a single item.

        This logic was previously duplicated in :func:`_convert_frame` and
        :func:`_process_image`.

        Parameters
        ----------
        item: dict
            ``filename``, ``image`` and a non-empty ``detected_faces`` list

        Returns
        -------
        :class:`numpy.ndarray`
            The frame with the swapped faces patched in
        """
        self.load_aligned(item)

        self.batch.clear()
        self.batch.append(item)
        detected_batch = [detected_face for batch_item in self.batch
                          for detected_face in batch_item["detected_faces"]]
        feed_faces = self._compile_feed_faces(detected_batch)
        predicted = self._predict(feed_faces, 1)

        # Distribute the predicted faces back to their source items
        pointer = 0
        for batch_item in self.batch:
            num_faces = len(batch_item["detected_faces"])
            if num_faces == 0:
                batch_item["swapped_faces"] = np.array(list())
            else:
                batch_item["swapped_faces"] = predicted[pointer:pointer + num_faces]
            pointer += num_faces

        return self._converter._patch_image(item)  # pylint:disable=protected-access

    def _convert_images2(self):
        """ Convert a folder of images serially.

        NOTE(review): ``self._images`` is never assigned in ``__init__`` — this
        method looks unused/dead in the live pipeline; confirm before relying on it.
        """
        for filename, image in self._images.load():
            patched = self._process_image(filename, image)
            self._writer.write(filename, patched)

    def _get_writer(self):
        """ Load the selected writer plugin.

        Returns
        -------
        :mod:`plugins.convert.writer` plugin
            The requested writer plugin
        """
        args = [os.path.abspath(os.path.dirname(sys.argv[0]))]
        logger.debug("Writer args: %s", args)
        configfile = self._args.configfile if hasattr(self._args, "configfile") else None
        return PluginLoader.get_live("writer", self._args.writer)(*args,
                                                                  configfile=configfile)

    def _load_extractor(self):
        """ Load the CV2-DNN Face Extractor Chain.

        For On-The-Fly conversion we use a CPU based extractor to avoid stacking the GPU.
        Results are poor.

        Returns
        -------
        :class:`plugins.extract.Pipeline.Extractor`
            The face extraction chain to be used for on-the-fly conversion
        """
        logger.debug("Loading extractor")
        logger.warning("On-The-Fly conversion selected. This will use the inferior cv2-dnn for "
                       "extraction and will produce poor results.")
        logger.warning("It is recommended to generate an alignments file for your destination "
                       "video with Extract first for superior results.")
        extractor = Extractor(detector="cv2-dnn",
                              aligner="cv2-dnn",
                              masker="none",
                              multiprocess=True,
                              rotate_images=None,
                              min_size=20)
        extractor.launch()
        logger.debug("Loaded extractor")
        return extractor

    def _get_detected_faces(self, filename, image):
        """ Return the detected faces for the given image.

        If we have an alignments file, then the detected faces are created from that file. If
        we're running On-The-Fly then they will be extracted from the extractor.

        Parameters
        ----------
        filename: str
            The filename to return the detected faces for
        image: :class:`numpy.ndarray`
            The frame that the detected faces exist in

        Returns
        -------
        list
            List of :class:`lib.faces_detect.DetectedFace` objects
        """
        logger.trace("Getting faces for: '%s'", filename)
        if not self._extractor:
            detected_faces = self._alignments_faces(os.path.basename(filename), image)
        else:
            detected_faces = self._detect_faces(filename, image)
        logger.trace("Got %s faces for: '%s'", len(detected_faces), filename)
        return detected_faces

    def _detect_faces(self, filename, image):
        """ Extract the face from a frame for On-The-Fly conversion.

        Pulls detected faces out of the Extraction pipeline.

        Parameters
        ----------
        filename: str
            The filename to return the detected faces for
        image: :class:`numpy.ndarray`
            The frame that the detected faces exist in

        Returns
        -------
        list
            List of :class:`lib.faces_detect.DetectedFace` objects
         """
        self._extractor.input_queue.put(ExtractMedia(filename, image))
        faces = next(self._extractor.detected_faces())
        return list(faces.detected_faces)

    def _load_model(self):
        """ Load the Faceswap model.

        Returns
        -------
        :mod:`plugins.train.model` plugin
            The trained model in the specified model folder

        Raises
        ------
        FaceswapError
            If the model folder does not exist
        """
        logger.debug("Loading Model")
        model_dir = get_folder(self._args.model_dir, make_folder=False)
        if not model_dir:
            raise FaceswapError("{} does not exist.".format(self._args.model_dir))
        trainer = self._get_model_name(model_dir)
        gpus = 1 if not hasattr(self._args, "gpus") else self._args.gpus
        model = PluginLoader.get_model(trainer)(model_dir, gpus, predict=True)
        logger.debug("Loaded Model")
        return model

    def _get_model_name(self, model_dir):
        """ Return the name of the Faceswap model used.

        If a "trainer" option has been selected in the command line arguments, use that value,
        otherwise retrieve the name of the model from the model's state file.

        Parameters
        ----------
        model_dir: str
            The folder that contains the trained Faceswap model

        Returns
        -------
        str
            The name of the Faceswap model being used.

        Raises
        ------
        FaceswapError
            If zero or multiple state files are found, or the state file holds no trainer name
        """
        if hasattr(self._args, "trainer") and self._args.trainer:
            logger.debug("Trainer name provided: '%s'", self._args.trainer)
            return self._args.trainer

        statefile = [fname for fname in os.listdir(str(model_dir))
                     if fname.endswith("_state.json")]
        if len(statefile) != 1:
            raise FaceswapError("There should be 1 state file in your model folder. {} were "
                                "found. Specify a trainer with the '-t', '--trainer' "
                                "option.".format(len(statefile)))
        statefile = os.path.join(str(model_dir), statefile[0])

        state = self._serializer.load(statefile)
        trainer = state.get("name", None)

        if not trainer:
            raise FaceswapError("Trainer name could not be read from state file. "
                                "Specify a trainer with the '-t', '--trainer' option.")
        logger.debug("Trainer from state file: '%s'", trainer)
        return trainer

    def _alignments_faces(self, frame_name, image):
        """ Return detected faces from an alignments file.

        NOTE(review): ``self._alignments`` is never assigned in ``__init__`` and
        ``_load_extractor`` always returns an extractor, so this path looks
        unreachable — confirm before use.

        Parameters
        ----------
        frame_name: str
            The name of the frame to return the detected faces for
        image: :class:`numpy.ndarray`
            The frame that the detected faces exist in

        Returns
        -------
        list
            List of :class:`lib.faces_detect.DetectedFace` objects
        """
        if not self._check_alignments(frame_name):
            return list()

        faces = self._alignments.get_faces_in_frame(frame_name)
        detected_faces = list()

        for rawface in faces:
            face = DetectedFace()
            face.from_alignment(rawface, image=image)
            detected_faces.append(face)
        return detected_faces

    def _check_alignments(self, frame_name):
        """ Ensure that we have alignments for the current frame.

        If we have no alignments for this image, skip it and output a message.

        Parameters
        ----------
        frame_name: str
            The name of the frame to check that we have alignments for

        Returns
        -------
        bool
            ``True`` if we have alignments for this face, otherwise ``False``
        """
        have_alignments = self._alignments.frame_exists(frame_name)
        if not have_alignments:
            tqdm.write("No alignment found for {}, "
                       "skipping".format(frame_name))
        return have_alignments

    def load_aligned(self, item):
        """ Load the model's feed faces and the reference output faces.

        For each detected face in the incoming item, load the feed face and reference face
        images, correctly sized for input and output respectively.

        Parameters
        ----------
        item: dict
            The incoming image and list of :class:`~lib.faces_detect.DetectedFace` objects
        """
        logger.trace("Loading aligned faces: '%s'", item["filename"])
        for detected_face in item["detected_faces"]:
            detected_face.load_feed_face(item["image"],
                                         size=self._input_size,
                                         coverage_ratio=self.coverage_ratio,
                                         dtype="float32")
            if self._input_size == self.output_size:
                # Input and output sizes match so the feed face can double as
                # the reference face
                detected_face.reference = detected_face.feed
            else:
                detected_face.load_reference_face(item["image"],
                                                  size=self.output_size,
                                                  coverage_ratio=self.coverage_ratio,
                                                  dtype="float32")
        logger.trace("Loaded aligned faces: '%s'", item["filename"])

    @staticmethod
    def _compile_feed_faces(detected_faces):
        """ Compile a batch of faces for feeding into the Predictor.

        Parameters
        ----------
        detected_faces: list
            List of `~lib.faces_detect.DetectedFace` objects

        Returns
        -------
        :class:`numpy.ndarray`
            A batch of faces ready for feeding into the Faceswap model.
        """
        logger.trace("Compiling feed face. Batchsize: %s", len(detected_faces))
        # Drop any alpha channel and scale to 0-1 for the model
        feed_faces = np.stack([detected_face.feed_face[..., :3]
                               for detected_face in detected_faces]) / 255.0
        logger.trace("Compiled Feed faces. Shape: %s", feed_faces.shape)
        return feed_faces

    def _predict(self, feed_faces, batch_size=None):
        """ Run the Faceswap models' prediction function.

        Parameters
        ----------
        feed_faces: :class:`numpy.ndarray`
            The batch to be fed into the model
        batch_size: int, optional
            Used for plaidml only. Indicates to the model what batch size is being processed.
            Default: ``None``

        Returns
        -------
        :class:`numpy.ndarray`
            The swapped faces for the given batch
        """
        logger.trace("Predicting: Batchsize: %s", len(feed_faces))
        feed = [feed_faces]
        if self._model.feed_mask:
            # NOTE(review): self._input_mask is never assigned in this class, so
            # this branch would raise AttributeError — confirm where the input
            # mask should be initialized
            feed.append(np.repeat(self._input_mask, feed_faces.shape[0], axis=0))
        logger.trace("Input shape(s): %s", [item.shape for item in feed])

        predicted = self._predictor(feed, batch_size=batch_size)
        predicted = predicted if isinstance(predicted, list) else [predicted]
        logger.trace("Output shape(s): %s", [predict.shape for predict in predicted])

        predicted = self._filter_multi_out(predicted)

        # Compile masks into alpha channel or keep raw faces
        predicted = np.concatenate(predicted, axis=-1) if len(predicted) == 2 else predicted[0]
        predicted = predicted.astype("float32")

        logger.trace("Final shape: %s", predicted.shape)
        return predicted

    def _filter_multi_out(self, predicted):
        """ Filter the model output to just the required image.

        Some models have multi-scale outputs, so just make sure we take the largest
        output.

        Parameters
        ----------
        predicted: :class:`numpy.ndarray`
            The predictions retrieved from the Faceswap model.

        Returns
        -------
        :class:`numpy.ndarray`
            The predictions with any superfluous outputs removed.
        """
        if not predicted:
            return predicted
        face = predicted[self._output_indices["face"]]
        mask_idx = self._output_indices["mask"]
        mask = predicted[mask_idx] if mask_idx is not None else None
        predicted = [face, mask] if mask is not None else [face]
        logger.trace("Filtered output shape(s): %s", [predict.shape for predict in predicted])
        return predicted
Example #26
0
class Extract():
    """ The extract process. """
    def __init__(self, arguments):
        """ Set up the extraction pipeline from the parsed command line arguments. """
        # Fix: the log format string was missing its closing parenthesis ("(args: %s")
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector,
                                   self.args.aligner,
                                   self.args.loglevel,
                                   self.args.multiprocess,
                                   self.args.rotate_images,
                                   self.args.min_size)

        self.save_queue = queue_manager.get_queue("extract_save")
        # Set when any frame yields more than one face, prompting output verification
        self.verify_output = False
        # Alignments are saved every save_interval frames when provided
        self.save_interval = getattr(self.args, "save_interval", None)
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def skip_num(self):
        """ Number of frames to skip if extract_every_n is passed """
        return self.args.extract_every_n if hasattr(self.args, "extract_every_n") else 1

    def process(self):
        """ Run the full extraction pipeline: threaded load/save, extraction, then
        finalize the alignments and report the results. """
        logger.info('Starting, this may take a while...')
        # queue_manager.debug_monitor(3)
        self.threaded_io("load")
        saver = self.threaded_io("save")
        self.run_extraction()
        saver.join()
        self.alignments.save()
        Utils.finalize(self.images.images_found // self.skip_num,
                       self.alignments.faces_count,
                       self.verify_output)

    def threaded_io(self, task, io_args=None):
        """ Perform an I/O task in a background thread.

        Parameters
        ----------
        task: str
            One of ``"load"``, ``"save"`` or ``"reload"``
        io_args: optional
            A single argument to pass through to the task function. Default: ``None``

        Returns
        -------
        :class:`lib.multithreading.MultiThread`
            The started thread

        Raises
        ------
        ValueError
            If ``task`` is not a recognised task name (previously this surfaced as
            an UnboundLocalError on ``func``)
        """
        logger.debug("Threading task: (Task: '%s')", task)
        io_args = tuple() if io_args is None else (io_args, )
        tasks = {"load": self.load_images,
                 "save": self.save_faces,
                 "reload": self.reload_images}
        if task not in tasks:
            raise ValueError("Unknown I/O task: '{}'".format(task))
        io_thread = MultiThread(tasks[task], *io_args, thread_count=1)
        io_thread.start()
        return io_thread

    def load_images(self):
        """ Feed frames into the extractor's input queue, skipping frames per
        ``extract_every_n``, unreadable images, and frames already aligned. """
        logger.debug("Load Images: Start")
        load_queue = self.extractor.input_queue
        for idx, (filename, image) in enumerate(self.images.load(), start=1):
            if load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            if idx % self.skip_num != 0:
                logger.trace("Skipping image '%s' due to extract_every_n = %s",
                             filename, self.skip_num)
                continue
            if image is None or not image.any():
                logger.warning("Unable to open image. Skipping: '%s'", filename)
                continue
            if os.path.basename(filename) in self.alignments.data.keys():
                logger.trace("Skipping image: '%s'", filename)
                continue
            load_queue.put({"filename": filename, "image": image})
        load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def reload_images(self, detected_faces):
        """ Reload the source frames and pair each one back to its detected faces.

        Parameters
        ----------
        detected_faces: dict
            Mapping of filename to the faces detected in a previous pass
        """
        logger.debug("Reload Images: Start. Detected Faces Count: %s", len(detected_faces))
        load_queue = self.extractor.input_queue
        for filename, image in self.images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Reload Queue: Stop signal received. Terminating")
                break
            logger.trace("Reloading image: '%s'", filename)
            item = detected_faces.pop(filename, None)
            if not item:
                logger.warning("Couldn't find faces for: %s", filename)
                continue
            item["image"] = image
            load_queue.put(item)
        load_queue.put("EOF")
        logger.debug("Reload Images: Complete")

    def save_faces(self):
        """ Pull encoded face images off the save queue and write them to disk
        until EOF or queue shutdown. """
        logger.debug("Save Faces: Start")
        while True:
            if self.save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = self.save_queue.get()
            logger.trace(item)
            if item == "EOF":
                break
            filename, face = item
            logger.trace("Saving face: '%s'", filename)
            try:
                with open(filename, "wb") as out_file:
                    out_file.write(face)
            except Exception as err:  # pylint: disable=broad-except
                # Best effort: log the failure and keep saving the remaining faces
                logger.error("Failed to save image '%s'. Original Error: %s", filename, err)
        logger.debug("Save Faces: Complete")

    def process_item_count(self):
        """ Return the number of items to be processed.

        Exits the process (via :func:`sys.exit`) when nothing remains to do.

        Returns
        -------
        int
            Number of frames still requiring extraction, accounting for skipped frames
        """
        processed = sum(os.path.basename(frame) in self.alignments.data.keys()
                        for frame in self.images.input_images)
        logger.debug("Items already processed: %s", processed)

        if processed != 0 and self.args.skip_existing:
            logger.info("Skipping previously extracted frames: %s", processed)
        if processed != 0 and self.args.skip_faces:
            logger.info("Skipping frames with detected faces: %s", processed)

        to_process = (self.images.images_found - processed) // self.skip_num
        logger.debug("Items to be Processed: %s", to_process)
        if to_process == 0:
            logger.error("No frames to process. Exiting")
            queue_manager.terminate_queues()
            # sys.exit rather than the site-dependent exit() builtin
            sys.exit(0)
        return to_process

    def run_extraction(self):
        """ Run the multi-pass face detection/alignment and output the results.

        Fix: the interval-save condition previously read ``idx + 1 % self.save_interval == 0``
        which, by operator precedence, evaluated ``idx + (1 % interval)`` and so almost never
        triggered. It now saves the alignments every ``save_interval`` frames as intended.
        """
        to_process = self.process_item_count()
        size = self.args.size if hasattr(self.args, "size") else 256
        align_eyes = self.args.align_eyes if hasattr(self.args, "align_eyes") else False
        exception = False

        for phase in range(self.extractor.passes):
            if exception:
                break
            is_final = self.extractor.final_pass
            detected_faces = dict()
            self.extractor.launch()
            for idx, faces in enumerate(tqdm(self.extractor.detected_faces(),
                                             total=to_process,
                                             file=sys.stdout,
                                             desc="Running pass {} of {}: {}".format(
                                                 phase + 1,
                                                 self.extractor.passes,
                                                 self.extractor.phase.title()))):

                exception = faces.get("exception", False)
                if exception:
                    break
                filename = faces["filename"]

                if self.extractor.final_pass:
                    self.output_processing(faces, align_eyes, size, filename)
                    self.output_faces(filename, faces)
                    # Parenthesized: save periodically every save_interval frames
                    if self.save_interval and (idx + 1) % self.save_interval == 0:
                        self.alignments.save()
                else:
                    # Intermediate pass: drop the image to save RAM and stash the
                    # faces for the reload pass
                    del faces["image"]
                    detected_faces[filename] = faces

            if is_final:
                logger.debug("Putting EOF to save")
                self.save_queue.put("EOF")
            else:
                logger.debug("Reloading images")
                self.threaded_io("reload", detected_faces)

    def output_processing(self, faces, align_eyes, size, filename):
        """ Align the detected faces and run post-processing actions ready for output. """
        self.align_face(faces, align_eyes, size, filename)
        self.post_process.do_actions(faces)

        num_faces = len(faces["detected_faces"])
        if num_faces == 0:
            logger.verbose("No faces were detected in image: %s",
                           os.path.basename(filename))
        if num_faces > 1:
            # Flag that at least one frame produced multiple faces so the user
            # is prompted to verify the output
            self.verify_output = True

    def align_face(self, faces, align_eyes, size, filename):
        """ Align each detected face and attach its destination file path. """
        image = faces["image"]
        landmarks = faces["landmarks"]
        aligned = list()
        for idx, bounding_box in enumerate(faces["detected_faces"]):
            face = DetectedFace()
            face.from_bounding_box(bounding_box, image)
            face.landmarksXY = landmarks[idx]
            face.load_aligned(image, size=size, align_eyes=align_eyes)
            aligned.append({"file_location": self.output_dir / Path(filename).stem,
                            "face": face})
        faces["detected_faces"] = aligned

    def output_faces(self, filename, faces):
        """ Queue the aligned faces for saving and record their alignments. """
        extension = Path(filename).suffix
        alignments = list()
        for idx, detected in enumerate(faces["detected_faces"]):
            out_filename = "{}_{}{}".format(str(detected["file_location"]),
                                            str(idx), extension)
            face = detected["face"]
            face.hash, img = hash_encode_image(face.aligned_face, extension)
            self.save_queue.put((out_filename, img))
            alignments.append(face.to_alignment())
        self.alignments.data[os.path.basename(filename)] = alignments