Ejemplo n.º 1
0
    def update_landmarks(self):
        """ Push the current frame's face to the aligner and store the result.

        Sends the image, frame id and bounding box to the "in" queue, blocks
        on the "out" queue for the landmarks, then either adds a new face or
        updates the currently selected face in the alignments data.

        Raises
        ------
        Exception
            If the aligner child process relayed an exception
        """
        queue_manager.get_queue("in").put({"image": self.media["image"],
                                           "filename": self.media["frame_id"],
                                           "face": self.media["bounding_box"]})
        # Blocks until the aligner process returns a result
        landmarks = queue_manager.get_queue("out").get()

        # A dict with an "exception" key signals a failure in the child
        # process: (pid, traceback buffer)
        if isinstance(landmarks, dict) and landmarks.get("exception"):
            cv2.destroyAllWindows()  # pylint: disable=no-member
            pid = landmarks["exception"][0]
            t_back = landmarks["exception"][1].getvalue()
            err = "Error in child process {}. {}".format(pid, t_back)
            raise Exception(err)
        if landmarks == "EOF":
            exit(0)

        # Only the first detected face/landmark set is used
        alignment = self.extracted_to_alignment((landmarks["detected_faces"][0],
                                                 landmarks["landmarks"][0]))
        frame = self.media["frame_id"]

        if self.interface.get_selected_face_id() is None:
            # No face selected: add as a new face and select it
            idx = self.alignments.add_face(frame, alignment)
            self.interface.set_state_value("edit", "selected", idx)
        else:
            # Replace the selected face's alignment and force a redraw
            self.alignments.update_face(frame,
                                        self.interface.get_selected_face_id(),
                                        alignment)
            self.interface.set_redraw(True)

        self.interface.state["edit"]["updated"] = True
        self.interface.state["edit"]["update_faces"] = True
Ejemplo n.º 2
0
 def get_landmarks(filename):
     """ Run the aligner over a single frame and return its landmarks.

     Used when no alignments file is found, so the face must be extracted
     on the fly. Only the first face's landmarks are returned.
     """
     frame = cv2.imread(filename)
     queue_manager.get_queue("in").put(Sort.alignment_dict(frame))
     result = queue_manager.get_queue("out").get()
     return result["landmarks"][0]
Ejemplo n.º 3
0
    def launch_aligner(self):
        """ Load an aligner plugin and spawn it to retrieve landmarks.

        Tries the "fan" aligner first, falling back to "cv2_dnn" if FAN
        fails to initialize or reports an error on start up.

        Raises
        ------
        ValueError
            If the fallback "cv2_dnn" aligner also fails to initialize
        """
        out_queue = queue_manager.get_queue("out")
        kwargs = {"in_queue": queue_manager.get_queue("in"),
                  "out_queue": out_queue}

        for plugin in ("fan", "cv2_dnn"):
            aligner = PluginLoader.get_aligner(plugin)(loglevel=self.args.loglevel)
            process = SpawnProcess(aligner.run, **kwargs)
            event = process.event
            process.start()
            # Wait for Aligner to take init
            # The first ever load of the model for FAN has reportedly taken
            # up to 3-4 minutes, hence high timeout.
            event.wait(300)

            if not event.is_set():
                if plugin == "fan":
                    process.join()
                    logger.error("Error initializing FAN. Trying CV2-DNN")
                    continue
                else:
                    # Fix: message previously read "inititalizing"
                    raise ValueError("Error initializing Aligner")
            if plugin == "cv2_dnn":
                # Fallback aligner started; no error check required
                return

            # Check whether FAN pushed a start-up error to the output
            # queue shortly after signalling its init event
            try:
                err = None
                err = out_queue.get(True, 1)
            except QueueEmpty:
                pass
            if not err:
                break
            process.join()
            logger.error("Error initializing FAN. Trying CV2-DNN")
Ejemplo n.º 4
0
    def init_extractor(self, loglevel):
        """ Initialize the manual detector and a FAN aligner.

        Spawns detector and aligner child processes, falling back from the
        "fan" aligner to "cv2_dnn" if FAN fails to initialize.

        Parameters
        ----------
        loglevel: str
            The log level to pass to the plugins

        Raises
        ------
        ValueError
            If the detector, or both aligner plugins, fail to initialize
        """
        logger.debug("Initialize Extractor")
        out_queue = queue_manager.get_queue("out")

        d_kwargs = {"in_queue": queue_manager.get_queue("in"),
                    "out_queue": queue_manager.get_queue("align")}
        a_kwargs = {"in_queue": queue_manager.get_queue("align"),
                    "out_queue": out_queue}

        detector = PluginLoader.get_detector("manual")(loglevel=loglevel)
        detect_process = SpawnProcess(detector.run, **d_kwargs)
        d_event = detect_process.event
        detect_process.start()

        for plugin in ("fan", "cv2_dnn"):
            aligner = PluginLoader.get_aligner(plugin)(loglevel=loglevel)
            align_process = SpawnProcess(aligner.run, **a_kwargs)
            a_event = align_process.event
            align_process.start()

            # Wait for Aligner to initialize
            # The first ever load of the model for FAN has reportedly taken
            # up to 3-4 minutes, hence high timeout.
            a_event.wait(300)
            if not a_event.is_set():
                if plugin == "fan":
                    align_process.join()
                    logger.error("Error initializing FAN. Trying CV2-DNN")
                    continue
                else:
                    # Fix: message previously read "inititalizing"
                    raise ValueError("Error initializing Aligner")
            if plugin == "cv2_dnn":
                # Fallback aligner started; no error check required
                break

            # Check whether FAN pushed a start-up error to the output queue
            try:
                err = None
                err = out_queue.get(True, 1)
            except QueueEmpty:
                pass
            if not err:
                break
            align_process.join()
            logger.error("Error initializing FAN. Trying CV2-DNN")

        d_event.wait(10)
        if not d_event.is_set():
            # Fix: message previously read "inititalizing"
            raise ValueError("Error initializing Detector")

        self.extractor["detect"] = detector
        self.extractor["align"] = aligner
        logger.debug("Initialized Extractor")
Ejemplo n.º 5
0
 def make_queues(side, is_timelapse):
     """ Return the in/out buffer token queues for Fixed Producer Dispatcher """
     prefix = "timelapse" if is_timelapse else "train"
     names = ["{}_{}_{}".format(prefix, side, direction)
              for direction in ("in", "out")]
     logger.debug(names)
     return [queue_manager.get_queue(name) for name in names]
Ejemplo n.º 6
0
    def convert_images(self):
        """ Convert the images.

        Launches a pool of converter processes fed from the "patch" queue,
        polls once a second until the disk I/O completion event is set,
        then joins the pool and sends "EOF" to the save queue.
        """
        logger.debug("Converting images")
        save_queue = queue_manager.get_queue("convert_out")
        patch_queue = queue_manager.get_queue("patch")
        pool = PoolProcess(self.converter.process, patch_queue, save_queue,
                           processes=self.pool_processes)
        pool.start()
        # Poll for thread errors until all frames have been read from disk
        while True:
            self.check_thread_error()
            if self.disk_io.completion_event.is_set():
                break
            sleep(1)
        pool.join()

        # Signal the saver that no more converted frames are coming
        save_queue.put("EOF")
        logger.debug("Converted images")
Ejemplo n.º 7
0
 def add_queue(self, task):
     """ Add the queue to queue_manager and set queue attribute """
     logger.debug("Adding queue for task: '%s'", task)
     # "load" and "save" map to the shared convert queues; anything else
     # uses its own name
     q_name = {"load": "convert_in", "save": "convert_out"}.get(task, task)
     setattr(self, "{}_queue".format(task), queue_manager.get_queue(q_name))
     logger.debug("Added queue for task: '%s'", task)
Ejemplo n.º 8
0
 def add_queues(self):
     """ Add the required processing queues to Queue Manager """
     queues = dict()
     for task in ("extract_detect_in", "extract_align_in", "extract_align_out"):
         # Limit queue size to avoid stacking ram
         if task == "extract_detect_in" or (not self.is_parallel
                                            and task == "extract_align_in"):
             maxsize = 64
         else:
             maxsize = 32
         queue_manager.add_queue(task, maxsize=maxsize)
         queues[task] = queue_manager.get_queue(task)
     logger.debug("Queues: %s", queues)
     return queues
Ejemplo n.º 9
0
    def launch_detector(self):
        """ Launch the face detector.

        Runs the detector in a process pool or a spawned process depending
        on the plugin, then waits up to five minutes for its init event,
        polling once a second so a child error event is noticed promptly.
        Pool processes expose no event and are assumed launched.

        Raises
        ------
        ValueError
            If the detector errors or does not initialize within 5 minutes
        """
        logger.debug("Launching Detector")
        out_queue = queue_manager.get_queue("detect")
        kwargs = {
            "in_queue": queue_manager.get_queue("load"),
            "out_queue": out_queue
        }
        if self.converter_args:
            kwargs["processes"] = 1
        mp_func = PoolProcess if self.detector.parent_is_pool else SpawnProcess
        self.process_detect = mp_func(self.detector.run, **kwargs)

        event = getattr(self.process_detect, "event", None)
        error = getattr(self.process_detect, "error", None)
        self.process_detect.start()

        if event is None:
            logger.debug("Launched Detector")
            return

        for mins in reversed(range(5)):
            for _ in range(60):
                # Fix: was `event.wait(seconds)` with seconds in range(60),
                # which waited 0+1+...+59 seconds (~30 minutes) per loop
                # instead of checking once per second for one minute
                event.wait(1)
                if event.is_set():
                    break
                if error and error.is_set():
                    break
            if event.is_set():
                break
            if mins == 0 or (error and error.is_set()):
                raise ValueError("Error initializing Detector")
            logger.info("Waiting for Detector... Time out in %s minutes", mins)

        logger.debug("Launched Detector")
Ejemplo n.º 10
0
    def launch_aligner():
        """ Load the aligner plugin to retrieve landmarks.

        Tries the "fan" aligner first, falling back to "dlib" if FAN fails
        to initialize or reports an error on start up.

        Raises
        ------
        ValueError
            If the fallback "dlib" aligner also fails to initialize
        """
        out_queue = queue_manager.get_queue("out")
        kwargs = {
            "in_queue": queue_manager.get_queue("in"),
            "out_queue": out_queue
        }

        for plugin in ("fan", "dlib"):
            aligner = PluginLoader.get_aligner(plugin)()
            process = SpawnProcess(aligner.run, **kwargs)
            event = process.event
            process.start()
            # Wait for Aligner to take init
            # The first ever load of the model for FAN has reportedly taken
            # up to 3-4 minutes, hence high timeout.
            event.wait(300)

            if not event.is_set():
                if plugin == "fan":
                    process.join()
                    logger.error("Error initializing FAN. Trying Dlib")
                    continue
                else:
                    # Fix: message previously read "inititalizing"
                    raise ValueError("Error initializing Aligner")
            if plugin == "dlib":
                # Fallback aligner started; no error check required
                return

            # Check whether FAN pushed a start-up error to the output queue
            try:
                err = None
                err = out_queue.get(True, 1)
            except QueueEmpty:
                pass
            if not err:
                break
            process.join()
            logger.error("Error initializing FAN. Trying Dlib")
Ejemplo n.º 11
0
    def load_extractor(self):
        """ Set up on-the-fly extraction when no alignments file is found.

        Spawns a dlib-hog detector and dlib aligner in child processes and
        waits for each to signal initialization.

        Raises
        ------
        ValueError
            If the detector or aligner fails to initialize within 10 seconds
        """
        # Fix: user-facing message previously read "perfom"
        print("\nNo Alignments file found. Extracting on the fly.\n"
              "NB: This will use the inferior dlib-hog for extraction "
              "and dlib pose predictor for landmarks.\nIt is recommended "
              "to perform Extract first for superior results\n")
        for task in ("load", "detect", "align"):
            queue_manager.add_queue(task, maxsize=0)

        detector = PluginLoader.get_detector("dlib_hog")(
            verbose=self.args.verbose)
        aligner = PluginLoader.get_aligner("dlib")(verbose=self.args.verbose)

        d_kwargs = {
            "in_queue": queue_manager.get_queue("load"),
            "out_queue": queue_manager.get_queue("detect")
        }
        a_kwargs = {
            "in_queue": queue_manager.get_queue("detect"),
            "out_queue": queue_manager.get_queue("align")
        }

        d_process = SpawnProcess()
        d_event = d_process.event
        a_process = SpawnProcess()
        a_event = a_process.event

        d_process.in_process(detector.detect_faces, **d_kwargs)
        a_process.in_process(aligner.align, **a_kwargs)
        d_event.wait(10)
        if not d_event.is_set():
            # Fix: message previously read "inititalizing"
            raise ValueError("Error initializing Detector")
        a_event.wait(10)
        if not a_event.is_set():
            # Fix: message previously read "inititalizing"
            raise ValueError("Error initializing Aligner")

        self.extract_faces = True
Ejemplo n.º 12
0
 def make_queues(side, is_preview, is_timelapse):
     """ Create the buffer token queues for Fixed Producer Dispatcher """
     # Preview takes precedence over timelapse; default is training
     if is_preview:
         prefix = "preview"
     elif is_timelapse:
         prefix = "timelapse"
     else:
         prefix = "train"
     q_names = ["{}_{}_{}".format(prefix, side, direction)
                for direction in ("in", "out")]
     logger.debug(q_names)
     return [queue_manager.get_queue(q_name) for q_name in q_names]
Ejemplo n.º 13
0
    def launch_aligner(self):
        """ Launch the face aligner.

        Spawns the aligner in a child process and blocks until its init
        event is set, polling once per minute for up to five minutes.

        Raises
        ------
        ValueError
            If the aligner has not signalled initialization within 5 minutes
        """
        logger.debug("Launching Aligner")
        out_queue = queue_manager.get_queue("align")
        kwargs = {"in_queue": queue_manager.get_queue("detect"),
                  "out_queue": out_queue}

        self.process_align = SpawnProcess(self.aligner.run, **kwargs)
        event = self.process_align.event
        self.process_align.start()

        # Wait for Aligner to take it's VRAM
        # The first ever load of the model for FAN has reportedly taken
        # up to 3-4 minutes, hence high timeout.
        # TODO investigate why this is and fix if possible
        for mins in reversed(range(5)):
            event.wait(60)
            if event.is_set():
                break
            if mins == 0:
                raise ValueError("Error initializing Aligner")
            logger.info("Waiting for Aligner... Time out in %s minutes", mins)

        logger.debug("Launched Aligner")
Ejemplo n.º 14
0
    def launch_detector(self):
        """ Launch the face detector.

        Runs the detector either in a process pool or a spawned process
        depending on the plugin, then waits up to five minutes for its
        init event. Pool processes expose no event and are assumed
        launched immediately.

        Raises
        ------
        ValueError
            If the detector has not signalled initialization within 5 minutes
        """
        logger.debug("Launching Detector")
        out_queue = queue_manager.get_queue("detect")
        kwargs = {
            "in_queue": queue_manager.get_queue("load"),
            "out_queue": out_queue
        }

        if self.args.detector == "mtcnn":
            # MTCNN takes additional, validated keyword arguments
            mtcnn_kwargs = self.detector.validate_kwargs(
                self.get_mtcnn_kwargs())
            kwargs["mtcnn_kwargs"] = mtcnn_kwargs

        mp_func = PoolProcess if self.detector.parent_is_pool else SpawnProcess
        self.process_detect = mp_func(self.detector.run, **kwargs)

        event = None
        if hasattr(self.process_detect, "event"):
            event = self.process_detect.event

        self.process_detect.start()

        if event is None:
            # No init event to wait on (pool process); assume launched
            logger.debug("Launched Detector")
            return

        for mins in reversed(range(5)):
            event.wait(60)
            if event.is_set():
                break
            if mins == 0:
                raise ValueError("Error initializing Detector")
            logger.info("Waiting for Detector... Time out in %s minutes", mins)

        logger.debug("Launched Detector")
Ejemplo n.º 15
0
    def update_landmarks(self):
        """ Update the landmarks.

        Feeds the current frame and bounding box to the aligner "in" queue,
        waits for the resulting landmarks on the "out" queue, then adds or
        updates the face in the alignments data.
        """
        queue_manager.get_queue("in").put({
            "image": self.media["image"],
            "face": self.media["bounding_box"]
        })
        # Blocks until the aligner returns a result
        landmarks = queue_manager.get_queue("out").get()
        if landmarks == "EOF":
            exit(0)
        # Only the first detected face/landmark set is used
        alignment = self.extracted_to_alignment(
            (landmarks["detected_faces"][0], landmarks["landmarks"][0]))
        frame = self.media["frame_id"]

        if self.interface.get_selected_face_id() is None:
            # No face selected: add as a new face and select it
            idx = self.alignments.add_face(frame, alignment)
            self.interface.set_state_value("edit", "selected", idx)
        else:
            # Replace the selected face's alignment and force a redraw
            self.alignments.update_face(frame,
                                        self.interface.get_selected_face_id(),
                                        alignment)
            self.interface.set_redraw(True)

        self.interface.state["edit"]["updated"] = True
        self.interface.state["edit"]["update_faces"] = True
Ejemplo n.º 16
0
    def save_faces(self):
        """ Save the generated faces.

        Consumes items from the "save" queue until the "EOF" sentinel is
        received, writing each face to disk. No-op unless face export is
        enabled.
        """
        if not self.export_face:
            return

        save_queue = queue_manager.get_queue("save")
        # Pull items until the "EOF" sentinel arrives
        for item in iter(save_queue.get, "EOF"):
            filename, output_file, resized_face, idx = item
            suffix = Path(filename).suffix
            out_filename = "{}_{}{}".format(str(output_file), str(idx), suffix)
            # pylint: disable=no-member
            cv2.imwrite(out_filename, resized_face)
Ejemplo n.º 17
0
 def _add_queues(self):
     """ Add the required processing queues to Queue Manager """
     # One input queue per phase plus a single output queue, all scoped
     # to this extractor instance
     task_names = ["extract{}_{}_in".format(self._instance, phase)
                   for phase in self._flow]
     task_names.append("extract{}_{}_out".format(self._instance,
                                                 self._final_phase))
     queues = dict()
     for name in task_names:
         # Limit queue size to avoid stacking ram
         queue_manager.add_queue(name, maxsize=self._queue_size)
         queues[name] = queue_manager.get_queue(name)
     logger.debug("Queues: %s", queues)
     return queues
Ejemplo n.º 18
0
    def _convert_images(self):
        """ Start the multi-threaded patching process, monitor all threads for errors and join on
        completion. """
        logger.debug("Converting images")
        save_queue = queue_manager.get_queue("convert_out")
        patch_queue = queue_manager.get_queue("patch")
        self._patch_threads = MultiThread(self._converter.process, patch_queue, save_queue,
                                          thread_count=self._pool_processes, name="patch")

        self._patch_threads.start()
        # Poll once a second until either disk I/O signals completion or
        # all patch threads have finished, re-raising any thread errors
        while True:
            self._check_thread_error()
            if self._disk_io.completion_event.is_set():
                logger.debug("DiskIO completion event set. Joining Pool")
                break
            if self._patch_threads.completed():
                logger.debug("All patch threads completed")
                break
            sleep(1)
        self._patch_threads.join()

        logger.debug("Putting EOF")
        # Signal the saver that no more patched frames are coming
        save_queue.put("EOF")
        logger.debug("Converted images")
Ejemplo n.º 19
0
 def _add_queues(self):
     """ Add the required processing queues to Queue Manager """
     queues = dict()
     # One input queue per phase plus a single output queue
     tasks = ["extract_{}_in".format(phase) for phase in self._flow]
     tasks.append("extract_{}_out".format(self._final_phase))
     for task in tasks:
         # Limit queue size to avoid stacking ram
         # NB: self._queue_size is mutated on every iteration; after the
         # loop it holds whatever the final task resolved to
         self._queue_size = 32
         if task == "extract_{}_in".format(
                 self._flow[0]) or (not self._is_parallel
                                    and not task.endswith("_out")):
             # First-phase input (and all inputs when serial) gets a
             # larger buffer
             self._queue_size = 64
         queue_manager.add_queue(task, maxsize=self._queue_size)
         queues[task] = queue_manager.get_queue(task)
     logger.debug("Queues: %s", queues)
     return queues
Ejemplo n.º 20
0
 def load_images(self):
     """ Load the images """
     logger.debug("Load Images: Start")
     load_queue = queue_manager.get_queue("load")
     for filename, image in self.images.load():
         # Stop feeding if the queue has been told to shut down
         if load_queue.shutdown.is_set():
             logger.debug("Load Queue: Stop signal received. Terminating")
             break
         # Frames that already have alignments are skipped
         if os.path.basename(filename) in self.alignments.data:
             logger.trace("Skipping image: '%s'", filename)
             continue
         load_queue.put({"filename": filename, "image": image})
     load_queue.put("EOF")
     logger.debug("Load Images: Complete")
Ejemplo n.º 21
0
    def process_faces(self, filename, faces):
        """ Perform processing on found faces.

        Queues each detected face for saving (when exporting) and stores
        the frame's alignments.

        NOTE(review): the ``filename`` parameter is immediately overwritten
        by ``faces["filename"]``, so the argument passed in is never used.
        """
        final_faces = list()
        save_queue = queue_manager.get_queue("save")
        filename = faces["filename"]
        output_file = faces["output_file"]

        for idx, face in enumerate(faces["detected_faces"]):
            if self.export_face:
                save_queue.put((filename,
                                output_file,
                                face.aligned_face,
                                idx))

            final_faces.append(face.to_alignment())
        self.alignments.data[os.path.basename(filename)] = final_faces
Ejemplo n.º 22
0
    def _add_queue(self, task):
        """ Add the queue to queue_manager and to :attr:`self._queues` for the given task.

        Parameters
        ----------
        task: {"load", "save"}
            The task that the queue is to be added for
        """
        logger.debug("Adding queue for task: '%s'", task)
        # "load" and "save" alias the shared convert queues; anything else
        # uses its own name
        aliases = {"load": "convert_in", "save": "convert_out"}
        self._queues[task] = queue_manager.get_queue(aliases.get(task, task))
        logger.debug("Added queue for task: '%s'", task)
Ejemplo n.º 23
0
    def run_extraction(self, save_thread):
        """ Run Face Detection.

        Launches the detector/aligner plugins (in parallel or serial mode),
        iterates detected faces with a progress bar, aligns and
        post-processes each frame, and periodically saves the alignments.

        Parameters
        ----------
        save_thread: thread
            The running face-saver thread; joined once extraction completes
        """
        save_queue = queue_manager.get_queue("save")
        to_process = self.process_item_count()
        frame_no = 0
        # Fall back to defaults when the args don't expose these options
        size = self.args.size if hasattr(self.args, "size") else 256
        align_eyes = self.args.align_eyes if hasattr(self.args,
                                                     "align_eyes") else False

        if self.plugins.is_parallel:
            logger.debug("Using parallel processing")
            self.plugins.launch_aligner()
            self.plugins.launch_detector()
        if not self.plugins.is_parallel:
            # Serial: run the full detection pass before launching alignment
            logger.debug("Using serial processing")
            self.run_detection(to_process)
            self.plugins.launch_aligner()

        # NOTE(review): "******" looks like a redacted/corrupted literal in
        # this snippet; the intended extract_pass value should be confirmed
        # against the original source
        for faces in tqdm(self.plugins.detect_faces(extract_pass="******"),
                          total=to_process,
                          file=sys.stdout,
                          desc="Extracting faces"):

            filename = faces["filename"]

            self.align_face(faces, align_eyes, size, filename)
            self.post_process.do_actions(faces)

            faces_count = len(faces["detected_faces"])
            if faces_count == 0:
                logger.verbose("No faces were detected in image: %s",
                               os.path.basename(filename))

            # Flag for a post-run warning when any frame held multiple faces
            if not self.verify_output and faces_count > 1:
                self.verify_output = True

            self.process_faces(filename, faces, save_queue)

            frame_no += 1
            # Periodically persist the alignments file
            if frame_no == self.save_interval:
                self.alignments.save()
                frame_no = 0

        if self.export_face:
            save_queue.put("EOF")
        save_thread.join()
Ejemplo n.º 24
0
    def __init__(self, in_queue, queue_size, arguments):
        """ Initialize the predictor and start its background thread.

        Parameters
        ----------
        in_queue: queue
            The queue containing faces to predict on
        queue_size: int
            The size of the input queue, used to derive the batch size
        arguments: :class:`argparse.Namespace`
            The command line arguments
        """
        logger.debug("Initializing %s: (args: %s, queue_size: %s, in_queue: %s)",
                     self.__class__.__name__, arguments, queue_size, in_queue)
        self.batchsize = self.get_batchsize(queue_size)
        self.args = arguments
        self.in_queue = in_queue
        # Predicted faces are pushed on to the patching stage
        self.out_queue = queue_manager.get_queue("patch")
        self.serializer = Serializer.get_serializer("json")
        self.faces_count = 0
        self.verify_output = False
        self.model = self.load_model()
        self.predictor = self.model.converter(self.args.swap_model)
        self.queues = dict()

        # Prediction runs in a single background thread
        self.thread = MultiThread(self.predict_faces, thread_count=1)
        self.thread.start()
        logger.debug("Initialized %s: (out_queue: %s)", self.__class__.__name__, self.out_queue)
Ejemplo n.º 25
0
 def reload_images(self, detected_faces):
     """ Reload the images and pair to detected face """
     logger.debug("Reload Images: Start. Detected Faces Count: %s", len(detected_faces))
     detect_queue = queue_manager.get_queue("detect")
     for filename, image in self.images.load():
         # Stop feeding if the queue has been told to shut down
         if detect_queue.shutdown.is_set():
             logger.debug("Reload Queue: Stop signal received. Terminating")
             break
         logger.trace("Reloading image: '%s'", filename)
         item = detected_faces.pop(filename, None)
         if not item:
             logger.warning("Couldn't find faces for: %s", filename)
             continue
         item["image"] = image
         detect_queue.put(item)
     detect_queue.put("EOF")
     logger.debug("Reload Images: Complete")
Ejemplo n.º 26
0
    def __init__(self, path, task, load_with_hash=False, queue_size=16):
        """ Set up the loader's input source, queue and I/O thread.

        The input may be a video file or a folder of images; the frame
        count is obtained accordingly.
        """
        logger.debug(
            "Initializing %s: (path: %s, task: %s, load_with_hash: %s, queue_size: %s)",
            self.__class__.__name__, path, task, load_with_hash, queue_size)
        self._location = path

        self._task = task.lower()
        self._is_video = self._check_input()
        if self._is_video:
            self._input = self.location
            self._count = count_frames_and_secs(self._input)[0]
        else:
            self._input = get_image_paths(self.location)
            self._count = len(self._input)
        q_name = "{}_{}".format(self.__class__.__name__, self._task)
        self._queue = queue_manager.get_queue(name=q_name, maxsize=queue_size)
        self._thread = self._set_thread(io_args=(load_with_hash, ))
        self._thread.start()
Ejemplo n.º 27
0
    def __init__(self, in_queue, queue_size, arguments):
        """ Initialize the predictor and start its background thread.

        Parameters
        ----------
        in_queue: queue
            The queue containing faces to predict on
        queue_size: int
            The size of the input queue; the batch size is capped at 16
        arguments: :class:`argparse.Namespace`
            The command line arguments
        """
        logger.debug("Initializing %s: (args: %s, queue_size: %s, in_queue: %s)",
                     self.__class__.__name__, arguments, queue_size, in_queue)
        self.batchsize = min(queue_size, 16)
        self.args = arguments
        self.in_queue = in_queue
        # Predicted faces are pushed on to the patching stage
        self.out_queue = queue_manager.get_queue("patch")
        self.serializer = Serializer.get_serializer("json")
        self.faces_count = 0
        self.verify_output = False
        self.model = self.load_model()
        self.predictor = self.model.converter(self.args.swap_model)
        self.queues = dict()

        # Prediction runs in a single background thread
        self.thread = MultiThread(self.predict_faces, thread_count=1)
        self.thread.start()
        logger.debug("Initialized %s: (out_queue: %s)", self.__class__.__name__, self.out_queue)
Ejemplo n.º 28
0
    def __init__(self, in_queue, queue_size, arguments):
        """ Initialize the predictor and launch its prediction thread.

        Parameters
        ----------
        in_queue: queue
            The queue containing faces to predict on
        queue_size: int
            The size of the input queue, used to derive the batch size
        arguments: :class:`argparse.Namespace`
            The command line arguments
        """
        logger.debug(
            "Initializing %s: (args: %s, queue_size: %s, in_queue: %s)",
            self.__class__.__name__, arguments, queue_size, in_queue)
        self._batchsize = self._get_batchsize(queue_size)
        self._args = arguments
        self._in_queue = in_queue
        # Predicted faces are pushed on to the patching stage
        self._out_queue = queue_manager.get_queue("patch")
        self._serializer = get_serializer("json")
        self._faces_count = 0
        self._verify_output = False

        self._model = self._load_model()
        self._sizes = self._get_io_sizes()
        self._coverage_ratio = self._model.coverage_ratio

        # Prediction runs on its own thread
        self._thread = self._launch_predictor()
        logger.debug("Initialized %s: (out_queue: %s)",
                     self.__class__.__name__, self._out_queue)
Ejemplo n.º 29
0
    def __init__(self, arguments):
        """ Set up extraction: I/O, alignments, post-processing, extractor.

        Parameters
        ----------
        arguments: :class:`argparse.Namespace`
            The command line arguments
        """
        # Fix: the log format string was missing its closing parenthesis
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector, self.args.aligner,
                                   self.args.loglevel, self.args.multiprocess,
                                   self.args.rotate_images, self.args.min_size)

        # Queue that the saver thread consumes extracted faces from
        self.save_queue = queue_manager.get_queue("extract_save")
        self.verify_output = False
        # Save alignments every N frames if requested, else only at the end
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)
Ejemplo n.º 30
0
    def save_faces():
        """ Save the generated faces.

        Consumes (filename, face bytes) pairs from the "save" queue and
        writes each face to disk, until either the queue's shutdown flag is
        set or the "EOF" sentinel is received. Write failures are logged
        and skipped.
        """
        logger.debug("Save Faces: Start")
        save_queue = queue_manager.get_queue("save")
        while True:
            # Check shutdown before blocking on the queue
            if save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            payload = save_queue.get()
            if payload == "EOF":
                break
            filename, face = payload
            logger.trace("Saving face: '%s'", filename)
            try:
                with open(filename, "wb") as handle:
                    handle.write(face)
            except Exception as error:  # pylint: disable=broad-except
                logger.error("Failed to save image '%s'. Original Error: %s", filename, error)
                continue
        logger.debug("Save Faces: Complete")
Ejemplo n.º 31
0
    def __init__(self, arguments):
        """ Set up extraction: I/O, alignments, post-processing, extractor.

        Parameters
        ----------
        arguments: :class:`argparse.Namespace`
            The command line arguments
        """
        # Fix: the log format string was missing its closing parenthesis
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector,
                                   self.args.aligner,
                                   self.args.loglevel,
                                   self.args.multiprocess,
                                   self.args.rotate_images,
                                   self.args.min_size)

        # Queue that the saver thread consumes extracted faces from
        self.save_queue = queue_manager.get_queue("extract_save")
        self.verify_output = False
        # Save alignments every N frames if requested, else only at the end
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)
Ejemplo n.º 32
0
 def minibatch(self, q_name, load_thread):
     """ A generator function that yields epoch, batchsize of warped_img
         and batchsize of target_img from the load queue """
     logger.debug("Launching minibatch generator for queue: '%s'", q_name)
     queue = queue_manager.get_queue(q_name)
     while True:
         # Stop generating if the feeder thread has raised
         if load_thread.has_error:
             logger.debug("Thread error detected")
             break
         batch = list()
         # Collect `batchsize` queue items; each item is a sequence of
         # images which are regrouped so batch[idx] holds every image at
         # position idx across the minibatch
         for _ in range(self.batchsize):
             images = queue.get()
             for idx, image in enumerate(images):
                 if len(batch) < idx + 1:
                     batch.append(list())
                 batch[idx].append(image)
         # Convert each position's list of images to a float32 array
         batch = [np.float32(image) for image in batch]
         logger.trace(
             "Yielding batch: (size: %s, item shapes: %s, queue:  '%s'",
             len(batch), [item.shape for item in batch], q_name)
         yield batch
     logger.debug("Finished minibatch generator for queue: '%s'", q_name)
     load_thread.join()
Ejemplo n.º 33
0
 def process(self, trigger_event, shutdown_event, patch_queue_in, samples, tk_vars):
     """ Wait for event trigger and run when process when set """
     patch_queue_out = queue_manager.get_queue("preview_patch_out")
     while True:
         # Wake at least once a second so shutdown is noticed promptly
         trigger = trigger_event.wait(1)
         if shutdown_event.is_set():
             logger.debug("Shutdown received")
             break
         if not trigger:
             continue
         # Clear trigger so calling process can set it during this run
         trigger_event.clear()
         tk_vars["busy"].set(True)
         # Drop any stale items before feeding the current samples
         queue_manager.flush_queue("preview_patch_in")
         self.feed_swapped_faces(patch_queue_in, samples)
         # Rebuild the converter with the latest config under the lock
         with self.lock:
             self.update_converter_arguments()
             self.converter.reinitialize(config=self.current_config)
         swapped = self.patch_faces(patch_queue_in, patch_queue_out, samples.sample_size)
         with self.lock:
             self.display.destination = swapped
         tk_vars["refresh"].set(True)
         tk_vars["busy"].set(False)
Ejemplo n.º 34
0
    def __init__(self, arguments, samples, display, lock, trigger,
                 config_tools, tk_vars):
        """ Set up the patcher and start its background thread.

        Builds a :class:`Converter` from the supplied arguments and runs
        :meth:`process` in a single named thread.
        """
        logger.debug(
            "Initializing %s: (arguments: '%s', samples: %s: display: %s, lock: %s,"
            " trigger: %s, config_tools: %s, tk_vars %s)",
            self.__class__.__name__, arguments, samples, display, lock,
            trigger, config_tools, tk_vars)
        self.samples = samples
        # Queue feeding faces into the patching run
        self.queue_patch_in = queue_manager.get_queue("preview_patch_in")
        self.display = display
        self.lock = lock
        self.trigger = trigger
        self.current_config = config_tools.config
        self.converter_arguments = None  # Updated converter arguments dict

        configfile = arguments.configfile if hasattr(arguments,
                                                     "configfile") else None
        self.converter = Converter(
            output_dir=None,
            output_size=self.samples.predictor.output_size,
            output_has_mask=self.samples.predictor.has_predicted_mask,
            draw_transparent=False,
            pre_encode=None,
            configfile=configfile,
            arguments=self.generate_converter_arguments(arguments))

        # Event used to signal the patch thread to shut down
        self.shutdown = Event()

        self.thread = MultiThread(self.process,
                                  self.trigger,
                                  self.shutdown,
                                  self.queue_patch_in,
                                  self.samples,
                                  tk_vars,
                                  thread_count=1,
                                  name="patch_thread")
        self.thread.start()
Ejemplo n.º 35
0
    def __init__(self, arguments, sample_size, display, lock, trigger_patch):
        """ Load the source images/alignments and launch the predictor.

        Parameters
        ----------
        arguments: :class:`argparse.Namespace`
            The command line arguments
        sample_size: int
            The number of sample faces to process
        display: object
            The display object to update with generated output
        lock: :class:`threading.Lock`
            Lock shared with the patching process
        trigger_patch: :class:`threading.Event`
            Event used to request a patch run
        """
        logger.debug("Initializing %s: (arguments: '%s', sample_size: %s, display: %s, lock: %s, "
                     "trigger_patch: %s)", self.__class__.__name__, arguments, sample_size,
                     display, lock, trigger_patch)
        self.sample_size = sample_size
        self.display = display
        self.lock = lock
        self.trigger_patch = trigger_patch
        self.input_images = []       # populated with the loaded sample frames
        self.predicted_images = []   # populated once the predictor has run

        # Source media and its alignments data
        self.images = Images(arguments)
        self.alignments = Alignments(
            arguments, is_extract=False, input_is_video=self.images.is_video)
        self.filelist = self.get_filelist()
        self.indices = self.get_indices()

        # Kick off prediction and generate the initial samples
        self.predictor = Predict(
            queue_manager.get_queue("preview_predict_in"), sample_size, arguments)
        self.generate()

        logger.debug("Initialized %s", self.__class__.__name__)
Ejemplo n.º 36
0
    def patch_iterator(self, processes):
        """ Yield converted items from the "out" queue until every worker is done.

        Parameters
        ----------
        processes: int
            The number of worker processes. One "EOF" sentinel is expected from
            each worker before the iterator is considered exhausted
        """
        out_queue = queue_manager.get_queue("out")
        eof_count = 0

        while True:
            try:
                item = out_queue.get(True, 1)
            except QueueEmpty:
                # Nothing within the 1 second timeout; check worker health, retry
                self.check_thread_error()
                continue
            self.check_thread_error()

            if item != "EOF":
                logger.trace("Yielding: '%s'", item[0])
                yield item
                continue

            eof_count += 1
            logger.debug("Got EOF %s of %s", eof_count, processes)
            if eof_count == processes:
                break
        logger.debug("iterator exhausted")
        return "EOF"
Ejemplo n.º 37
0
    def patch_iterator(self, processes):
        """ Yield converted items from the "out" queue, tracking worker EOFs.

        Parameters
        ----------
        processes: int
            Number of worker processes feeding the queue. The iterator finishes
            once an "EOF" sentinel has arrived from each of them
        """
        results_queue = queue_manager.get_queue("out")
        finished = 0

        while True:
            try:
                item = results_queue.get(True, 1)
            except QueueEmpty:
                # Timed out waiting; verify the worker threads are healthy
                self.check_thread_error()
                continue
            self.check_thread_error()

            if item != "EOF":
                logger.trace("Yielding: '%s'", item[0])
                yield item
                continue

            finished += 1
            logger.debug("Got EOF %s of %s", finished, processes)
            if finished == processes:
                break
        logger.debug("iterator exhausted")
        return "EOF"
Ejemplo n.º 38
0
    def __init__(self, in_queue, queue_size, arguments):
        """ Load the model and start the background prediction thread.

        Parameters
        ----------
        in_queue: queue
            Queue feeding faces to be predicted
        queue_size: int
            Used to derive the prediction batch size
        arguments: :class:`argparse.Namespace`
            The command line arguments for the conversion session
        """
        logger.debug(
            "Initializing %s: (args: %s, queue_size: %s, in_queue: %s)",
            self.__class__.__name__, arguments, queue_size, in_queue)
        self._args = arguments
        self._in_queue = in_queue
        self._out_queue = queue_manager.get_queue("patch")
        self._batchsize = self._get_batchsize(queue_size)
        self._serializer = get_serializer("json")
        self._faces_count = 0
        self._verify_output = False

        # Optionally enable memory growth before the model is loaded
        if arguments.allow_growth:
            self._set_tf_allow_growth()

        self._model = self._load_model()
        # Indices into the model outputs for the face and mask layers
        self._output_indices = {"face": self._model.largest_face_index,
                                "mask": self._model.largest_mask_index}
        self._predictor = self._model.converter(self._args.swap_model)
        self._thread = self._launch_predictor()
        logger.debug("Initialized %s: (out_queue: %s)",
                     self.__class__.__name__, self._out_queue)
Ejemplo n.º 39
0
 def launch_aligner(self):
     """ Load the aligner plugin to retrieve landmarks """
     # Queues wiring the aligner plugin: frames go in on "in",
     # landmarks come back on "out"
     # NOTE(review): this snippet appears truncated -- `kwargs` is built but
     # the remainder of the function is not visible here
     out_queue = queue_manager.get_queue("out")
     kwargs = {"in_queue": queue_manager.get_queue("in"),
               "out_queue": out_queue}
Ejemplo n.º 40
0
        face = face.to_dlib_rect()
=======
        face = face.to_bounding_box_dict()
>>>>>>> upstream/master
        return {"image": image,
                "detected_faces": [face]}

    @staticmethod
    def get_landmarks(filename):
        """ Extract the face from a frame (If not alignments file found)

        Parameters
        ----------
        filename: str
            Full path to the image to load

        Returns
        -------
        The landmarks for the first face the aligner detects in the image
        """
        # NOTE(review): resolved leftover git merge-conflict markers that made
        # this method unparseable; kept the cv2.imread branch as cv2 is the
        # image reader used elsewhere in this module
        image = cv2.imread(filename)
        queue_manager.get_queue("in").put(Sort.alignment_dict(image))
        face = queue_manager.get_queue("out").get()
        landmarks = face["landmarks"][0]
        return landmarks

    def sort_process(self):
        """
        This method dynamically assigns the functions that will be used to run
        the core process of sorting, optionally grouping, renaming/moving into
        folders. After the functions are assigned they are executed.
        """
        sort_method = self.args.sort_method.lower()
        group_method = self.args.group_method.lower()
        final_method = self.args.final_process.lower()

        img_list = getattr(self, sort_method)()
Ejemplo n.º 41
0
 def get_landmarks(self, filename):
     """ Return the aligner's landmarks for the image at `filename` """
     # Feed the loaded frame to the aligner and block until it responds
     frame = cv2.imread(filename)
     queue_manager.get_queue("in").put(self.alignment_dict(frame))
     result = queue_manager.get_queue("out").get()
     detected = result["detected_faces"][0]
     return detected.landmarksXY