Example #1
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self._args = arguments

        self._patch_threads = None
        self._images = ImagesLoader(self._args.input_dir, fast_count=True)
        self._alignments = Alignments(self._args, False, self._images.is_video)

        self._opts = OptionalActions(self._args, self._images.file_list,
                                     self._alignments)

        self._add_queues()
        self._disk_io = DiskIO(self._alignments, self._images, arguments)
        self._predictor = Predict(self._disk_io.load_queue, self._queue_size,
                                  arguments)
        self._validate()
        get_folder(self._args.output_dir)

        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        self._converter = Converter(self._predictor.output_size,
                                    self._predictor.coverage_ratio,
                                    self._disk_io.draw_transparent,
                                    self._disk_io.pre_encode,
                                    arguments,
                                    configfile=configfile)

        logger.debug("Initialized %s", self.__class__.__name__)
Example #2
    def handleImage(self, image, filename):
        faces = self.get_faces(image)
        process_faces = [(idx, face) for idx, face in faces]

        # Run image rotator if requested and no faces found
        if self.arguments.rotate_images.lower() == 'on' and len(
                process_faces) == 0:
            process_faces, image = self.imageRotator(image)

        rvals = []
        for idx, face in process_faces:
            # Draws landmarks for debug
            if self.arguments.debug_landmarks:
                for (x, y) in face.landmarksAsXY():
                    cv2.circle(image, (x, y), 2, (0, 0, 255), -1)

            resized_image, t_mat = self.extractor.extract(
                image, face, 256, self.arguments.align_eyes)
            output_file = get_folder(self.output_dir) / Path(filename).stem

            # Detect blurry images
            if self.arguments.blur_thresh is not None:
                aligned_landmarks = self.extractor.transform_points(
                    face.landmarksAsXY(), t_mat, 256, 48)
                feature_mask = self.extractor.get_feature_mask(
                    aligned_landmarks / 256, 256, 48)
                feature_mask = cv2.blur(feature_mask, (10, 10))
                isolated_face = cv2.multiply(
                    feature_mask, resized_image.astype(float)).astype(np.uint8)
                blurry, focus_measure = is_blurry(isolated_face,
                                                  self.arguments.blur_thresh)
                # print("{} focus measure: {}".format(Path(filename).stem, focus_measure))
                # cv2.imshow("Isolated Face", isolated_face)
                # cv2.waitKey(0)
                # cv2.destroyAllWindows()
                if blurry:
                    print(
                        "{}'s focus measure of {} was below the blur threshold, moving to \"blurry\""
                        .format(Path(filename).stem, focus_measure))
                    output_file = get_folder(
                        Path(self.output_dir) /
                        Path("blurry")) / Path(filename).stem

            cv2.imwrite(
                '{}_{}{}'.format(str(output_file), str(idx),
                                 Path(filename).suffix), resized_image)
            f = {
                "r": face.r,
                "x": face.x,
                "w": face.w,
                "y": face.y,
                "h": face.h,
                "landmarksXY": face.landmarksAsXY()
            }
            rvals.append(f)
        return rvals
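
The file-naming pattern above reappears in several later examples: the ensured output folder is joined with the source frame's stem, then the face index and the original suffix are appended. A small illustration with hypothetical paths, assuming get_folder behaves like the sketch after Example #1:

from pathlib import Path

filename = "frames/video_0001.png"  # hypothetical source frame
output_file = get_folder("output/faces") / Path(filename).stem
face_file = '{}_{}{}'.format(str(output_file), 0, Path(filename).suffix)
# face_file == "output/faces/video_0001_0.png" (face index 0)
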
Example #3
 def load_model(self):
     """ Load the model requested for training """
     logger.debug("Loading Model")
     model_dir = get_folder(self.args.model_dir)
     configfile = self.args.configfile if hasattr(self.args,
                                                  "configfile") else None
     model = PluginLoader.get_model(self.trainer_name)(
         model_dir,
         self.args.gpus,
         configfile=configfile,
         no_logs=self.args.no_logs,
         warp_to_landmarks=self.args.warp_to_landmarks,
         augment_color=self.args.augment_color,
         no_flip=self.args.no_flip,
         training_image_size=self.image_size,
         alignments_paths=self.alignments_paths,
         preview_scale=self.args.preview_scale,
         pingpong=self.args.pingpong,
         memory_saving_gradients=self.args.memory_saving_gradients,
         predict=False,
          num_of_sides=len(self.args.input_a))
     logger.debug("Loaded Model")
     return model
Example #4
    def _background_extract(self, output_folder, progress_queue):
        """ Perform the background extraction in a thread so GUI doesn't become unresponsive.

        Parameters
        ----------
        output_folder: str
            The location to save the output faces to
        progress_queue: :class:`queue.Queue`
            The queue to place incremental counts to for updating the GUI's progress bar
        """
        _io = dict(saver=ImagesSaver(str(get_folder(output_folder)), as_bytes=True),
                   loader=ImagesLoader(self._input_location, count=self._alignments.frames_count))

        for frame_idx, (filename, image) in enumerate(_io["loader"].load()):
            logger.trace("Outputting frame: %s: %s", frame_idx, filename)
            src_filename = os.path.basename(filename)
            frame_name = os.path.splitext(src_filename)[0]
            progress_queue.put(1)

            for face_idx, face in enumerate(self._frame_faces[frame_idx]):
                output = "{}_{}{}".format(frame_name, str(face_idx), ".png")
                aligned = AlignedFace(face.landmarks_xy,
                                      image=image,
                                      centering="head",
                                      size=512)  # TODO user selectable size
                meta = dict(alignments=face.to_png_meta(),
                            source=dict(alignments_version=self._alignments.version,
                                        original_filename=output,
                                        face_index=face_idx,
                                        source_filename=src_filename,
                                        source_is_video=self._globals.is_video))

                b_image = encode_image(aligned.face, ".png", metadata=meta)
                _io["saver"].save(output, b_image)
        _io["saver"].close()
Example #5
    def _setup(self, input_a=None, input_b=None, output=None):
        """ Setup the time-lapse folder locations and the time-lapse feed.

        Parameters
        ----------
        input_a: str
            The full path to the time-lapse input folder containing faces for the "a" side
        input_b: str
            The full path to the time-lapse input folder containing faces for the "b" side
        output: str, optional
            The full path to the time-lapse output folder. If ``None`` is provided this will
            default to the model folder
        """
        logger.debug("Setting up time-lapse")
        if output is None:
            output = get_folder(
                os.path.join(str(self._model.model_dir),
                             f"{self._model.name}_timelapse"))
        self._output_file = str(output)
        logger.debug("Time-lapse output set to '%s'", self._output_file)

        # Rewrite paths to pull from the training images so mask and face data can be accessed
        images = {}
        for side, input_ in zip(("a", "b"), (input_a, input_b)):
            training_path = os.path.dirname(self._image_paths[side][0])
            images[side] = [
                os.path.join(training_path, os.path.basename(pth))
                for pth in get_image_paths(input_)
            ]

        batchsize = min(len(images["a"]), len(images["b"]), self._num_images)
        self._feeder.set_timelapse_feed(images, batchsize)
        logger.debug("Set up time-lapse")
Example #6
    def _set_saver(self, arguments):
        """ set the saver in a background thread

        Parameters
        ----------
        arguments: :class:`argparse.Namespace`
            The :mod:`argparse` arguments as passed in from :mod:`tools.py`

        Returns
        -------
        ``None`` or :class:`lib.image.ImagesSaver`:
            If output is requested, returns a :class:`lib.image.ImagesSaver` otherwise
            returns ``None``
        """
        if (not hasattr(arguments, "output")
                or arguments.output is None
                or not arguments.output):
            if self._update_type == "output":
                logger.error(
                    "Processing set as 'output' but no output folder provided."
                )
                sys.exit(0)
            logger.debug("No output provided. Not creating saver")
            return None
        output_dir = str(get_folder(arguments.output, make_folder=True))
        logger.info("Saving preview masks to: '%s'", output_dir)
        saver = ImagesSaver(output_dir)
        logger.debug(saver)
        return saver
Example #7
    def process(self, output_item):
        """ Detect and move blurry face """
        extractor = AlignerExtract()

        for face in output_item["detected_faces"]:
            aligned_landmarks = face.aligned_landmarks
            resized_face = face.aligned_face
            size = face.aligned["size"]
            feature_mask = extractor.get_feature_mask(
                aligned_landmarks / size,
                size, 48)
            feature_mask = cv2.blur(  # pylint: disable=no-member
                feature_mask, (10, 10))
            isolated_face = cv2.multiply(  # pylint: disable=no-member
                feature_mask,
                resized_face.astype(float)).astype(np.uint8)
            blurry, focus_measure = is_blurry(isolated_face, self.blur_thresh)

            if blurry:
                blur_folder = output_item["output_file"].parts[:-1]
                blur_folder = get_folder(Path(*blur_folder) / Path("blurry"))
                frame_name = output_item["output_file"].parts[-1]
                output_item["output_file"] = blur_folder / Path(frame_name)
                if self.verbose:
                    print("{}'s focus measure of {} was below the blur "
                          "threshold, moving to \"blurry\"".format(
                              frame_name, focus_measure))
Example #8
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s", self.__class__.__name__,
                     arguments)
        self._args = arguments
        self._output_dir = None if self._args.skip_saving_faces else get_folder(
            self._args.output_dir)

        logger.info("Output Directory: %s", self._args.output_dir)
        self._images = ImagesLoader(self._args.input_dir, fast_count=True)
        self._alignments = Alignments(self._args, True, self._images.is_video)

        self._existing_count = 0
        self._set_skip_list()

        self._post_process = PostProcess(arguments)
        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        normalization = None if self._args.normalization == "none" else self._args.normalization

        maskers = ["components", "extended"]
        maskers += self._args.masker if self._args.masker else []
        self._extractor = Extractor(self._args.detector,
                                    self._args.aligner,
                                    maskers,
                                    configfile=configfile,
                                    multiprocess=not self._args.singleprocess,
                                    exclude_gpus=self._args.exclude_gpus,
                                    rotate_images=self._args.rotate_images,
                                    min_size=self._args.min_size,
                                    normalize_method=normalization,
                                    re_feed=self._args.re_feed)
        self._threads = list()
        self._verify_output = False
        logger.debug("Initialized %s", self.__class__.__name__)
Example #9
    def load_model(self):
        """ Load the model requested for training """
        model_dir = get_folder(self.args.model_dir)
        model = PluginLoader.get_model(self.trainer_name)(model_dir, self.args.gpus)

        model.load(swapped=False)
        return model
Example #10
File: cli.py Project: cjayyy/faceit
    def process_arguments(self, arguments):
        self.arguments = arguments
        print("Input Directory: {}".format(self.arguments.input_dir))
        print("Output Directory: {}".format(self.arguments.output_dir))
        self.serializer = None
        if self.arguments.serializer is None and self.arguments.alignments_path is not None:
            ext = os.path.splitext(self.arguments.alignments_path)[-1]
            self.serializer = Serializer.get_serializer_fromext(ext)
            print(self.serializer, self.arguments.alignments_path)
        else:
            self.serializer = Serializer.get_serializer(self.arguments.serializer or "json")
        print("Using {} serializer".format(self.serializer.ext))

        print('Starting, this may take a while...')

        self.output_dir = get_folder(self.arguments.output_dir)
        try:
            self.input_dir = get_image_paths(self.arguments.input_dir)
        except:
            print('Input directory not found. Please ensure it exists.')
            exit(1)

        self.filter = self.load_filter()
        self.process()
        self.finalize()
Example #11
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)

        self.patch_threads = None
        self.images = Images(self.args)
        self.validate()
        self.alignments = Alignments(self.args, False, self.images.is_video)
        self.opts = OptionalActions(self.args, self.images.input_images,
                                    self.alignments)

        self.add_queues()
        self.disk_io = DiskIO(self.alignments, self.images, arguments)
        self.predictor = Predict(self.disk_io.load_queue, self.queue_size,
                                 arguments)

        configfile = self.args.configfile if hasattr(self.args,
                                                     "configfile") else None
        self.converter = Converter(get_folder(self.args.output_dir),
                                   self.predictor.output_size,
                                   self.predictor.has_predicted_mask,
                                   self.disk_io.draw_transparent,
                                   self.disk_io.pre_encode,
                                   arguments,
                                   configfile=configfile)

        logger.debug("Initialized %s", self.__class__.__name__)
Example #12
    def process(self, output_item):
        """ Detect and move blurry face """
        extractor = AlignerExtract()

        for idx, face in enumerate(output_item["detected_faces"]):
            resized_face = output_item["resized_faces"][idx]
            dims = resized_face.shape[:2]
            size = dims[0]
            t_mat = output_item["t_mats"][idx]

            aligned_landmarks = extractor.transform_points(
                face.landmarksXY, t_mat, size, 48)
            feature_mask = extractor.get_feature_mask(aligned_landmarks / size,
                                                      size, 48)
            feature_mask = cv2.blur(feature_mask, (10, 10))
            isolated_face = cv2.multiply(feature_mask,
                                         resized_face.astype(float)).astype(
                                             np.uint8)
            blurry, focus_measure = is_blurry(isolated_face, self.blur_thresh)

            if blurry:
                blur_folder = output_item["output_file"].parts[:-1]
                blur_folder = get_folder(Path(*blur_folder) / Path("blurry"))
                frame_name = output_item["output_file"].parts[-1]
                output_item["output_file"] = blur_folder / Path(frame_name)
                if self.verbose:
                    print("{}'s focus measure of {} was below the blur "
                          "threshold, moving to \"blurry\"".format(
                              frame_name, focus_measure))
Example #13
 def load_model(self):
     """ Load the model requested for training """
     logger.debug("Loading Model")
     model_dir = get_folder(self.args.model_dir)
     configfile = self.args.configfile if hasattr(self.args,
                                                  "configfile") else None
     augment_color = not self.args.no_augment_color
     model = PluginLoader.get_model(self.trainer_name)(
         model_dir,
         gpus=self.args.gpus,
         configfile=configfile,
         snapshot_interval=self.args.snapshot_interval,
         no_logs=self.args.no_logs,
         warp_to_landmarks=self.args.warp_to_landmarks,
         augment_color=augment_color,
         no_flip=self.args.no_flip,
         training_image_size=self.image_size,
         alignments_paths=self.alignments_paths,
         preview_scale=self.args.preview_scale,
         pingpong=self.args.pingpong,
         memory_saving_gradients=self.args.memory_saving_gradients,
         optimizer_savings=self.args.optimizer_savings,
         predict=False)
     logger.debug("Loaded Model")
     return model
Example #14
    def set_timelapse(self):
        """ Set time-lapse paths if requested """
        if (not self.args.timelapse_input_a and not self.args.timelapse_input_b
                and not self.args.timelapse_output):
            return None
        if not self.args.timelapse_input_a or not self.args.timelapse_input_b:
            raise ValueError("To enable the timelapse, you have to supply "
                             "all the parameters (--timelapse-input-A and "
                             "--timelapse-input-B).")

        timelapse_output = None
        if self.args.timelapse_output is not None:
            timelapse_output = str(get_folder(self.args.timelapse_output))

        for folder in (self.args.timelapse_input_a,
                       self.args.timelapse_input_b, timelapse_output):
            if folder is not None and not os.path.isdir(folder):
                raise ValueError(
                    "The Timelapse path '{}' does not exist".format(folder))

        kwargs = {
            "input_a": self.args.timelapse_input_a,
            "input_b": self.args.timelapse_input_b,
            "output": timelapse_output
        }
        logger.debug("Timelapse enabled: %s", kwargs)
        return kwargs
Example #15
    def process(self, output_item):
        """ Detect and move blurry face """
        extractor = AlignerExtract()

        for idx, detected_face in enumerate(output_item["detected_faces"]):
            frame_name = detected_face["file_location"].parts[-1]
            face = detected_face["face"]
            logger.trace("Checking for blurriness. Frame: '%s', Face: %s", frame_name, idx)
            aligned_landmarks = face.aligned_landmarks
            resized_face = face.aligned_face
            size = face.aligned["size"]
            padding = int(size * 0.1875)
            feature_mask = extractor.get_feature_mask(
                aligned_landmarks / size,
                size, padding)
            feature_mask = cv2.blur(  # pylint: disable=no-member
                feature_mask, (10, 10))
            isolated_face = cv2.multiply(  # pylint: disable=no-member
                feature_mask,
                resized_face.astype(float)).astype(np.uint8)
            blurry, focus_measure = self.is_blurry(isolated_face)

            if blurry:
                blur_folder = detected_face["file_location"].parts[:-1]
                blur_folder = get_folder(Path(*blur_folder) / Path("blurry"))
                detected_face["file_location"] = blur_folder / Path(frame_name)
                logger.verbose("%s's focus measure of %s was below the blur threshold, "
                               "moving to 'blurry'", frame_name, "{0:.2f}".format(focus_measure))
Example #16
    def snapshot_models(self, iterations):
        """ Take a snapshot of the model at the current state and back it up.

        The snapshot is a copy of the model folder located in the same root location
        as the original model file, with the number of iterations appended to the end
        of the folder name.

        Parameters
        ----------
        iterations: int
            The number of iterations that the model has trained when performing the snapshot.
        """
        print("")  # New line so log message doesn't append to last loss output
        logger.verbose("Saving snapshot")
        snapshot_dir = "{}_snapshot_{}_iters".format(self.model_dir,
                                                     iterations)

        if os.path.isdir(snapshot_dir):
            logger.debug("Removing previously existing snapshot folder: '%s'",
                         snapshot_dir)
            rmtree(snapshot_dir)

        dst = str(get_folder(snapshot_dir))
        for filename in os.listdir(self.model_dir):
            if not self._check_valid(filename, for_restore=False):
                logger.debug("Not snapshotting file: '%s'", filename)
                continue
            srcfile = os.path.join(self.model_dir, filename)
            dstfile = os.path.join(dst, filename)
            copyfunc = copytree if os.path.isdir(srcfile) else copyfile
            logger.debug("Saving snapshot: '%s' > '%s'", srcfile, dstfile)
            copyfunc(srcfile, dstfile)
        logger.info("Saved snapshot (%s iterations)", iterations)
Example #17
    def process_arguments(self, arguments):
        self.arguments = arguments
        print("Input Directory: {}".format(self.arguments.input_dir))
        print("Output Directory: {}".format(self.arguments.output_dir))
        self.serializer = None
        if self.arguments.serializer is None and self.arguments.alignments_path is not None:
            ext = os.path.splitext(self.arguments.alignments_path)[-1]
            self.serializer = Serializer.get_serializer_fromext(ext)
            print(self.serializer, self.arguments.alignments_path)
        else:
            self.serializer = Serializer.get_serializer(self.arguments.serializer or "json")
        print("Using {} serializer".format(self.serializer.ext))

        print('Starting, this may take a while...')

        self.output_dir = get_folder(self.arguments.output_dir)
        try:
            self.input_dir = get_image_paths(self.arguments.input_dir)
        except:
            print('Input directory not found. Please ensure it exists.')
            exit(1)

        self.filter = self.load_filter()
        self.process()
        self.finalize()
Example #18
    def detect_blurry_faces(self, face, t_mat, resized_image, filename):
        """ Detect and move blurry face """
        if not hasattr(self.args, 'blur_thresh') or not self.args.blur_thresh:
            return None

        blurry_file = None
        aligned_landmarks = self.extractor.transform_points(
            face.landmarks_as_xy(),
            t_mat,
            256,
            48)
        feature_mask = self.extractor.get_feature_mask(aligned_landmarks / 256,
                                                       256,
                                                       48)
        feature_mask = cv2.blur(feature_mask, (10, 10))
        isolated_face = cv2.multiply(
            feature_mask,
            resized_image.astype(float)).astype(np.uint8)
        blurry, focus_measure = is_blurry(isolated_face, self.args.blur_thresh)

        if blurry:
            print("{}'s focus measure of {} was below the blur threshold, "
                  "moving to \"blurry\"".format(Path(filename).stem,
                                                focus_measure))
            blurry_file = get_folder(Path(self.output_dir) /
                                     Path("blurry")) / Path(filename).stem
        return blurry_file
Example #19
    def __init__(self, description='default'):
        print('Initializing')
        self.parse_arguments(description)

        print("Input Directory: {}".format(self.arguments.input_dir))
        print("Output Directory: {}".format(self.arguments.output_dir))
        print('Starting, this may take a while...')

        self.output_dir = get_folder(self.arguments.output_dir)
        try:
            self.input_dir = get_image_paths(self.arguments.input_dir)
        except:
            print('Input directory not found. Please ensure it exists.')
            exit(1)

        self.images_found = len(self.input_dir)

        for filename in self.input_dir:
            if self.arguments.verbose:
                print('Processing: {}'.format(os.path.basename(filename)))

            self.process_image(filename)
            self.images_processed = self.images_processed + 1

        self.finalize()
Example #20
    def detect_blurry_faces(self, face, t_mat, resized_image, filename):
        """ Detect and move blurry face """
        if not hasattr(self.args, 'blur_thresh') or not self.args.blur_thresh:
            return None

        blurry_file = None
        aligned_landmarks = self.extractor.transform_points(
            face.landmarks_as_xy(),
            t_mat,
            256,
            48)
        feature_mask = self.extractor.get_feature_mask(aligned_landmarks / 256,
                                                       256,
                                                       48)
        feature_mask = cv2.blur(feature_mask, (10, 10))
        isolated_face = cv2.multiply(
            feature_mask,
            resized_image.astype(float)).astype(np.uint8)
        blurry, focus_measure = is_blurry(isolated_face, self.args.blur_thresh)

        if blurry:
            print("{}'s focus measure of {} was below the blur threshold, "
                  "moving to \"blurry\"".format(Path(filename).stem,
                                                focus_measure))
            blurry_file = get_folder(Path(self.output_dir) /
                                     Path("blurry")) / Path(filename).stem
        return blurry_file
Example #21
    def convert(self, converter, item):
        try:
            (filename, image, faces) = item

            skip = self.check_skipframe(filename)
            if self.arguments.discard_frames and skip:
                return

            if not skip:  # process frame as normal
                for idx, face in faces:
                    if self.input_aligned_dir is not None and self.check_skipface(
                            filename, idx):
                        print('face {} for frame {} was deleted, skipping'.
                              format(idx, os.path.basename(filename)))
                        continue
                    image = converter.patch_image(
                        image, face,
                        64 if "128" not in self.arguments.trainer else 128)
                    # TODO: This switch between 64 and 128 is a hack for now. We should have a separate cli option for size

            output_file = get_folder(self.output_dir) / Path(filename).name
            cv2.imwrite(str(output_file), image)
        except Exception as e:
            print('Failed to convert image: {}. Reason: {}'.format(
                filename, e))
Example #22
 def __init__(self, arguments):
     logger.debug("Initializing %s: (args: %s", self.__class__.__name__,
                  arguments)
     self.args = arguments
     Utils.set_verbosity(self.args.loglevel)
     self.output_dir = get_folder(self.args.output_dir)
     logger.info("Output Directory: %s", self.args.output_dir)
     self.images = Images(self.args)
     self.alignments = Alignments(self.args, True, self.images.is_video)
     self.post_process = PostProcess(arguments)
     configfile = self.args.configfile if hasattr(self.args,
                                                  "configfile") else None
     normalization = None if self.args.normalization == "none" else self.args.normalization
     self.extractor = Extractor(self.args.detector,
                                self.args.aligner,
                                self.args.loglevel,
                                configfile=configfile,
                                multiprocess=not self.args.singleprocess,
                                rotate_images=self.args.rotate_images,
                                min_size=self.args.min_size,
                                normalize_method=normalization)
     self.save_queue = queue_manager.get_queue("extract_save")
     self.verify_output = False
     self.save_interval = None
     if hasattr(self.args, "save_interval"):
         self.save_interval = self.args.save_interval
     logger.debug("Initialized %s", self.__class__.__name__)
Example #23
    def _set_timelapse(self):
        """ Set time-lapse paths if requested.

        Returns
        -------
        dict
            The time-lapse keyword arguments for passing to the trainer

        """
        if (not self._args.timelapse_input_a and
                not self._args.timelapse_input_b and
                not self._args.timelapse_output):
            return None
        if (not self._args.timelapse_input_a or
                not self._args.timelapse_input_b or
                not self._args.timelapse_output):
            raise FaceswapError("To enable the timelapse, you have to supply all the parameters "
                                "(--timelapse-input-A, --timelapse-input-B and "
                                "--timelapse-output).")

        timelapse_output = str(get_folder(self._args.timelapse_output))

        for folder in (self._args.timelapse_input_a, self._args.timelapse_input_b):
            if folder is not None and not os.path.isdir(folder):
                raise FaceswapError("The Timelapse path '{}' does not exist".format(folder))
            exts = [os.path.splitext(fname)[-1] for fname in os.listdir(folder)]
            if not any(ext in _image_extensions for ext in exts):
                raise FaceswapError("The Timelapse path '{}' does not contain any valid "
                                    "images".format(folder))
        kwargs = {"input_a": self._args.timelapse_input_a,
                  "input_b": self._args.timelapse_input_b,
                  "output": timelapse_output}
        logger.debug("Timelapse enabled: %s", kwargs)
        return kwargs
Example #24
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s", self.__class__.__name__,
                     arguments)
        self._args = arguments

        self._output_dir = str(get_folder(self._args.output_dir))

        logger.info("Output Directory: %s", self._args.output_dir)
        self._images = ImagesLoader(self._args.input_dir,
                                    load_with_hash=False,
                                    fast_count=True)
        self._alignments = Alignments(self._args, True, self._images.is_video)

        self._existing_count = 0
        self._set_skip_list()

        self._post_process = PostProcess(arguments)
        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        normalization = None if self._args.normalization == "none" else self._args.normalization
        self._extractor = Extractor(self._args.detector,
                                    self._args.aligner,
                                    self._args.masker,
                                    configfile=configfile,
                                    multiprocess=not self._args.singleprocess,
                                    rotate_images=self._args.rotate_images,
                                    min_size=self._args.min_size,
                                    normalize_method=normalization)
        self._threads = list()
        self._verify_output = False
        logger.debug("Initialized %s", self.__class__.__name__)
Example #25
    def set_timelapse(self):
        """ Set timelapse paths if requested """
        if (not self.args.timelapse_input_a and
                not self.args.timelapse_input_b and
                not self.args.timelapse_output):
            return None
        if not self.args.timelapse_input_a or not self.args.timelapse_input_b:
            raise ValueError("To enable the timelapse, you have to supply "
                             "all the parameters (--timelapse-input-A and "
                             "--timelapse-input-B).")

        timelapse_output = None
        if self.args.timelapse_output is not None:
            timelapse_output = str(get_folder(self.args.timelapse_output))

        for folder in (self.args.timelapse_input_a,
                       self.args.timelapse_input_b,
                       timelapse_output):
            if folder is not None and not os.path.isdir(folder):
                raise ValueError("The Timelapse path '{}' does not exist".format(folder))

        kwargs = {"input_a": self.args.timelapse_input_a,
                  "input_b": self.args.timelapse_input_b,
                  "output": timelapse_output}
        logger.debug("Timelapse enabled: %s", kwargs)
        return kwargs
Example #26
    def handleImage(self, image, filename):
        faces = self.get_faces(image)
        process_faces = [(idx, face) for idx, face in faces]

        # Run image rotator if requested and no faces found
        if self.arguments.rotate_images.lower() == 'on' and len(
                process_faces) == 0:
            process_faces, image = self.imageRotator(image)

        rvals = []
        for idx, face in process_faces:
            # Draws landmarks for debug
            if self.arguments.debug_landmarks:
                for (x, y) in face.landmarksAsXY():
                    cv2.circle(image, (x, y), 2, (0, 0, 255), -1)

            resized_image = self.extractor.extract(image, face, 256)
            output_file = get_folder(self.output_dir) / Path(filename).stem
            cv2.imwrite(
                '{}_{}{}'.format(str(output_file), str(idx),
                                 Path(filename).suffix), resized_image)
            f = {
                "r": face.r,
                "x": face.x,
                "w": face.w,
                "y": face.y,
                "h": face.h,
                "landmarksXY": face.landmarksAsXY()
            }
            rvals.append(f)
        return rvals
Example #27
    def _background_extract(self, output_folder, progress_queue):
        """ Perform the background extraction in a thread so GUI doesn't become unresponsive.

        Parameters
        ----------
        output_folder: str
            The location to save the output faces to
        progress_queue: :class:`queue.Queue`
            The queue to place incremental counts to for updating the GUI's progress bar
        """
        saver = ImagesSaver(str(get_folder(output_folder)), as_bytes=True)
        loader = ImagesLoader(self._input_location,
                              count=self._alignments.frames_count)
        for frame_idx, (filename, image) in enumerate(loader.load()):
            logger.trace("Outputting frame: %s: %s", frame_idx, filename)
            basename = os.path.basename(filename)
            frame_name, extension = os.path.splitext(basename)
            final_faces = []
            progress_queue.put(1)
            for face_idx, face in enumerate(self._frame_faces[frame_idx]):
                output = "{}_{}{}".format(frame_name, str(face_idx), extension)
                aligned = AlignedFace(face.landmarks_xy,
                                      image=image,
                                      centering="head",
                                      size=512)  # TODO user selectable size
                face.hash, b_image = encode_image_with_hash(
                    aligned.face, extension)
                saver.save(output, b_image)
                final_faces.append(face.to_alignment())
            self._alignments.data[basename]["faces"] = final_faces
        saver.close()
Example #28
    def handleImage(self, image, filename):
        count = 0

        faces = self.get_faces(image)
        rvals = []
        for idx, face in faces:
            count = idx

            # Draws landmarks for debug
            if self.arguments.debug_landmarks:
                for (x, y) in face.landmarksAsXY():
                    cv2.circle(image, (x, y), 2, (0, 0, 255), -1)

            resized_image = self.extractor.extract(image, face, 256)
            output_file = get_folder(self.output_dir) / Path(filename).stem
            cv2.imwrite(
                '{}_{}{}'.format(str(output_file), str(idx),
                                 Path(filename).suffix), resized_image)
            f = {
                "x": face.x,
                "w": face.w,
                "y": face.y,
                "h": face.h,
                "landmarksXY": face.landmarksAsXY()
            }
            rvals.append(f)
        return rvals
Example #29
    def process(self, output_item):
        """ Detect and move blurry face """
        extractor = AlignerExtract()

        for idx, detected_face in enumerate(output_item["detected_faces"]):
            frame_name = detected_face["file_location"].parts[-1]
            face = detected_face["face"]
            logger.trace("Checking for blurriness. Frame: '%s', Face: %s",
                         frame_name, idx)
            aligned_landmarks = face.aligned_landmarks
            resized_face = face.aligned_face
            size = face.aligned["size"]
            padding = int(size * 0.1875)
            feature_mask = extractor.get_feature_mask(aligned_landmarks / size,
                                                      size, padding)
            feature_mask = cv2.blur(feature_mask, (10, 10))
            isolated_face = cv2.multiply(feature_mask,
                                         resized_face.astype(float)).astype(
                                             np.uint8)
            blurry, focus_measure = self.is_blurry(isolated_face)

            if blurry:
                blur_folder = detected_face["file_location"].parts[:-1]
                blur_folder = get_folder(Path(*blur_folder) / Path("blurry"))
                detected_face["file_location"] = blur_folder / Path(frame_name)
                logger.verbose(
                    "%s's focus measure of %s was below the blur threshold, "
                    "moving to 'blurry'", frame_name,
                    "{0:.2f}".format(focus_measure))
Example #30
    def _set_timelapse(self):
        """ Set time-lapse paths if requested.

        Returns
        -------
        dict
            The time-lapse keyword arguments for passing to the trainer

        """
        if (not self._args.timelapse_input_a
                and not self._args.timelapse_input_b
                and not self._args.timelapse_output):
            return None
        if (not self._args.timelapse_input_a
                or not self._args.timelapse_input_b
                or not self._args.timelapse_output):
            raise FaceswapError(
                "To enable the timelapse, you have to supply all the parameters "
                "(--timelapse-input-A, --timelapse-input-B and "
                "--timelapse-output).")

        timelapse_output = get_folder(self._args.timelapse_output)

        for side in ("a", "b"):
            folder = getattr(self._args, f"timelapse_input_{side}")
            if folder is not None and not os.path.isdir(folder):
                raise FaceswapError(
                    f"The Timelapse path '{folder}' does not exist")

            training_folder = getattr(self._args, f"input_{side}")
            if folder == training_folder:
                continue  # Time-lapse folder is training folder

            filenames = [
                fname for fname in os.listdir(folder)
                if os.path.splitext(fname)[-1].lower() in _image_extensions
            ]
            if not filenames:
                raise FaceswapError(
                    f"The Timelapse path '{folder}' does not contain any valid "
                    "images")

            # Time-lapse images must appear in the training set, as we need access to alignment and
            # mask info. Check filenames are there to save failing much later in the process.
            training_images = [
                os.path.basename(img) for img in self._images[side]
            ]
            if not all(img in training_images for img in filenames):
                raise FaceswapError(
                    f"All images in the Timelapse folder '{folder}' must exist in "
                    f"the training folder '{training_folder}'")

        kwargs = {
            "input_a": self._args.timelapse_input_a,
            "input_b": self._args.timelapse_input_b,
            "output": timelapse_output
        }
        logger.debug("Timelapse enabled: %s", kwargs)
        return kwargs
Example #31
 def load_model(self):
     """ Load the model requested for conversion """
     logger.debug("Loading Model")
     model_dir = get_folder(self.args.model_dir)
     model = PluginLoader.get_model(self.args.trainer)(model_dir,
                                                       self.args.gpus,
                                                       predict=True)
     logger.debug("Loaded Model")
     return model
Example #32
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.opts = OptionalActions(self.args, self.images.input_images)
Example #33
 def handleImage(self, filename):
     count = 0
     image = cv2.imread(filename)
     for idx, face in self.get_faces(image):
         count = idx
         resized_image = self.extractor.extract(image, face, 256)
         output_file = get_folder(self.output_dir) / Path(filename).stem
         cv2.imwrite(str(output_file) + str(idx) + Path(filename).suffix, resized_image)
     return count + 1
Example #34
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.opts = OptionalActions(self.args, self.images.input_images)
Example #35
    def __init__(self, arguments):
        self.args = arguments
        self.extractor = self.load_extractor()
        self.filter = self.load_face_filter()
        self.align_eyes = self.args.align_eyes if hasattr(self.args, 'align_eyes') else False
        self.output_dir = get_folder(self.args.output_dir)

        self.faces_detected = dict()
        self.num_faces_detected = 0
        self.verify_output = False
Example #36
 def validate(self):
     """ Make the output folder if it doesn't exist and check that video flag is
         a valid choice """
     if (self.args.writer == "ffmpeg" and not self.images.is_video
             and self.args.reference_video is None):
         raise FaceswapError(
             "Output as video selected, but using frames as input. You must "
             "provide a reference video ('-ref', '--reference-video').")
     output_dir = get_folder(self.args.output_dir)
     logger.info("Output Directory: %s", output_dir)
Example #37
 def validate(self):
     """ Make the output folder if it doesn't exist and check that video flag is
         a valid choice """
     if (self.args.writer == "ffmpeg" and
             not self.images.is_video and
             self.args.reference_video is None):
         logger.error("Output as video selected, but using frames as input. You must provide a "
                      "reference video ('-ref', '--reference-video').")
         exit(1)
     output_dir = get_folder(self.args.output_dir)
     logger.info("Output Directory: %s", output_dir)
Example #38
 def load_model(self):
     """ Load the model requested for conversion """
     logger.debug("Loading Model")
     model_dir = get_folder(self.args.model_dir, make_folder=False)
     if not model_dir:
         logger.error("%s does not exist.", self.args.model_dir)
         exit(1)
     trainer = self.get_trainer(model_dir)
     model = PluginLoader.get_model(trainer)(model_dir, self.args.gpus, predict=True)
     logger.debug("Loaded Model")
     return model
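
This example passes make_folder=False, which turns get_folder into an existence check: a missing model folder is reported rather than created. A short usage sketch under the same assumption as the sketch after Example #1, with a hypothetical path:

model_dir = get_folder("models/original", make_folder=False)  # hypothetical path
if not model_dir:
    # A falsy return value means the folder does not exist and was not created.
    raise SystemExit("models/original does not exist.")
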
Example #39
    def load_model(self):
        """ Load the model requested for conversion """
        model_name = self.args.trainer
        model_dir = get_folder(self.args.model_dir)
        num_gpus = self.args.gpus

        model = PluginLoader.get_model(model_name)(model_dir, num_gpus)

        if not model.load(self.args.swap_model):
            print("Model Not Found! A valid model must be provided to continue!")
            exit(1)

        return model
Example #40
    def process_arguments(self, arguments):
        self.arguments = arguments
        print("Input Directory: {}".format(self.arguments.input_dir))
        print("Output Directory: {}".format(self.arguments.output_dir))
        print('Starting, this may take a while...')

        self.output_dir = get_folder(self.arguments.output_dir)
        try:
            self.input_dir = get_image_paths(self.arguments.input_dir)
        except:
            print('Input directory not found. Please ensure it exists.')
            exit(1)

        self.images_found = len(self.input_dir)
        self.filter = self.load_filter()
        self.process()
        self.finalize()
Example #41
    def processThread(self):
        if self.arguments.allow_growth:
            self.set_tf_allow_growth()
        
        print('Loading data, this may take a while...')
        # this is so that you can enter case insensitive values for trainer
        trainer = self.arguments.trainer
        trainer = "LowMem" if trainer.lower() == "lowmem" else trainer
        model = PluginLoader.get_model(trainer)(get_folder(self.arguments.model_dir))
        model.load(swapped=False)

        images_A = get_image_paths(self.arguments.input_A)
        images_B = get_image_paths(self.arguments.input_B)
        trainer = PluginLoader.get_trainer(trainer)
        trainer = trainer(model, images_A, images_B, batch_size=self.arguments.batch_size)

        try:
            print('Starting. Press "Enter" to stop training and save model')

            for epoch in range(0, self.arguments.epochs):

                save_iteration = epoch % self.arguments.save_interval == 0

                trainer.train_one_step(epoch, self.show if (save_iteration or self.save_now) else None)

                if save_iteration:
                    model.save_weights()

                if self.stop:
                    model.save_weights()
                    exit()

                if self.save_now:
                    model.save_weights()
                    self.save_now = False

        except KeyboardInterrupt:
            try:
                model.save_weights()
            except KeyboardInterrupt:
                print('Saving model weights has been cancelled!')
            exit(0)
        except Exception as e:
            print(e)
            exit(1)
Example #42
 def load_model(self):
     """ Load the model requested for training """
     logger.debug("Loading Model")
     model_dir = get_folder(self.args.model_dir)
     model = PluginLoader.get_model(self.trainer_name)(
         model_dir,
         self.args.gpus,
         no_logs=self.args.no_logs,
         warp_to_landmarks=self.args.warp_to_landmarks,
         no_flip=self.args.no_flip,
         training_image_size=self.image_size,
         alignments_paths=self.alignments_paths,
         preview_scale=self.args.preview_scale,
         pingpong=self.args.pingpong,
         memory_saving_gradients=self.args.memory_saving_gradients,
         predict=False)
     logger.debug("Loaded Model")
     return model
Example #43
    def handleImage(self, image, filename):
        count = 0

        faces = self.get_faces(image)
        rvals = []
        for idx, face in faces:
            count = idx

            resized_image = self.extractor.extract(image, face, 256)
            output_file = get_folder(self.output_dir) / Path(filename).stem
            cv2.imwrite(str(output_file) + str(idx) + Path(filename).suffix, resized_image)
            f = {
                "x": face.x,
                "w": face.w,
                "y": face.y,
                "h": face.h,
                "landmarksXY": face.landmarksAsXY()
            }
            rvals.append(f)
        return rvals
Example #44
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s", self.__class__.__name__, arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector,
                                   self.args.aligner,
                                   self.args.loglevel,
                                   self.args.multiprocess,
                                   self.args.rotate_images,
                                   self.args.min_size)

        self.save_queue = queue_manager.get_queue("extract_save")
        self.verify_output = False
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)
Example #45
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)

        self.images = Images(self.args)
        self.validate()
        self.alignments = Alignments(self.args, False, self.images.is_video)
        # Update Legacy alignments
        Legacy(self.alignments, self.images.input_images, arguments.input_aligned_dir)
        self.opts = OptionalActions(self.args, self.images.input_images, self.alignments)

        self.add_queues()
        self.disk_io = DiskIO(self.alignments, self.images, arguments)
        self.predictor = Predict(self.disk_io.load_queue, self.queue_size, arguments)
        self.converter = Converter(get_folder(self.args.output_dir),
                                   self.predictor.output_size,
                                   self.predictor.has_predicted_mask,
                                   self.disk_io.draw_transparent,
                                   self.disk_io.pre_encode,
                                   arguments)

        logger.debug("Initialized %s", self.__class__.__name__)