def __init__(self, arguments):
    """ Set up the conversion pipeline from the parsed command line arguments.

    Wires together image loading, alignments, queueing, disk I/O, the
    predictor and the final converter.
    """
    logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
    self.args = arguments
    Utils.set_verbosity(self.args.loglevel)

    self.patch_threads = None
    self.images = Images(self.args)
    self.validate()
    self.alignments = Alignments(self.args, False, self.images.is_video)
    self.opts = OptionalActions(self.args, self.images.input_images, self.alignments)

    self.add_queues()
    self.disk_io = DiskIO(self.alignments, self.images, arguments)
    self.predictor = Predict(self.disk_io.load_queue, self.queue_size, arguments)

    # getattr with a default is equivalent to the hasattr check-then-read
    self.converter = Converter(get_folder(self.args.output_dir),
                               self.predictor.output_size,
                               self.predictor.has_predicted_mask,
                               self.disk_io.draw_transparent,
                               self.disk_io.pre_encode,
                               arguments,
                               configfile=getattr(self.args, "configfile", None))
    logger.debug("Initialized %s", self.__class__.__name__)
def convert(self, converter, item):
    """ Apply the conversion transferring faces onto frames.

    Parameters
    ----------
    converter: converter object
        Converter applied to each detected face
    item: tuple
        ``(filename, image, faces)`` for a single frame
    """
    try:
        filename, image, faces = item
        skip = self.opts.check_skipframe(filename)
        if not skip:
            for idx, face in faces:
                if self.write_image_mask:
                    # Request the mask alongside the patched frame so it can
                    # be written out separately below
                    image, image_mask = self.convert_one_face(
                        converter,
                        (filename, image, idx, face),
                        return_image_mask=self.write_image_mask)
                else:
                    # BUGFIX: this call previously ran unconditionally,
                    # overwriting the masked result above and converting
                    # every face twice when write_image_mask was enabled
                    image = self.convert_one_face(
                        converter, (filename, image, idx, face))
        if skip != "discard":
            filename = str(self.output_dir / Path(filename).name)
            Utils.cv2_read_write('write', filename, image)
            if self.write_image_mask:
                # Writes image into a mask folder, be careful with that if
                # you write multiple images
                os.makedirs(self.write_image_mask, exist_ok=True)
                Utils.cv2_read_write(
                    'write',
                    os.path.join(self.write_image_mask,
                                 os.path.basename(filename)),
                    image_mask)
    except Exception as err:
        print("Failed to convert image: {}. "
              "Reason: {}".format(filename, err))
def process_single_image(self, filename):
    """ Detect faces in an image.

    Rotate the image the specified amount until at least one face is
    found, or until image rotations are depleted. Once at least one face
    has been detected, pass to process_single_face to process the
    individual faces.

    Returns a ``(filename, faces)`` tuple; ``faces`` is empty on failure.
    """
    retval = filename, list()
    try:
        image = Utils.cv2_read_write('read', filename)
        for angle in self.images.rotation_angles:
            # BUGFIX: rotate from the original frame each pass. Previously
            # `image` itself was overwritten, so rotations accumulated on an
            # already-rotated image and the source frame was lost.
            current_image = Utils.rotate_image_by_angle(image, angle)
            faces = self.faces.get_faces(current_image, angle)
            process_faces = [(idx, face) for idx, face in faces]
            if process_faces and angle != 0 and self.args.verbose:
                print("found face(s) by rotating image {} degrees".format(
                    angle))
            if process_faces:
                break
        final_faces = [self.process_single_face(idx, face, filename, current_image)
                       for idx, face in process_faces]
        retval = filename, final_faces
    except Exception as err:
        if self.args.verbose:
            print("Failed to extract from image: {}. Reason: {}".format(
                filename, err))
    return retval
def process_single_image(self, filename):
    """ Detect faces within a single frame.

    The frame is re-tried at each configured rotation angle until faces
    are found or the angles are exhausted. Detected faces are handed to
    ``process_single_face``. Returns ``(filename, faces)``; ``faces`` is
    empty when nothing was found or an error occurred.
    """
    retval = filename, list()
    try:
        source = Utils.cv2_read_write('read', filename)
        for angle in self.images.rotation_angles:
            rotated = Utils.rotate_image_by_angle(source, angle)
            found = self.faces.get_faces(rotated, angle)
            process_faces = [(idx, face) for idx, face in found]
            if process_faces:
                if angle != 0 and self.args.verbose:
                    print("found face(s) by rotating image {} degrees".format(angle))
                break
        retval = filename, [self.process_single_face(idx, face, filename, rotated)
                            for idx, face in process_faces]
    except Exception as err:
        if self.args.verbose:
            print("Failed to extract from image: {}. Reason: {}".format(filename, err))
    return retval
def __init__(self, arguments):
    """ Set up the extraction pipeline from the parsed command line arguments.

    Builds the image loader, alignments store, post-processing and the
    extractor plugin chain, and grabs the save queue.
    """
    # BUGFIX: the log format string was missing its closing parenthesis
    logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
    self.args = arguments
    Utils.set_verbosity(self.args.loglevel)
    self.output_dir = get_folder(self.args.output_dir)
    logger.info("Output Directory: %s", self.args.output_dir)
    self.images = Images(self.args)
    self.alignments = Alignments(self.args, True, self.images.is_video)
    self.post_process = PostProcess(arguments)
    # getattr with a default replaces the hasattr check-then-read idiom
    configfile = getattr(self.args, "configfile", None)
    normalization = None if self.args.normalization == "none" else self.args.normalization
    self.extractor = Extractor(self.args.detector,
                               self.args.aligner,
                               self.args.loglevel,
                               configfile=configfile,
                               multiprocess=not self.args.singleprocess,
                               rotate_images=self.args.rotate_images,
                               min_size=self.args.min_size,
                               normalize_method=normalization)
    self.save_queue = queue_manager.get_queue("extract_save")
    self.verify_output = False
    self.save_interval = getattr(self.args, "save_interval", None)
    logger.debug("Initialized %s", self.__class__.__name__)
def __init__(self, arguments):
    """ Set up the extraction pipeline from the parsed command line arguments.

    Builds the image loader, alignments store, skip list, post-processing
    and the extractor plugin chain.
    """
    # BUGFIX: the log format string was missing its closing parenthesis
    logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
    self._args = arguments
    Utils.set_verbosity(self._args.loglevel)
    self._output_dir = str(get_folder(self._args.output_dir))
    logger.info("Output Directory: %s", self._args.output_dir)
    self._images = ImagesLoader(self._args.input_dir, load_with_hash=False, fast_count=True)
    self._alignments = Alignments(self._args, True, self._images.is_video)

    # Frames already present in the alignments file can be skipped
    self._existing_count = 0
    self._set_skip_list()

    self._post_process = PostProcess(arguments)
    # getattr with a default replaces the hasattr check-then-read idiom
    configfile = getattr(self._args, "configfile", None)
    normalization = None if self._args.normalization == "none" else self._args.normalization
    self._extractor = Extractor(self._args.detector,
                                self._args.aligner,
                                self._args.masker,
                                configfile=configfile,
                                multiprocess=not self._args.singleprocess,
                                rotate_images=self._args.rotate_images,
                                min_size=self._args.min_size,
                                normalize_method=normalization)
    self._threads = list()
    self._verify_output = False
    logger.debug("Initialized %s", self.__class__.__name__)
def process_single_face(self, idx, face, filename, image):
    """ Annotate, align and optionally export one detected face.

    Returns a dict of the face's bounding box, rotation and landmarks.
    """
    output_file = None
    if self.export_face:
        output_file = self.output_dir / Path(filename).stem
    self.faces.draw_landmarks_on_face(face, image)
    resized_face, t_mat = self.faces.extractor.extract(image,
                                                       face,
                                                       256,
                                                       self.faces.align_eyes)
    # A blurry face gets redirected to the blur output location
    blurry_file = self.faces.detect_blurry_faces(face, t_mat, resized_face, filename)
    if blurry_file:
        output_file = blurry_file
    if self.export_face:
        out_name = "{}_{}{}".format(str(output_file), str(idx), Path(filename).suffix)
        Utils.cv2_read_write('write', out_name, resized_face)
    return {"r": face.r,
            "x": face.x,
            "w": face.w,
            "y": face.y,
            "h": face.h,
            "landmarksXY": face.landmarks_as_xy()}
def process_single_face(self, idx, face, filename, image):
    """ Annotate, align and optionally export one detected face.

    Returns a dict of the face's bounding box, rotation and landmarks.
    """
    output_file = None
    if self.export_face:
        output_file = self.output_dir / Path(filename).stem
    self.faces.draw_landmarks_on_face(face, image)
    resized_face, t_mat = self.faces.extractor.extract(image,
                                                       face,
                                                       256,
                                                       self.faces.align_eyes)
    # A blurry face gets redirected to the blur output location
    blurry_file = self.faces.detect_blurry_faces(face, t_mat, resized_face, filename)
    if blurry_file:
        output_file = blurry_file
    if self.export_face:
        out_name = "{}_{}{}".format(str(output_file), str(idx), Path(filename).suffix)
        Utils.cv2_read_write('write', out_name, resized_face)
    return {"r": face.r,
            "x": face.x,
            "w": face.w,
            "y": face.y,
            "h": face.h,
            "landmarksXY": face.landmarksAsXY()}
def process(self):
    """ Run the full conversion, wait for the writer thread, then tidy up
    the queues and report the final counts. """
    logger.debug("Starting Conversion")
    # queue_manager.debug_monitor(3)
    self.convert_images()
    self.disk_io.save_thread.join()
    queue_manager.terminate_queues()
    Utils.finalize(self.images.images_found,
                   self.predictor.faces_count,
                   self.predictor.verify_output)
    logger.debug("Completed Conversion")
def process(self):
    """ Drive the extraction pipeline: spin up the load and save I/O
    threads, run extraction, then persist the alignments and report. """
    logger.info('Starting, this may take a while...')
    # queue_manager.debug_monitor(3)
    self.threaded_io("load")
    saver = self.threaded_io("save")
    self.run_extraction()
    saver.join()
    self.alignments.save()
    # Only every skip_num-th frame was processed, so scale the frame count
    Utils.finalize(self.images.images_found // self.skip_num,
                   self.alignments.faces_count,
                   self.verify_output)
def process(self):
    """ Drive the extraction pipeline: spin up the load and save I/O
    threads, run extraction, then persist the alignments and report. """
    print('Starting, this may take a while...')
    Utils.set_verbosity(self.args.verbose)
    # queue_manager.debug_monitor(1)
    self.threaded_io("load")
    saver = self.threaded_io("save")
    self.run_extraction(saver)
    self.alignments.save()
    Utils.finalize(self.images.images_found,
                   self.alignments.faces_count(),
                   self.verify_output)
def process(self):
    """ Run the full conversion, wait for the writer thread, then tidy up
    the queues and report the final counts. """
    logger.debug("Starting Conversion")
    # queue_manager.debug_monitor(2)
    self.convert_images()
    self.disk_io.save_thread.join()
    queue_manager.terminate_queues()
    Utils.finalize(self.images.images_found,
                   self.predictor.faces_count,
                   self.predictor.verify_output)
    logger.debug("Completed Conversion")
def convert(self, converter, item):
    """ Patch every detected face onto the frame and write the result.

    ``item`` is a ``(filename, image, faces)`` tuple. Frames flagged as
    "discard" are never written; other skip values write the frame
    unmodified.
    """
    try:
        filename, image, faces = item
        skip_mode = self.opts.check_skipframe(filename)
        if not skip_mode:
            for idx, face in faces:
                image = self.convert_one_face(converter,
                                              (filename, image, idx, face))
        if skip_mode != "discard":
            out_path = str(self.output_dir / Path(filename).name)
            Utils.cv2_read_write('write', out_path, image)
    except Exception as err:
        print("Failed to convert image: {}. Reason: {}".format(filename, err))
def process(self):
    """ The entry point for triggering the Extraction Process.

    Should only be called from :class:`lib.cli.ScriptExecutor`
    """
    logger.info('Starting, this may take a while...')
    # from lib.queue_manager import queue_manager ; queue_manager.debug_monitor(3)
    self._threaded_redirector("load")
    self._run_extraction()
    # Wait for every worker thread before persisting results
    for thread in self._threads:
        thread.join()
    self._alignments.save()
    Utils.finalize(self._images.process_count + self._existing_count,
                   self._alignments.faces_count,
                   self._verify_output)
def process(self):
    """ Drive the extraction, choosing single or multi process mode from
    the command line arguments, then write alignments and report. """
    print('Starting, this may take a while...')
    Utils.set_verbosity(self.args.verbose)
    use_multiprocess = hasattr(self.args, 'processes') and self.args.processes > 1
    if use_multiprocess:
        self.extract_multi_process()
    else:
        self.extract_single_process()
    self.alignments.write_alignments(self.faces.faces_detected)
    images, faces = Utils.finalize(self.images.images_found,
                                   self.faces.num_faces_detected,
                                   self.faces.verify_output)
    self.images.images_found = images
    self.faces.num_faces_detected = faces
def process(self):
    """ Run the full conversion, wait for the writer thread, then tidy up
    the queues and report the final counts.

    Raises
    ------
    FaceswapError
        If the system runs out of RAM during conversion
    """
    logger.debug("Starting Conversion")
    # queue_manager.debug_monitor(5)
    try:
        self.convert_images()
        self.disk_io.save_thread.join()
        queue_manager.terminate_queues()
        Utils.finalize(self.images.images_found,
                       self.predictor.faces_count,
                       self.predictor.verify_output)
        logger.debug("Completed Conversion")
    except MemoryError as err:
        # Surface an actionable message rather than a raw MemoryError
        msg = ("Faceswap ran out of RAM running convert. Conversion is very system RAM "
               "heavy, so this can happen in certain circumstances when you have a lot of "
               "cpus but not enough RAM to support them all."
               "\nYou should lower the number of processes in use by either setting the "
               "'singleprocess' flag (-sp) or lowering the number of parallel jobs (-j).")
        raise FaceswapError(msg) from err
def prepare_images(self):
    """ Generator yielding ``(filename, image, faces)`` for every input
    frame that has alignments and at least one face. """
    filename = ""
    for filename in tqdm(self.images.input_images, file=sys.stdout):
        if self.check_alignments(filename):
            frame = Utils.cv2_read_write('read', filename)
            found = self.faces.get_faces_alignments(filename, frame)
            if found:
                yield filename, frame, found
def process(self):
    """ Original & LowMem models go with Adjust or Masked converter

    Note: GAN prediction outputs a mask + an image, while other
    predicts only an image
    """
    Utils.set_verbosity(self.args.verbose)

    # Generate alignments on the fly if none exist yet
    if not self.alignments.have_alignments_file:
        self.generate_alignments()
    self.faces.faces_detected = self.alignments.read_alignments()

    swap_model = self.load_model()
    face_converter = self.load_converter(swap_model)

    # Pre-fetch one frame ahead while the current one converts
    for item in BackgroundGenerator(self.prepare_images(), 1).iterator():
        self.convert(face_converter, item)

    Utils.finalize(self.images.images_found,
                   self.faces.num_faces_detected,
                   self.faces.verify_output)
def process(self):
    """ Drive the extraction, choosing multi-process mode only when
    requested and no GPU is detected, then write alignments and report. """
    print('Starting, this may take a while...')
    Utils.set_verbosity(self.args.verbose)
    # TODO Checking that there is no available GPU is not
    # necessarily an indicator of whether the user is actually
    # using the CPU. Maybe look to implement further checks on
    # dlib/tensorflow compilations
    wants_multiprocess = hasattr(self.args, 'multiprocess') and self.args.multiprocess
    if wants_multiprocess and GPUStats().device_count == 0:
        self.extract_multi_process()
    else:
        self.extract_single_process()
    self.write_alignments()
    images, faces = Utils.finalize(self.images.images_found,
                                   self.faces.num_faces_detected,
                                   self.faces.verify_output)
    self.images.images_found = images
    self.faces.num_faces_detected = faces
def __init__(self, arguments):
    """ Set up the extraction pipeline from the parsed command line arguments.

    Builds the image loader, alignments store, post-processing and the
    extractor plugin chain, and grabs the save queue.
    """
    # BUGFIX: the log format string was missing its closing parenthesis
    logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
    self.args = arguments
    Utils.set_verbosity(self.args.loglevel)
    self.output_dir = get_folder(self.args.output_dir)
    logger.info("Output Directory: %s", self.args.output_dir)
    self.images = Images(self.args)
    self.alignments = Alignments(self.args, True, self.images.is_video)
    self.post_process = PostProcess(arguments)
    self.extractor = Extractor(self.args.detector,
                               self.args.aligner,
                               self.args.loglevel,
                               self.args.multiprocess,
                               self.args.rotate_images,
                               self.args.min_size)
    self.save_queue = queue_manager.get_queue("extract_save")
    self.verify_output = False
    # getattr with a default replaces the hasattr check-then-read idiom
    self.save_interval = getattr(self.args, "save_interval", None)
    logger.debug("Initialized %s", self.__class__.__name__)
def __init__(self, arguments):
    """ Set up the conversion pipeline from the parsed command line arguments.

    Upgrades legacy alignments before wiring together queueing, disk I/O,
    the predictor and the final converter.
    """
    logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
    self.args = arguments
    Utils.set_verbosity(self.args.loglevel)

    self.images = Images(self.args)
    self.validate()
    self.alignments = Alignments(self.args, False, self.images.is_video)
    # Update Legacy alignments
    Legacy(self.alignments, self.images.input_images, arguments.input_aligned_dir)
    self.opts = OptionalActions(self.args, self.images.input_images, self.alignments)

    self.add_queues()
    self.disk_io = DiskIO(self.alignments, self.images, arguments)
    self.predictor = Predict(self.disk_io.load_queue, self.queue_size, arguments)
    self.converter = Converter(get_folder(self.args.output_dir),
                               self.predictor.output_size,
                               self.predictor.has_predicted_mask,
                               self.disk_io.draw_transparent,
                               self.disk_io.pre_encode,
                               arguments)
    logger.debug("Initialized %s", self.__class__.__name__)
def process(self):
    """ Original & LowMem models go with Adjust or Masked converter

    Note: GAN prediction outputs a mask + an image, while other
    predicts only an image.
    """
    Utils.set_verbosity(self.args.verbose)

    # Generate alignments on the fly if none exist yet
    if not self.alignments.have_alignments_file:
        self.generate_alignments()
    self.faces.faces_detected = self.alignments.read_alignments()

    swap_model = self.load_model()
    face_converter = self.load_converter(swap_model)

    # Pre-fetch one frame ahead while the current one converts
    for item in BackgroundGenerator(self.prepare_images(), 1).iterator():
        self.convert(face_converter, item)

    Utils.finalize(self.images.images_found,
                   self.faces.num_faces_detected,
                   self.faces.verify_output)
def process(self):
    """ Original & LowMem models go with Adjust or Masked converter

    Note: GAN prediction outputs a mask + an image, while other
    predicts only an image.
    """
    Utils.set_verbosity(self.args.loglevel)

    # Extract faces on the fly if no alignments file exists yet
    if not self.alignments.have_alignments_file:
        self.load_extractor()

    swap_model = self.load_model()
    face_converter = self.load_converter(swap_model)

    # Pre-fetch one frame ahead while the current one converts
    for item in BackgroundGenerator(self.prepare_images(), 1).iterator():
        self.convert(face_converter, item)

    if self.extract_faces:
        queue_manager.terminate_queues()

    Utils.finalize(self.images.images_found,
                   self.faces_count,
                   self.verify_output)