def load_plugins(self, config=None, disable_logging=False):
    """ Load the requested adjustment plugins into ``self.adjustments``.

    The box and mask blend plugins are always loaded; color and scaling
    plugins are loaded only when requested via the command-line arguments.
    """
    logger.debug("Loading plugins. config: %s", config)
    get_conv = PluginLoader.get_converter
    # Box blend is always applied with the fixed "none" mask type
    self.adjustments["box"] = get_conv(
        "mask", "box_blend", disable_logging=disable_logging)(
            "none",
            self.output_size,
            configfile=self.configfile,
            config=config)
    self.adjustments["mask"] = get_conv(
        "mask", "mask_blend", disable_logging=disable_logging)(
            self.args.mask_type,
            self.output_size,
            self.output_has_mask,
            configfile=self.configfile,
            config=config)
    # Optional plugins: skipped when unset or explicitly "none"
    color = self.args.color_adjustment
    if color not in (None, "none"):
        self.adjustments["color"] = get_conv(
            "color", color, disable_logging=disable_logging)(
                configfile=self.configfile, config=config)
    scaling = self.args.scaling
    if scaling not in (None, "none"):
        self.adjustments["scaling"] = get_conv(
            "scaling", scaling, disable_logging=disable_logging)(
                configfile=self.configfile, config=config)
    logger.debug("Loaded plugins: %s", self.adjustments)
def _load_plugins(self, config=None, disable_logging=False):
    """ Load the requested adjustment plugins.

    Loads the :mod:`plugins.converter` plugins that have been requested for this
    conversion session.

    Parameters
    ----------
    config: :class:`lib.config.FaceswapConfig`, optional
        Optional pre-loaded :class:`lib.config.FaceswapConfig`. If passed, then this will be
        used over any configuration on disk. If ``None`` then it is ignored. Default: ``None``
    disable_logging: bool, optional
        Plugin loader outputs logging info every time a plugin is loaded. Set to ``True``
        to suppress these messages otherwise ``False``. Default: ``False``
    """
    logger.debug("Loading plugins. config: %s", config)
    get_conv = PluginLoader.get_converter
    self._adjustments["box"] = get_conv(
        "mask", "box_blend", disable_logging=disable_logging)(
            self._output_size,
            configfile=self._configfile,
            config=config)
    # ``mask_type`` may not exist on the namespace for every entry point
    self._adjustments["mask"] = get_conv(
        "mask", "mask_blend", disable_logging=disable_logging)(
            getattr(self._args, "mask_type", None),
            self._output_size,
            self._coverage_ratio,
            configfile=self._configfile,
            config=config)
    # Optional plugins: only loaded when present and not disabled with "none"
    color = getattr(self._args, "color_adjustment", None)
    if color not in (None, "none"):
        self._adjustments["color"] = get_conv(
            "color", color, disable_logging=disable_logging)(
                configfile=self._configfile, config=config)
    scaling = getattr(self._args, "scaling", None)
    if scaling not in (None, "none"):
        self._adjustments["scaling"] = get_conv(
            "scaling", scaling, disable_logging=disable_logging)(
                configfile=self._configfile, config=config)
    logger.debug("Loaded plugins: %s", self._adjustments)
def load_converter(self, model):
    """ Load the requested converter for conversion.

    Looks up the converter plugin named on the command line and instantiates
    it with the model's converter function and the session arguments.
    """
    plugin = PluginLoader.get_converter(self.args.converter)
    swap_func = model.converter(self.args.swap_model)
    return plugin(swap_func, model=model, arguments=self.args)
def load_plugins(self, output_size, output_has_mask):
    """ Load the requested adjustment plugins into ``self.adjustments``.

    Box and mask blend plugins are mandatory; color and scaling plugins are
    only instantiated when enabled via the command-line arguments.
    """
    logger.debug("Loading plugins")
    get_conv = PluginLoader.get_converter
    # Box blend always runs with the fixed "none" mask type
    self.adjustments["box"] = get_conv("mask", "box_blend")("none", output_size)
    self.adjustments["mask"] = get_conv("mask", "mask_blend")(
        self.args.mask_type, output_size, output_has_mask)
    # Optional plugins: skipped when unset or explicitly "none"
    color = self.args.color_adjustment
    if color not in (None, "none"):
        self.adjustments["color"] = get_conv("color", color)()
    scaling = self.args.scaling
    if scaling not in (None, "none"):
        self.adjustments["scaling"] = get_conv("scaling", scaling)()
    logger.debug("Loaded plugins: %s", self.adjustments)
def get_writer(self):
    """ Return the writer plugin.

    Builds the positional argument list the selected writer expects and
    instantiates the plugin with it.
    """
    writer = self.args.writer
    writer_args = [self.args.output_dir]
    if writer in ("ffmpeg", "gif"):
        writer_args.append(self.total_count)
    if writer == "ffmpeg":
        # ffmpeg needs a reference clip for frame rate/audio: the input itself
        # when converting a video, otherwise the user-supplied reference video
        source = (self.args.input_dir if self.images.is_video
                  else self.args.reference_video)
        writer_args.append(source)
    logger.debug("Writer args: %s", writer_args)
    return PluginLoader.get_converter("writer", writer)(*writer_args)
def get_writer(self):
    """ Return the writer plugin.

    Builds the positional argument list the selected writer expects, then
    instantiates the plugin with those arguments and any config file override.
    """
    writer = self.args.writer
    writer_args = [self.args.output_dir]
    if writer in ("ffmpeg", "gif"):
        writer_args += [self.total_count, self.frame_ranges]
    if writer == "ffmpeg":
        # ffmpeg needs a reference clip: the input itself for video sources,
        # otherwise the user-supplied reference video
        writer_args.append(self.args.input_dir if self.images.is_video
                           else self.args.reference_video)
    logger.debug("Writer args: %s", writer_args)
    # Not every entry point puts "configfile" on the args namespace
    configfile = getattr(self.args, "configfile", None)
    return PluginLoader.get_converter("writer", writer)(*writer_args,
                                                        configfile=configfile)
def load_converter(self, model):
    """ Load the requested converter for conversion.

    Instantiates the converter plugin named on the command line, wiring in
    the model's converter function and the masking/blending options from
    the session arguments.
    """
    arguments = self.args
    plugin = PluginLoader.get_converter(arguments.converter)
    return plugin(model.converter(False),
                  trainer=arguments.trainer,
                  blur_size=arguments.blur_size,
                  seamless_clone=arguments.seamless_clone,
                  sharpen_image=arguments.sharpen_image,
                  mask_type=arguments.mask_type,
                  erosion_kernel_size=arguments.erosion_kernel_size,
                  match_histogram=arguments.match_histogram,
                  smooth_mask=arguments.smooth_mask,
                  avg_color_adjust=arguments.avg_color_adjust,
                  draw_transparent=arguments.draw_transparent)
def _get_writer(self):
    """ Load the selected writer plugin.

    Returns
    -------
    :mod:`plugins.convert.writer` plugin
        The requested writer plugin
    """
    writer = self._args.writer
    writer_args = [self._args.output_dir]
    if writer in ("ffmpeg", "gif"):
        writer_args += [self._total_count, self._frame_ranges]
    if writer == "ffmpeg":
        # ffmpeg needs a reference clip: the input itself for video sources,
        # otherwise the user-supplied reference video
        writer_args.append(self._args.input_dir if self._images.is_video
                           else self._args.reference_video)
    logger.debug("Writer args: %s", writer_args)
    # Not every entry point puts "configfile" on the args namespace
    configfile = getattr(self._args, "configfile", None)
    return PluginLoader.get_converter("writer", writer)(*writer_args,
                                                        configfile=configfile)
def convert(self, video_file, swap_model=False, duration=None, start_time=None,
            use_gan=False, face_filter=False, photos=True, crop_x=None, width=None,
            side_by_side=False):
    """ Run a face-swap conversion over a video or a directory of photos.

    Loads the trained model and its converter, optionally filters which faces
    are swapped, then either processes a video clip with moviepy (``photos``
    falsy) or converts every image file under the media path (``photos``
    truthy, the default).

    Parameters
    ----------
    video_file: str
        Name used to resolve the media path via ``self._video_path`` and as
        the output filename for video conversion.
    swap_model: bool, optional
        Passed to ``model.load``; also selects person B as the face filter
        target instead of person A. Default: ``False``
    duration: float, optional
        If set, only convert ``duration`` seconds starting at ``start_time``.
    start_time: float, optional
        Clip start used together with ``duration``.
    use_gan: bool, optional
        Use the "GAN" model/converter plugins instead of "Original"/"Masked".
    face_filter: bool, optional
        Only swap faces that match the reference faces of the target person.
    photos: bool, optional
        Treat the media path as a directory of still images. Default: ``True``
    crop_x: optional
        Truthy value crops the clip to its left half before processing.
    width: int, optional
        If set, resize the clip to this width before processing.
    side_by_side: bool, optional
        Stack the original and swapped clips vertically with captions.
    """
    # Magic incantation to not have tensorflow blow up with an out of memory error.
    # NOTE(review): TF 1.x API (ConfigProto/Session) with the keras TF backend —
    # this block predates TF 2.x and will not run on it.
    import tensorflow as tf
    import keras.backend.tensorflow_backend as K
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = "0"
    K.set_session(tf.Session(config=config))

    # Load model: plugin names switch together when the GAN pipeline is requested
    model_name = "Original"
    converter_name = "Masked"
    if use_gan:
        model_name = "GAN"
        converter_name = "GAN"
    model = PluginLoader.get_model(model_name)(Path(
        self._model_path(use_gan)))
    if not model.load(swap_model):
        print(
            'model Not Found! A valid model must be provided to continue!')
        exit(1)

    # Load converter with fixed blending/masking settings (not user-configurable here)
    converter = PluginLoader.get_converter(converter_name)
    converter = converter(model.converter(False),
                          blur_size=8,
                          seamless_clone=True,
                          mask_type="facehullandrect",
                          erosion_kernel_size=None,
                          smooth_mask=True,
                          avg_color_adjust=True)

    # Load face filter: match against the reference faces of the person being swapped in
    filter_person = self._person_a
    if swap_model:
        filter_person = self._person_b
    filter = FaceFilter(self._people[filter_person]['faces'])

    # Define conversion method per frame
    def _convert_frame(frame, convert_colors=True):
        # moviepy supplies RGB frames; OpenCV-based code expects BGR, so
        # convert on the way in and back on the way out (skipped for photos,
        # which are read with cv2 and already BGR)
        if convert_colors:
            frame = cv2.cvtColor(
                frame, cv2.COLOR_BGR2RGB)  # Swap RGB to BGR to work with OpenCV
        # NOTE(review): iterating DetectedFace(frame, "cnn") directly —
        # presumably it is iterable over detected faces; confirm in its definition
        for face in DetectedFace(frame, "cnn"):
            if (not face_filter) or (face_filter and filter.check(face)):
                frame = converter.patch_image(frame, face)
                frame = frame.astype(numpy.float32)
        if convert_colors:
            frame = cv2.cvtColor(
                frame, cv2.COLOR_BGR2RGB)  # Swap RGB to BGR to work with OpenCV
        return frame

    def _convert_helper(get_frame, t):
        # moviepy ``fl`` filter signature: (get_frame, t) -> frame
        return _convert_frame(get_frame(t))

    media_path = self._video_path({'name': video_file})
    if not photos:
        # Process video; start loading the video clip
        video = VideoFileClip(media_path)

        # If a duration is set, trim clip
        if duration:
            video = video.subclip(start_time, start_time + duration)

        # Resize clip before processing
        if width:
            video = video.resize(width=width)

        # Crop clip if desired
        if crop_x:
            video = video.fx(crop, x2=video.w / 2)

        # Kick off convert frames for each frame
        new_video = video.fl(_convert_helper)

        # Stack clips side by side
        if side_by_side:
            def add_caption(caption, clip):
                # Overlay a semi-transparent caption bar on the clip
                text = (TextClip(caption,
                                 font='Amiri-regular',
                                 color='white',
                                 fontsize=80).margin(40).set_duration(
                                     clip.duration).on_color(
                                         color=(0, 0, 0), col_opacity=0.6))
                return CompositeVideoClip([clip, text])
            video = add_caption("Original", video)
            new_video = add_caption("Swapped", new_video)
            final_video = clips_array([[video], [new_video]])
        else:
            final_video = new_video

        # Resize clip after processing
        #final_video = final_video.resize(width = (480 * 2))

        # Write video
        output_path = os.path.join(self.OUTPUT_PATH, video_file)
        final_video.write_videofile(output_path, rewrite_audio=True)

        # Clean up: drop references to the (potentially large) clip objects
        del video
        del new_video
        del final_video
    else:
        # Process a directory of photos: convert each image in place (BGR,
        # so no color conversion) and write it under OUTPUT_PATH
        for face_file in os.listdir(media_path):
            face_path = os.path.join(media_path, face_file)
            image = cv2.imread(face_path)
            image = _convert_frame(image, convert_colors=False)
            cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)