def process(self):
    """ Perform the live Restore process.

    Loads the "Original" model from ``self.model_dir``, precomputes landmarks
    and a face mask for the static source image once, then swaps that face
    onto every webcam frame until the user presses <Enter>.
    """
    logger.info("Starting live swap...")
    if not os.path.exists(self.model_dir):
        logger.error("Folder does not exist: '%s'", self.model_dir)
        exit(1)

    model_name = "Original"
    model = PluginLoader.get_model(model_name)(Path(self.model_dir))
    if not model.load():
        # Route through the logger (not print) so the failure is recorded
        # consistently with the other error paths in this method
        logger.error("model Not Found! A valid model must be provided to continue!")
        exit(1)

    cap = cv2.VideoCapture(0)
    # Precompute data for the 2nd (source) image once, outside the loop
    im2 = cv2.imread(IMG_PATH, cv2.IMREAD_COLOR)
    im2landmarks = self.get_landmarks(im2)
    mask2 = self.get_face_mask(im2, im2landmarks)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                # Camera failure / disconnect: stop instead of flipping None
                logger.error("Could not read frame from webcam")
                break
            frame = cv2.flip(frame, 1)  # mirror for a natural webcam view
            try:
                frame = self.face_swap(frame, im2, im2landmarks, mask2,
                                       USE_COLOR_CORRECTION, USE_MASK)
            except RuntimeError as e:
                # Print the error onto the image rather than killing the loop
                frame = cv2.putText(frame, str(e), (5, 20),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (255, 255, 255), 1, cv2.LINE_AA)
            cv2.imshow("Face", frame)
            if cv2.waitKey(1) == 13:  # 13 == <Enter>
                break
    finally:
        # Always release the camera and destroy windows, even on error
        cap.release()
        cv2.destroyAllWindows()
def load_model(self):
    """ Instantiate and return the model plugin requested for training. """
    logger.debug("Loading Model")
    args = self.args
    model_dir = get_folder(args.model_dir)
    # Collect every plugin option in one place before instantiation
    options = dict(
        gpus=args.gpus,
        configfile=getattr(args, "configfile", None),
        snapshot_interval=args.snapshot_interval,
        no_logs=args.no_logs,
        warp_to_landmarks=args.warp_to_landmarks,
        augment_color=not args.no_augment_color,
        no_flip=args.no_flip,
        training_image_size=self.image_size,
        alignments_paths=self.alignments_paths,
        preview_scale=args.preview_scale,
        pingpong=args.pingpong,
        memory_saving_gradients=args.memory_saving_gradients,
        optimizer_savings=args.optimizer_savings,
        predict=False)
    model = PluginLoader.get_model(self.trainer_name)(model_dir, **options)
    logger.debug("Loaded Model")
    return model
def load_model(self):
    """ Instantiate and return the model plugin requested for training. """
    logger.debug("Loading Model")
    args = self.args
    model_dir = get_folder(args.model_dir)
    # "configfile" is only present for some front-ends, so probe for it
    configfile = args.configfile if hasattr(args, "configfile") else None
    model = PluginLoader.get_model(self.trainer_name)(
        model_dir,
        args.gpus,
        configfile=configfile,
        no_logs=args.no_logs,
        warp_to_landmarks=args.warp_to_landmarks,
        augment_color=args.augment_color,
        no_flip=args.no_flip,
        training_image_size=self.image_size,
        alignments_paths=self.alignments_paths,
        preview_scale=args.preview_scale,
        pingpong=args.pingpong,
        memory_saving_gradients=args.memory_saving_gradients,
        predict=False,
        # One side per input folder -- presumably "A"/"B" sides; TODO confirm
        num_of_sides=len(self.args.input_a))
    logger.debug("Loaded Model")
    return model
def _load_model(self):
    """ Instantiate and build the model plugin named by ``self._model_name``.

    Returns
    -------
    plugin
        The built model plugin
    """
    # debug, not warning: loading is routine progress, not a problem condition
    logging.debug("Loading Model...")
    model = PluginLoader.get_model(self._model_name)(
        model_dir=None,
        arguments=None)
    model.build()
    return model
def load_model(self):
    """ Load and return the model requested for training. """
    folder = get_folder(self.args.model_dir)
    plugin_cls = PluginLoader.get_model(self.trainer_name)
    model = plugin_cls(folder, self.args.gpus)
    model.load(swapped=False)
    return model
def load_model(self):
    """ Load and return the model requested for conversion. """
    logger.debug("Loading Model")
    folder = get_folder(self.args.model_dir)
    plugin_cls = PluginLoader.get_model(self.args.trainer)
    model = plugin_cls(folder, self.args.gpus, predict=True)
    logger.debug("Loaded Model")
    return model
def load_model(self):
    """ Load and return the model requested for conversion.

    Exits the process if the model folder does not exist.
    """
    logger.debug("Loading Model")
    folder = get_folder(self.args.model_dir, make_folder=False)
    if not folder:
        logger.error("%s does not exist.", self.args.model_dir)
        exit(1)
    trainer = self.get_trainer(folder)
    plugin_cls = PluginLoader.get_model(trainer)
    model = plugin_cls(folder, self.args.gpus, predict=True)
    logger.debug("Loaded Model")
    return model
def load_model(self):
    """ Load and return the model requested for conversion.

    Raises
    ------
    FaceswapError
        If the model folder does not exist
    """
    logger.debug("Loading Model")
    model_dir = get_folder(self.args.model_dir, make_folder=False)
    if not model_dir:
        raise FaceswapError("{} does not exist.".format(self.args.model_dir))
    trainer = self.get_trainer(model_dir)
    # Not every front-end exposes a gpu count; default to a single GPU
    gpus = getattr(self.args, "gpus", 1)
    model = PluginLoader.get_model(trainer)(model_dir, gpus, predict=True)
    logger.debug("Loaded Model")
    return model
def load_model(self):
    """ Load the model requested for conversion, exiting on failure. """
    plugin_cls = PluginLoader.get_model(self.args.trainer)
    model = plugin_cls(get_folder(self.args.model_dir), self.args.gpus)
    if not model.load(self.args.swap_model):
        logger.error("Model Not Found! A valid model "
                     "must be provided to continue!")
        exit(1)
    return model
def load_model(self):
    """ Instantiate and return the model plugin requested for training. """
    logger.debug("Loading Model")
    args = self.args
    model = PluginLoader.get_model(self.trainer_name)(
        get_folder(args.model_dir),
        args.gpus,
        no_logs=args.no_logs,
        warp_to_landmarks=args.warp_to_landmarks,
        no_flip=args.no_flip,
        training_image_size=self.image_size,
        alignments_paths=self.alignments_paths,
        preview_scale=args.preview_scale)
    logger.debug("Loaded Model")
    return model
def _load_model(self):
    """ Load the model requested for training.

    Returns
    -------
    :file:`plugins.train.model` plugin
        The requested model plugin
    """
    logger.debug("Loading Model")
    plugin_cls = PluginLoader.get_model(self._args.trainer)
    model = plugin_cls(get_folder(self._args.model_dir),
                       self._args,
                       predict=False)
    model.build()
    logger.debug("Loaded Model")
    return model
def _load_model(self):
    """ Load the Faceswap model.

    Returns
    -------
    :mod:`plugins.train.model` plugin
        The trained model in the specified model folder

    Raises
    ------
    FaceswapError
        If the model folder does not exist
    """
    logger.debug("Loading Model")
    model_dir = get_folder(self._args.model_dir, make_folder=False)
    if not model_dir:
        raise FaceswapError("{} does not exist.".format(self._args.model_dir))
    trainer = self._get_model_name(model_dir)
    # Not every front-end exposes a gpu count; default to a single GPU
    gpus = getattr(self._args, "gpus", 1)
    model = PluginLoader.get_model(trainer)(model_dir, gpus, predict=True)
    logger.debug("Loaded Model")
    return model
def load_model(self):
    """ Instantiate and return the model plugin requested for training. """
    logger.debug("Loading Model")
    args = self.args
    # Gather the plugin options before instantiation for readability
    options = {
        "no_logs": args.no_logs,
        "warp_to_landmarks": args.warp_to_landmarks,
        "no_flip": args.no_flip,
        "training_image_size": self.image_size,
        "alignments_paths": self.alignments_paths,
        "preview_scale": args.preview_scale,
        "pingpong": args.pingpong,
        "memory_saving_gradients": args.memory_saving_gradients,
        "predict": False}
    model = PluginLoader.get_model(self.trainer_name)(
        get_folder(args.model_dir), args.gpus, **options)
    logger.debug("Loaded Model")
    return model
def load_model(self):
    """ Instantiate and return the model plugin requested for training. """
    logger.debug("Loading Model")
    args = self.args
    # Gather the plugin options before instantiation for readability
    options = {
        "no_logs": args.no_logs,
        "warp_to_landmarks": args.warp_to_landmarks,
        "augment_color": args.augment_color,
        "no_flip": args.no_flip,
        "training_image_size": self.image_size,
        "alignments_paths": self.alignments_paths,
        "preview_scale": args.preview_scale,
        "pingpong": args.pingpong,
        "memory_saving_gradients": args.memory_saving_gradients,
        "predict": False}
    model = PluginLoader.get_model(self.trainer_name)(
        get_folder(args.model_dir), args.gpus, **options)
    logger.debug("Loaded Model")
    return model
def _load_model(self):
    """ Load the model requested for training, built inside a TPU
    distribution strategy scope.

    Returns
    -------
    :file:`plugins.train.model` plugin
        The requested model plugin
    """
    logger.debug("Loading Model")
    model_dir = get_folder(self._args.model_dir)
    # "configfile" is only present for some front-ends, so probe for it
    configfile = self._args.configfile if hasattr(self._args, "configfile") else None
    # CLI flag is the negative ("no_augment_color"); invert for the plugin
    augment_color = not self._args.no_augment_color
    # TPU bring-up: resolve the cluster, connect, then initialize the TPU
    # system. This ordering is required before a TPUStrategy can be created.
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_host(resolver.master())
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.experimental.TPUStrategy(resolver)
    # The model's variables must be created under the strategy scope so they
    # are placed on the TPU replicas
    with strategy.scope():
        # NOTE(review): self.trainer_name mixes with the "_"-prefixed
        # attributes used elsewhere in this method -- confirm it exists
        model = PluginLoader.get_model(self.trainer_name)(
            model_dir,
            gpus=self._args.gpus,
            configfile=configfile,
            snapshot_interval=self._args.snapshot_interval,
            no_logs=self._args.no_logs,
            warp_to_landmarks=self._args.warp_to_landmarks,
            augment_color=augment_color,
            no_flip=self._args.no_flip,
            training_image_size=self._image_size,
            alignments_paths=self._alignments_paths,
            preview_scale=self._args.preview_scale,
            pingpong=self._args.pingpong,
            memory_saving_gradients=self._args.memory_saving_gradients,
            optimizer_savings=self._args.optimizer_savings,
            predict=False)
    logger.debug("Loaded Model")
    return model
def convert(self, video_file, swap_model=False, duration=None, start_time=None,
            use_gan=False, face_filter=False, photos=True, crop_x=None,
            width=None, side_by_side=False):
    """ Convert a video (or a directory of photos) by swapping faces.

    Parameters
    ----------
    video_file: str
        Name of the media to look up via ``self._video_path``
    swap_model: bool
        Swap the direction of the conversion (person A <-> person B)
    duration: float, optional
        If set, only convert ``duration`` seconds starting at ``start_time``
    start_time: float, optional
        Clip start time, used together with ``duration``
    use_gan: bool
        Use the "GAN" model/converter pair instead of "Original"/"Masked"
    face_filter: bool
        Only patch faces that match the reference faces for the person
    photos: bool
        Treat ``video_file`` as a directory of photos instead of a video
    crop_x: optional
        If truthy, crop the clip to its left half before converting
    width: int, optional
        If set, resize the clip to this width before converting
    side_by_side: bool
        Stack the original and swapped clips vertically with captions
    """
    # Magic incantation to not have tensorflow blow up with an out of memory error.
    import tensorflow as tf
    import keras.backend.tensorflow_backend as K
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = "0"
    K.set_session(tf.Session(config=config))

    # Load model: GAN uses its own converter, otherwise the masked converter
    model_name = "Original"
    converter_name = "Masked"
    if use_gan:
        model_name = "GAN"
        converter_name = "GAN"
    model = PluginLoader.get_model(model_name)(Path(
        self._model_path(use_gan)))
    if not model.load(swap_model):
        print(
            'model Not Found! A valid model must be provided to continue!')
        exit(1)

    # Load converter and configure how patches are blended into the frame
    converter = PluginLoader.get_converter(converter_name)
    converter = converter(model.converter(False),
                          blur_size=8,
                          seamless_clone=True,
                          mask_type="facehullandrect",
                          erosion_kernel_size=None,
                          smooth_mask=True,
                          avg_color_adjust=True)

    # Load face filter for the person whose face should be replaced
    filter_person = self._person_a
    if swap_model:
        filter_person = self._person_b
    # NOTE(review): "filter" shadows the builtin of the same name
    filter = FaceFilter(self._people[filter_person]['faces'])

    # Define conversion method per frame
    def _convert_frame(frame, convert_colors=True):
        if convert_colors:
            # Swap RGB to BGR to work with OpenCV
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        for face in DetectedFace(frame, "cnn"):
            # Patch every detected face, or only filtered matches when
            # face_filter is enabled
            if (not face_filter) or (face_filter and filter.check(face)):
                frame = converter.patch_image(frame, face)
                frame = frame.astype(numpy.float32)
        if convert_colors:
            # Swap back so the caller gets the original channel order
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return frame

    # moviepy's fl() passes (get_frame, t); adapt to the per-frame converter
    def _convert_helper(get_frame, t):
        return _convert_frame(get_frame(t))

    media_path = self._video_path({'name': video_file})
    if not photos:
        # Process video; start loading the video clip
        video = VideoFileClip(media_path)

        # If a duration is set, trim clip
        if duration:
            video = video.subclip(start_time, start_time + duration)

        # Resize clip before processing
        if width:
            video = video.resize(width=width)

        # Crop clip if desired (keep the left half)
        if crop_x:
            video = video.fx(crop, x2=video.w / 2)

        # Kick off convert frames for each frame
        new_video = video.fl(_convert_helper)

        # Stack clips side by side
        if side_by_side:
            # Overlay a caption band on top of a clip
            def add_caption(caption, clip):
                text = (TextClip(caption,
                                 font='Amiri-regular',
                                 color='white',
                                 fontsize=80).margin(40).set_duration(
                                     clip.duration).on_color(
                                         color=(0, 0, 0),
                                         col_opacity=0.6))
                return CompositeVideoClip([clip, text])
            video = add_caption("Original", video)
            new_video = add_caption("Swapped", new_video)
            final_video = clips_array([[video], [new_video]])
        else:
            final_video = new_video

        # Resize clip after processing
        #final_video = final_video.resize(width = (480 * 2))

        # Write video
        # NOTE(review): "rewrite_audio" is not a documented
        # write_videofile argument in current moviepy -- confirm version
        output_path = os.path.join(self.OUTPUT_PATH, video_file)
        final_video.write_videofile(output_path, rewrite_audio=True)

        # Clean up large clip objects eagerly
        del video
        del new_video
        del final_video
    else:
        # Process a directory of photos
        for face_file in os.listdir(media_path):
            face_path = os.path.join(media_path, face_file)
            image = cv2.imread(face_path)
            # Photos are already BGR from cv2.imread, so skip the swap
            image = _convert_frame(image, convert_colors=False)
            cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
training_image_size = 256 alignments_paths = None preview_scale = 50 pingpong = False memory_saving_gradients = False optimizer_savings = False predict = False model = PluginLoader.get_model(trainer_name)( model_dir, gpus=gpus, configfile=configfile, snapshot_interval=snapshot_interval, no_logs=no_logs, warp_to_landmarks=warp_to_landmarks, augment_color=augment_color, no_flip=no_flip, training_image_size=training_image_size, alignments_paths=alignments_paths, preview_scale=preview_scale, pingpong=pingpong, memory_saving_gradients=memory_saving_gradients, optimizer_savings=optimizer_savings, predict=predict) model_sources = [ np.zeros( (batch_size * 64, input_shape[0], input_shape[1], input_shape[2])), np.zeros((batch_size * 64, input_shape[0], input_shape[1], 1)) ] model_targets = [