def __init__(self, is_interactive, converter_config, frames, output_path):
    if len(frames) == 0:
        raise ValueError("len(frames) == 0")

    super().__init__('Converter',
                     ConvertSubprocessor.Cli,
                     86400 if CONVERTER_DEBUG else 60,
                     io_loop_sleep_time=0.001,
                     initialize_subprocesses_in_serial=False)

    self.is_interactive = is_interactive
    self.converter_config = converter_config

    # Dummy predict and sleep to let TensorFlow cache its kernels.
    # Without this warm-up, conversion can sometimes run 2x slower.
    self.converter_config.predictor_func(dummy_predict=True)
    time.sleep(2)

    self.predictor_func_host, self.predictor_func = SubprocessFunctionCaller.make_pair(
        self.converter_config.predictor_func)
    self.converter_config.predictor_func = None

    self.dcscn = None
    self.ranksrgan = None

    def superres_func(mode, *args, **kwargs):
        # Lazily construct the RankSRGAN model on first use.
        if mode == 1:
            if self.ranksrgan is None:
                self.ranksrgan = imagelib.RankSRGAN()
            return self.ranksrgan.upscale(*args, **kwargs)

    self.dcscn_host, self.superres_func = SubprocessFunctionCaller.make_pair(superres_func)

    self.frames = frames
    self.output_path = output_path
    self.prefetch_frame_count = self.process_count = min(6, multiprocessing.cpu_count())

    self.frames_idxs = [*range(len(self.frames))]
    self.frames_done_idxs = []

    digits = [str(i) for i in range(10)]
    for i in range(len(self.frames)):
        frame = self.frames[i]
        frame.idx = i
        inp_stem = Path(frame.frame_info.filename).stem
        # If the input stem contains any non-digit characters,
        # fall back to sequential zero-padded output names.
        if len([True for symbol in inp_stem if symbol not in digits]) > 0:
            frame.output_filename = self.output_path / ('%.5d.png' % (i + 1))
        else:
            frame.output_filename = self.output_path / (inp_stem + '.png')

    frames[0].cfg = self.converter_config.copy()
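# The host/proxy pair returned by SubprocessFunctionCaller.make_pair lets code running
# in a worker process transparently call a function that lives in the parent process
# (here, the predictor with its TF session). A minimal sketch of such a pairing using
# multiprocessing queues; this is an illustrative stand-in, not the repo's actual
# implementation, and the parent must keep pumping process_messages() in its IO loop
# (which is what the small io_loop_sleep_time above hints at):
import multiprocessing

class _HostSide:
    """Runs in the parent process and answers call requests from workers."""
    def __init__(self, func, request_q, response_q):
        self.func = func
        self.request_q = request_q
        self.response_q = response_q

    def process_messages(self):
        # Drain pending requests and send back results.
        while not self.request_q.empty():
            args, kwargs = self.request_q.get()
            self.response_q.put(self.func(*args, **kwargs))

class _ClientSide:
    """Lightweight proxy handed to worker processes in place of the real function."""
    def __init__(self, request_q, response_q):
        self.request_q = request_q
        self.response_q = response_q

    def __call__(self, *args, **kwargs):
        self.request_q.put((args, kwargs))
        return self.response_q.get()  # blocks until the host replies

def make_pair(func):
    request_q = multiprocessing.Queue()
    response_q = multiprocessing.Queue()
    return _HostSide(func, request_q, response_q), _ClientSide(request_q, response_q)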
def __init__(self, is_interactive, converter_config, frames, output_path):
    if len(frames) == 0:
        raise ValueError("len(frames) == 0")

    super().__init__('Converter',
                     ConvertSubprocessor.Cli,
                     86400 if CONVERTER_DEBUG else 60,
                     io_loop_sleep_time=0.001,
                     initialize_subprocesses_in_serial=False)

    self.is_interactive = is_interactive
    self.converter_config = converter_config

    # Dummy predict and sleep to let TensorFlow cache its kernels.
    # Without this warm-up, conversion can sometimes run 2x slower.
    self.converter_config.predictor_func(dummy_predict=True)
    time.sleep(2)

    self.predictor_func_host, self.predictor_func = SubprocessFunctionCaller.make_pair(
        self.converter_config.predictor_func)
    self.converter_config.predictor_func = None

    self.dcscn = None

    def DCSCN_upscale(*args, **kwargs):
        # Lazily construct the DCSCN model on first use.
        if self.dcscn is None:
            self.dcscn = imagelib.DCSCN()
        return self.dcscn.upscale(*args, **kwargs)

    self.dcscn_host, self.dcscn_upscale_func = SubprocessFunctionCaller.make_pair(DCSCN_upscale)

    self.frames = frames
    self.output_path = output_path
    self.prefetch_frame_count = self.process_count = min(6, multiprocessing.cpu_count())

    self.frames_idxs = [*range(len(self.frames))]
    self.frames_done_idxs = []

    for i in range(len(self.frames)):
        frame = self.frames[i]
        frame.idx = i
        frame.output_filename = self.output_path / ('%.5d.png' % i)

    frames[0].cfg = self.converter_config.copy()
def __init__(self, predictor_func, predictor_input_size=0):
    super().__init__(predictor_func, Converter.TYPE_FACE_AVATAR)

    self.predictor_input_size = predictor_input_size

    # Dummy predict and sleep to let TensorFlow cache its kernels.
    # Without this warm-up, conversion runs 2x slower.
    predictor_func(np.zeros((predictor_input_size, predictor_input_size, 3), dtype=np.float32),
                   np.zeros((predictor_input_size, predictor_input_size, 1), dtype=np.float32))
    time.sleep(2)

    predictor_func_host, predictor_func = SubprocessFunctionCaller.make_pair(predictor_func)
    self.predictor_func_host = AntiPickler(predictor_func_host)
    self.predictor_func = predictor_func
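# The warm-up above exists because the first call into a TF-backed predictor pays a
# one-time kernel compilation/caching cost. A generic, pure-Python timing sketch of
# that effect; SlowFirstCall is a simulated stand-in, not part of the repo:
import time

def measure(func, *args):
    t0 = time.perf_counter()
    func(*args)
    return time.perf_counter() - t0

class SlowFirstCall:
    def __init__(self):
        self.warmed_up = False

    def __call__(self, x):
        if not self.warmed_up:
            time.sleep(0.5)  # simulate one-time kernel compilation/caching
            self.warmed_up = True
        return x

predictor = SlowFirstCall()
cold = measure(predictor, 0)  # includes the one-time setup cost
warm = measure(predictor, 0)  # subsequent calls are fast
print("cold call: %.3fs, warm call: %.3fs" % (cold, warm))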
def __init__(self, converter, input_path_image_paths, output_path, alignments, debug=False):
    super().__init__('Converter', ConvertSubprocessor.Cli, 86400 if debug else 60)

    self.converter = converter
    self.host_processor, self.cli_func = SubprocessFunctionCaller.make_pair(
        self.converter.predictor_func)
    self.process_converter = self.converter.copy_and_set_predictor(self.cli_func)

    self.input_data = self.input_path_image_paths = input_path_image_paths
    self.output_path = output_path
    self.alignments = alignments
    self.debug = debug

    self.files_processed = 0
    self.faces_processed = 0
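# copy_and_set_predictor produces a converter that is safe to send to workers: the
# copy carries the subprocess proxy instead of the real predictor, which stays in the
# host. A hedged sketch of what such a method might do (assumption for illustration,
# not the repo's actual implementation):
import copy

def copy_and_set_predictor(converter, predictor_func):
    result = copy.copy(converter)        # shallow copy; shared config stays shared
    result.predictor_func = predictor_func  # swap in the worker-side proxy
    return result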
def __init__(self, is_interactive, converter_session_filepath, predictor_func,
             predictor_input_shape, converter_config, frames, output_path, model_iter):
    if len(frames) == 0:
        raise ValueError("len(frames) == 0")

    super().__init__('Converter',
                     ConvertSubprocessor.Cli,
                     86400 if CONVERTER_DEBUG else 60,
                     io_loop_sleep_time=0.001,
                     initialize_subprocesses_in_serial=False)

    self.is_interactive = is_interactive
    self.converter_session_filepath = Path(converter_session_filepath)
    self.converter_config = converter_config

    # Dummy predict and sleep to let TensorFlow cache its kernels.
    # Without this warm-up, conversion can sometimes run 2x slower.
    predictor_func(dummy_predict=True)
    time.sleep(2)

    self.predictor_func_host, self.predictor_func = SubprocessFunctionCaller.make_pair(predictor_func)
    self.predictor_input_shape = predictor_input_shape

    self.dcscn = None
    self.ranksrgan = None

    def superres_func(mode, *args, **kwargs):
        # Lazily construct the RankSRGAN model on first use.
        if mode == 1:
            if self.ranksrgan is None:
                self.ranksrgan = imagelib.RankSRGAN()
            return self.ranksrgan.upscale(*args, **kwargs)

    self.dcscn_host, self.superres_func = SubprocessFunctionCaller.make_pair(superres_func)

    self.output_path = output_path
    self.model_iter = model_iter
    self.prefetch_frame_count = self.process_count = min(6, multiprocessing.cpu_count())

    session_data = None
    if self.is_interactive and self.converter_session_filepath.exists():
        if io.input_bool("Use saved session? (y/n skip:y) : ", True):
            try:
                with open(str(self.converter_session_filepath), "rb") as f:
                    session_data = pickle.loads(f.read())
            except Exception as e:
                pass

    self.frames = frames
    self.frames_idxs = [*range(len(self.frames))]
    self.frames_done_idxs = []

    if self.is_interactive and session_data is not None:
        s_frames = session_data.get('frames', None)
        s_frames_idxs = session_data.get('frames_idxs', None)
        s_frames_done_idxs = session_data.get('frames_done_idxs', None)
        s_model_iter = session_data.get('model_iter', None)

        frames_equal = (s_frames is not None) and \
                       (s_frames_idxs is not None) and \
                       (s_frames_done_idxs is not None) and \
                       (s_model_iter is not None) and \
                       (len(frames) == len(s_frames))

        if frames_equal:
            for i in range(len(frames)):
                frame = frames[i]
                s_frame = s_frames[i]
                if frame.frame_info.filename != s_frame.frame_info.filename:
                    frames_equal = False
                if not frames_equal:
                    break

        if frames_equal:
            io.log_info('Using saved session from ' + '/'.join(self.converter_session_filepath.parts[-2:]))
            self.frames = s_frames
            self.frames_idxs = s_frames_idxs
            self.frames_done_idxs = s_frames_done_idxs

            if self.model_iter != s_model_iter:
                # The model has been trained further, so recompute all frames.
                for frame in self.frames:
                    frame.is_done = False

            if self.model_iter != s_model_iter or len(self.frames_idxs) == 0:
                # Rewind to the beginning if the model has been trained further
                # or if all frames are already done.
                while len(self.frames_done_idxs) > 0:
                    prev_frame = self.frames[self.frames_done_idxs.pop()]
                    self.frames_idxs.insert(0, prev_frame.idx)

            if len(self.frames_idxs) != 0:
                cur_frame = self.frames[self.frames_idxs[0]]
                cur_frame.is_shown = False

        if not frames_equal:
            session_data = None

    if session_data is None:
        # No usable session: remove all images in output_path and start over.
        for filename in Path_utils.get_image_paths(self.output_path):
            Path(filename).unlink()

        frames[0].cfg = self.converter_config.copy()

    for i in range(len(self.frames)):
        frame = self.frames[i]
        frame.idx = i
        frame.output_filename = self.output_path / (Path(frame.frame_info.filename).stem + '.png')
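# The loader above expects a pickled dict with the keys 'frames', 'frames_idxs',
# 'frames_done_idxs' and 'model_iter'. A hedged sketch of the matching save step;
# the repo's actual writer may live elsewhere and differ in detail, this mirrors
# only what the reader needs:
import pickle

def save_session(filepath, frames, frames_idxs, frames_done_idxs, model_iter):
    session_data = {'frames': frames,
                    'frames_idxs': frames_idxs,
                    'frames_done_idxs': frames_done_idxs,
                    'model_iter': model_iter}
    with open(str(filepath), "wb") as f:
        f.write(pickle.dumps(session_data))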
def __init__(self, predictor_func,
             predictor_input_size=0,
             predictor_masked=True,
             face_type=FaceType.FULL,
             default_mode=4,
             base_erode_mask_modifier=0,
             base_blur_mask_modifier=0,
             default_erode_mask_modifier=0,
             default_blur_mask_modifier=0,
             clip_hborder_mask_per=0):

    super().__init__(predictor_func, Converter.TYPE_FACE)

    # Dummy predict and sleep to let TensorFlow cache its kernels.
    # Without this warm-up, conversion runs 2x slower.
    predictor_func(np.zeros((predictor_input_size, predictor_input_size, 3), dtype=np.float32))
    time.sleep(2)

    predictor_func_host, predictor_func = SubprocessFunctionCaller.make_pair(predictor_func)
    self.predictor_func_host = AntiPickler(predictor_func_host)
    self.predictor_func = predictor_func

    self.predictor_masked = predictor_masked
    self.predictor_input_size = predictor_input_size
    self.face_type = face_type
    self.clip_hborder_mask_per = clip_hborder_mask_per

    mode = io.input_int("Choose mode: (1) overlay, (2) hist match, (3) hist match bw, (4) seamless, (5) raw. Default - %d : " % (default_mode), default_mode)

    mode_dict = {1: 'overlay',
                 2: 'hist-match',
                 3: 'hist-match-bw',
                 4: 'seamless',
                 5: 'raw'}

    self.mode = mode_dict.get(mode, mode_dict[default_mode])

    if self.mode == 'raw':
        mode = io.input_int("Choose raw mode: (1) rgb, (2) rgb+mask (default), (3) mask only, (4) predicted only : ", 2)
        self.raw_mode = {1: 'rgb',
                         2: 'rgb-mask',
                         3: 'mask-only',
                         4: 'predicted-only'}.get(mode, 'rgb-mask')

    if self.mode != 'raw':
        if self.mode == 'seamless':
            if io.input_bool("Seamless hist match? (y/n skip:n) : ", False):
                self.mode = 'seamless-hist-match'

        if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
            self.masked_hist_match = io.input_bool("Masked hist match? (y/n skip:y) : ", True)

        if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
            self.hist_match_threshold = np.clip(io.input_int("Hist match threshold [0..255] (skip:255) : ", 255), 0, 255)

    if face_type == FaceType.FULL:
        self.mask_mode = np.clip(io.input_int(
            "Mask mode: (1) learned, (2) dst, (3) FAN-prd, (4) FAN-dst, (5) FAN-prd*FAN-dst, (6) learned*FAN-prd*FAN-dst (?) help. Default - %d : " % (1), 1,
            help_message="If you learned the mask, option 1 should be chosen. 'dst' mask is the raw shaky mask from dst aligned images. 'FAN-prd' - use a super smooth mask from a pretrained FAN model on the predicted face. 'FAN-dst' - use a super smooth mask from a pretrained FAN model on the dst face. 'FAN-prd*FAN-dst' or 'learned*FAN-prd*FAN-dst' - use multiplied masks."), 1, 6)
    else:
        self.mask_mode = np.clip(io.input_int("Mask mode: (1) learned, (2) dst . Default - %d : " % (1), 1), 1, 2)

    if self.mask_mode >= 3 and self.mask_mode <= 6:
        self.fan_seg = None

    if self.mode != 'raw':
        self.erode_mask_modifier = base_erode_mask_modifier + np.clip(io.input_int("Choose erode mask modifier [-200..200] (skip:%d) : " % (default_erode_mask_modifier), default_erode_mask_modifier), -200, 200)
        self.blur_mask_modifier = base_blur_mask_modifier + np.clip(io.input_int("Choose blur mask modifier [-200..200] (skip:%d) : " % (default_blur_mask_modifier), default_blur_mask_modifier), -200, 200)

    self.output_face_scale = np.clip(1.0 + io.input_int("Choose output face scale modifier [-50..50] (skip:0) : ", 0) * 0.01, 0.5, 1.5)

    if self.mode != 'raw':
        self.color_transfer_mode = io.input_str("Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ", None, ['rct', 'lct'])

    self.super_resolution = io.input_bool("Apply super resolution? (y/n ?:help skip:n) : ", False, help_message="Enhance details by applying DCSCN network.")

    if self.mode != 'raw':
        self.final_image_color_degrade_power = np.clip(io.input_int("Degrade color power of final image [0..100] (skip:0) : ", 0), 0, 100)
        self.alpha = io.input_bool("Export png with alpha channel? (y/n skip:n) : ", False)

    io.log_info("")

    if self.super_resolution:
        host_proc, dc_upscale = SubprocessFunctionCaller.make_pair(imagelib.DCSCN().upscale)
        self.dc_host = AntiPickler(host_proc)
        self.dc_upscale = dc_upscale
    else:
        self.dc_host = None
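# predictor_func_host (and dc_host) hold process-local state such as the live TF model,
# so they must not travel with the converter when it is pickled into workers. A minimal
# sketch of what an AntiPickler wrapper can look like; this is an assumption about the
# repo's actual class, namely that it drops its payload during pickling:
class AntiPickler:
    def __init__(self, obj):
        self.obj = obj

    def __getstate__(self):
        # Serialize nothing: the wrapped object stays behind in the host process.
        return {}

    def __setstate__(self, state):
        # In a worker process the payload is simply absent.
        self.obj = None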
def __init__(self, predictor_func,
             predictor_input_size=0,
             predictor_masked=True,
             face_type=FaceType.FULL,
             default_mode=4,
             base_erode_mask_modifier=0,
             base_blur_mask_modifier=0,
             default_erode_mask_modifier=0,
             default_blur_mask_modifier=0,
             clip_hborder_mask_per=0,
             force_mask_mode=-1):

    super().__init__(predictor_func, Converter.TYPE_FACE)

    # Dummy predict and sleep to let TensorFlow cache its kernels.
    # Without this warm-up, conversion runs 2x slower.
    predictor_func(np.zeros((predictor_input_size, predictor_input_size, 3), dtype=np.float32))
    time.sleep(2)

    predictor_func_host, predictor_func = SubprocessFunctionCaller.make_pair(predictor_func)
    self.predictor_func_host = AntiPickler(predictor_func_host)
    self.predictor_func = predictor_func

    self.predictor_masked = predictor_masked
    self.predictor_input_size = predictor_input_size
    self.face_type = face_type
    self.clip_hborder_mask_per = clip_hborder_mask_per

    mode = io.input_int("Choose mode: (1) overlay, (2) hist match, (3) hist match bw, (4) seamless, (5) raw. Default - %d : " % (default_mode), default_mode)

    mode_dict = {1: 'overlay',
                 2: 'hist-match',
                 3: 'hist-match-bw',
                 4: 'seamless',
                 5: 'raw'}

    self.mode = mode_dict.get(mode, mode_dict[default_mode])

    if self.mode == 'raw':
        mode = io.input_int("Choose raw mode: (1) rgb, (2) rgb+mask (default), (3) mask only, (4) predicted only : ", 2)
        self.raw_mode = {1: 'rgb',
                         2: 'rgb-mask',
                         3: 'mask-only',
                         4: 'predicted-only'}.get(mode, 'rgb-mask')

    if self.mode != 'raw':
        if self.mode == 'seamless':
            if io.input_bool("Seamless hist match? (y/n skip:n) : ", False):
                self.mode = 'seamless-hist-match'

        if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
            self.masked_hist_match = io.input_bool("Masked hist match? (y/n skip:y) : ", True)

        if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
            self.hist_match_threshold = np.clip(io.input_int("Hist match threshold [0..255] (skip:255) : ", 255), 0, 255)

    if force_mask_mode != -1:
        self.mask_mode = force_mask_mode
    else:
        if face_type == FaceType.FULL:
            self.mask_mode = np.clip(io.input_int(
                "Mask mode: (1) learned, (2) dst, (3) FAN-prd, (4) FAN-dst, (5) FAN-prd*FAN-dst, (6) learned*FAN-prd*FAN-dst (?) help. Default - %d : " % (1), 1,
                help_message="If you learned the mask, option 1 should be chosen. 'dst' mask is the raw shaky mask from dst aligned images. 'FAN-prd' - use a super smooth mask from a pretrained FAN model on the predicted face. 'FAN-dst' - use a super smooth mask from a pretrained FAN model on the dst face. 'FAN-prd*FAN-dst' or 'learned*FAN-prd*FAN-dst' - use multiplied masks."), 1, 6)
        else:
            self.mask_mode = np.clip(io.input_int("Mask mode: (1) learned, (2) dst . Default - %d : " % (1), 1), 1, 2)

    if self.mask_mode >= 3 and self.mask_mode <= 6:
        self.fan_seg = None

    if self.mode != 'raw':
        self.erode_mask_modifier = base_erode_mask_modifier + np.clip(io.input_int("Choose erode mask modifier [-200..200] (skip:%d) : " % (default_erode_mask_modifier), default_erode_mask_modifier), -200, 200)
        self.blur_mask_modifier = base_blur_mask_modifier + np.clip(io.input_int("Choose blur mask modifier [-200..200] (skip:%d) : " % (default_blur_mask_modifier), default_blur_mask_modifier), -200, 200)

    self.output_face_scale = np.clip(1.0 + io.input_int("Choose output face scale modifier [-50..50] (skip:0) : ", 0) * 0.01, 0.5, 1.5)

    if self.mode != 'raw':
        self.color_transfer_mode = io.input_str("Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ", None, ['rct', 'lct'])

    self.super_resolution = io.input_bool("Apply super resolution? (y/n ?:help skip:n) : ", False, help_message="Enhance details by applying DCSCN network.")

    if self.mode != 'raw':
        self.final_image_color_degrade_power = np.clip(io.input_int("Degrade color power of final image [0..100] (skip:0) : ", 0), 0, 100)
        self.alpha = io.input_bool("Export png with alpha channel? (y/n skip:n) : ", False)

    io.log_info("")

    if self.super_resolution:
        host_proc, dc_upscale = SubprocessFunctionCaller.make_pair(imagelib.DCSCN().upscale)
        self.dc_host = AntiPickler(host_proc)
        self.dc_upscale = dc_upscale
    else:
        self.dc_host = None
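# Hypothetical usage of force_mask_mode (the enclosing class name is assumed, as it is
# not shown in this excerpt): any value other than -1 bypasses the interactive mask-mode
# prompt, e.g. force_mask_mode=4 always selects the 'FAN-dst' mask:
#
#   converter = ConverterMasked(predictor_func, predictor_input_size=128, force_mask_mode=4)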