def __init__(self, is_interactive, merger_session_filepath, predictor_func, predictor_input_shape,
             face_enhancer_func, xseg_256_extract_func, merger_config, frames, frames_root_path,
             output_path, output_mask_path, model_iter, subprocess_count=4):
    """Set up the interactive merger subprocessor.

    Restores a previously saved merger session (pickled to
    ``merger_session_filepath``) when running interactively, validates it
    against the supplied ``frames`` list, and rewinds to the first frame whose
    output image/mask is missing so it gets recomputed.

    Raises:
        ValueError: if ``frames`` is empty.
    """
    if len(frames) == 0:
        raise ValueError("len (frames) == 0")

    super().__init__('Merger', InteractiveMergerSubprocessor.Cli, io_loop_sleep_time=0.001)

    self.is_interactive = is_interactive
    self.merger_session_filepath = Path(merger_session_filepath)
    self.merger_config = merger_config
    self.predictor_func = predictor_func
    self.predictor_input_shape = predictor_input_shape
    self.face_enhancer_func = face_enhancer_func
    self.xseg_256_extract_func = xseg_256_extract_func
    self.frames_root_path = frames_root_path
    self.output_path = output_path
    self.output_mask_path = output_mask_path
    self.model_iter = model_iter

    # One prefetched frame per worker process.
    self.prefetch_frame_count = self.process_count = subprocess_count

    session_data = None
    if self.is_interactive and self.merger_session_filepath.exists():
        io.input_skip_pending()
        # if io.input_bool ("Use saved session?", True):
        if True:
            try:
                # NOTE(review): pickle.loads on a session file — only safe because the
                # file is produced by this application itself; do not point it at
                # untrusted data.
                with open(str(self.merger_session_filepath), "rb") as f:
                    session_data = pickle.loads(f.read())
            except Exception as e:
                # Best-effort load: a corrupt/unreadable session silently falls
                # back to a fresh run (session_data stays None).
                pass

    # Frame index at which recomputation must restart; None means no rewind,
    # -1 means rewind everything.
    rewind_to_frame_idx = None
    self.frames = frames
    self.frames_idxs = [*range(len(self.frames))]
    self.frames_done_idxs = []

    if self.is_interactive and session_data is not None:
        # Loaded session data, check it
        s_frames = session_data.get('frames', None)
        s_frames_idxs = session_data.get('frames_idxs', None)
        s_frames_done_idxs = session_data.get('frames_done_idxs', None)
        s_model_iter = session_data.get('model_iter', None)

        frames_equal = (s_frames is not None) and \
                       (s_frames_idxs is not None) and \
                       (s_frames_done_idxs is not None) and \
                       (s_model_iter is not None) and \
                       (len(frames) == len(s_frames))  # frames count must match

        if frames_equal:
            for i in range(len(frames)):
                frame = frames[i]
                s_frame = s_frames[i]
                # frames filenames must match
                if frame.frame_info.filepath.name != s_frame.frame_info.filepath.name:
                    frames_equal = False
                if not frames_equal:
                    break

        if frames_equal:
            io.log_info('Using saved session from ' + '/'.join(self.merger_session_filepath.parts[-2:]))

            for frame in s_frames:
                if frame.cfg is not None:
                    # recreate MergerConfig class using constructor with get_config() as dict params
                    # so if any new param will be added, old merger session will work properly
                    frame.cfg = frame.cfg.__class__(**frame.cfg.get_config())

            self.frames = s_frames
            self.frames_idxs = s_frames_idxs
            self.frames_done_idxs = s_frames_done_idxs

            if self.model_iter != s_model_iter:
                # model was more trained, recompute all frames
                rewind_to_frame_idx = -1
                for frame in self.frames:
                    frame.is_done = False
            elif len(self.frames_idxs) == 0:
                # all frames are done?
                rewind_to_frame_idx = -1

            if len(self.frames_idxs) != 0:
                # Force the current frame to be redrawn on screen.
                cur_frame = self.frames[self.frames_idxs[0]]
                cur_frame.is_shown = False

        if not frames_equal:
            # Saved session does not match the current frame set — discard it.
            session_data = None

    if session_data is None:
        # Fresh run: wipe any previous output so stale images never mix in.
        for filename in pathex.get_image_paths(self.output_path):  # remove all images in output_path
            Path(filename).unlink()

        for filename in pathex.get_image_paths(self.output_mask_path):  # remove all images in output_mask_path
            Path(filename).unlink()

        frames[0].cfg = self.merger_config.copy()

    for i in range(len(self.frames)):
        frame = self.frames[i]
        frame.idx = i
        frame.output_filepath = self.output_path / (frame.frame_info.filepath.stem + '.png')
        frame.output_mask_filepath = self.output_mask_path / (frame.frame_info.filepath.stem + '.png')

        if not frame.output_filepath.exists() or \
           not frame.output_mask_filepath.exists():
            # if some frame does not exist, recompute and rewind
            frame.is_done = False
            frame.is_shown = False

            if rewind_to_frame_idx is None:
                rewind_to_frame_idx = i - 1
            else:
                rewind_to_frame_idx = min(rewind_to_frame_idx, i - 1)

    if rewind_to_frame_idx is not None:
        # Move every done frame after the rewind point back onto the pending
        # queue, preserving order (newest popped, re-inserted at the front).
        while len(self.frames_done_idxs) > 0:
            if self.frames_done_idxs[-1] > rewind_to_frame_idx:
                prev_frame = self.frames[self.frames_done_idxs.pop()]
                self.frames_idxs.insert(0, prev_frame.idx)
            else:
                break
def __init__(self, is_interactive, merger_session_filepath, predictor_func, predictor_input_shape,
             merger_config, frames, frames_root_path, output_path, model_iter):
    """Set up the merger subprocessor.

    Wraps the predictor / super-resolution / FANSeg functions in
    ``SubprocessFunctionCaller`` pairs so worker subprocesses can call back
    into the host process, then restores a saved merger session (if the user
    accepts) or starts fresh by clearing the output directory.

    Raises:
        ValueError: if ``frames`` is empty.
    """
    if len(frames) == 0:
        raise ValueError("len (frames) == 0")

    # In debug mode allow a whole day before a worker is considered stuck.
    super().__init__('Merger', MergeSubprocessor.Cli, 86400 if MERGER_DEBUG else 60, io_loop_sleep_time=0.001)

    self.is_interactive = is_interactive
    self.merger_session_filepath = Path(merger_session_filepath)
    self.merger_config = merger_config

    # Host/client pair: workers get the client side, the host executes calls.
    self.predictor_func_host, self.predictor_func = SubprocessFunctionCaller.make_pair(predictor_func)
    self.predictor_input_shape = predictor_input_shape

    # Face enhancer is created lazily on first use (it loads a model).
    self.face_enhancer = None
    def superres_func(mode, face_bgr):
        # mode != 1 falls through and returns None — presumably callers only
        # pass mode==1 for enhancement; verify against caller. TODO confirm.
        if mode == 1:
            if self.face_enhancer is None:
                self.face_enhancer = FaceEnhancer(place_model_on_cpu=True)
            return self.face_enhancer.enhance(face_bgr, is_tanh=True, preserve_size=False)

    self.superres_host, self.superres_func = SubprocessFunctionCaller.make_pair(superres_func)

    # FANSeg segmentation networks, one per face type, created lazily.
    self.fanseg_by_face_type = {}
    self.fanseg_input_size = 256
    def fanseg_extract_func(face_type, *args, **kwargs):
        fanseg = self.fanseg_by_face_type.get(face_type, None)
        # NOTE(review): second .get() repeats the lookup above; equivalent to
        # `if fanseg is None:` — left as-is to preserve exact behavior.
        if self.fanseg_by_face_type.get(face_type, None) is None:
            cpu_only = len(nn.getCurrentDeviceConfig().devices) == 0
            with nn.tf.device('/CPU:0' if cpu_only else '/GPU:0'):
                fanseg = TernausNet("FANSeg", self.fanseg_input_size, FaceType.toString(face_type), place_model_on_cpu=True)
            self.fanseg_by_face_type[face_type] = fanseg
        return fanseg.extract(*args, **kwargs)

    self.fanseg_host, self.fanseg_extract_func = SubprocessFunctionCaller.make_pair(fanseg_extract_func)

    self.frames_root_path = frames_root_path
    self.output_path = output_path
    self.model_iter = model_iter

    # Cap worker count at 6 regardless of CPU count.
    self.prefetch_frame_count = self.process_count = min(6, multiprocessing.cpu_count())

    session_data = None
    if self.is_interactive and self.merger_session_filepath.exists():
        io.input_skip_pending()
        if io.input_bool("Use saved session?", True):
            try:
                # NOTE(review): pickle.loads on an app-generated session file;
                # not safe for untrusted input.
                with open(str(self.merger_session_filepath), "rb") as f:
                    session_data = pickle.loads(f.read())
            except Exception as e:
                # Best-effort: a broken session file silently means "no session".
                pass

    self.frames = frames
    self.frames_idxs = [*range(len(self.frames))]
    self.frames_done_idxs = []

    if self.is_interactive and session_data is not None:
        # Loaded session data, check it
        s_frames = session_data.get('frames', None)
        s_frames_idxs = session_data.get('frames_idxs', None)
        s_frames_done_idxs = session_data.get('frames_done_idxs', None)
        s_model_iter = session_data.get('model_iter', None)

        frames_equal = (s_frames is not None) and \
                       (s_frames_idxs is not None) and \
                       (s_frames_done_idxs is not None) and \
                       (s_model_iter is not None) and \
                       (len(frames) == len(s_frames))  # frames count must match

        if frames_equal:
            for i in range(len(frames)):
                frame = frames[i]
                s_frame = s_frames[i]
                # frames filenames must match
                if frame.frame_info.filepath.name != s_frame.frame_info.filepath.name:
                    frames_equal = False
                if not frames_equal:
                    break

        if frames_equal:
            io.log_info('Using saved session from ' + '/'.join(self.merger_session_filepath.parts[-2:]))

            for frame in s_frames:
                if frame.cfg is not None:
                    # recreate MergerConfig class using constructor with get_config() as dict params
                    # so if any new param will be added, old merger session will work properly
                    frame.cfg = frame.cfg.__class__(**frame.cfg.get_config())

            self.frames = s_frames
            self.frames_idxs = s_frames_idxs
            self.frames_done_idxs = s_frames_done_idxs

            rewind_to_begin = len(self.frames_idxs) == 0  # all frames are done?

            if self.model_iter != s_model_iter:
                # model was more trained, recompute all frames
                rewind_to_begin = True
                for frame in self.frames:
                    frame.is_done = False

            if rewind_to_begin:
                # Push every done frame back onto the front of the pending queue.
                while len(self.frames_done_idxs) > 0:
                    prev_frame = self.frames[self.frames_done_idxs.pop()]
                    self.frames_idxs.insert(0, prev_frame.idx)

            if len(self.frames_idxs) != 0:
                # Force the current frame to be redrawn.
                cur_frame = self.frames[self.frames_idxs[0]]
                cur_frame.is_shown = False

        if not frames_equal:
            # Saved session does not match the current frame set — discard it.
            session_data = None

    if session_data is None:
        # Fresh run: wipe any previous output images.
        for filename in pathex.get_image_paths(self.output_path):  # remove all images in output_path
            Path(filename).unlink()

        frames[0].cfg = self.merger_config.copy()

    for i in range(len(self.frames)):
        frame = self.frames[i]
        frame.idx = i
        frame.output_filepath = self.output_path / (frame.frame_info.filepath.stem + '.png')
def __init__(self, is_training=False,
                   saved_models_path=None,
                   training_data_src_path=None,
                   training_data_dst_path=None,
                   pretraining_data_path=None,
                   pretrained_model_path=None,
                   no_preview=False,
                   force_model_name=None,
                   force_gpu_idxs=None,
                   cpu_only=False,
                   debug=False,
                   force_model_class_name=None,
                   **kwargs):
    """Initialize the model base.

    Resolves which saved model to load (interactively listing / renaming /
    deleting saved model files unless a name is forced), restores pickled
    model state (iteration, options, loss history, preview sample), selects
    the compute device, runs the subclass option/initialize hooks, and — in
    training mode — prepares preview-history and autobackup directories.
    """
    self.is_training = is_training
    self.saved_models_path = saved_models_path
    self.training_data_src_path = training_data_src_path
    self.training_data_dst_path = training_data_dst_path
    self.pretraining_data_path = pretraining_data_path
    self.pretrained_model_path = pretrained_model_path
    self.no_preview = no_preview
    self.debug = debug

    # Model class name is derived from the containing package directory,
    # e.g. "Model_SAEHD" -> "SAEHD".
    self.model_class_name = model_class_name = Path(inspect.getmodule(self).__file__).parent.name.rsplit("_", 1)[1]

    if force_model_class_name is None:
        if force_model_name is not None:
            self.model_name = force_model_name
        else:
            # Interactive selection loop: repeats after a rename/delete so the
            # refreshed list is shown again; `break` exits once a name is chosen.
            while True:
                # gather all model dat files
                saved_models_names = []
                for filepath in pathex.get_file_paths(saved_models_path):
                    filepath_name = filepath.name
                    if filepath_name.endswith(f'{model_class_name}_data.dat'):
                        saved_models_names += [(filepath_name.split('_')[0], os.path.getmtime(filepath))]

                # sort by modified datetime
                saved_models_names = sorted(saved_models_names, key=operator.itemgetter(1), reverse=True)
                saved_models_names = [x[0] for x in saved_models_names]

                if len(saved_models_names) != 0:
                    io.log_info("Choose one of saved models, or enter a name to create a new model.")
                    io.log_info("[r] : rename")
                    io.log_info("[d] : delete")
                    io.log_info("")
                    for i, model_name in enumerate(saved_models_names):
                        s = f"[{i}] : {model_name} "
                        if i == 0:
                            s += "- latest"
                        io.log_info(s)

                    inp = io.input_str(f"", "0", show_default_value=False)
                    model_idx = -1
                    try:
                        # Non-numeric input leaves model_idx == -1 and is
                        # treated as a command or a new model name below.
                        model_idx = np.clip(int(inp), 0, len(saved_models_names) - 1)
                    except:
                        pass

                    if model_idx == -1:
                        if len(inp) == 1:
                            is_rename = inp[0] == 'r'
                            is_delete = inp[0] == 'd'

                            if is_rename or is_delete:
                                if len(saved_models_names) != 0:
                                    if is_rename:
                                        name = io.input_str(f"Enter the name of the model you want to rename")
                                    elif is_delete:
                                        name = io.input_str(f"Enter the name of the model you want to delete")

                                    if name in saved_models_names:
                                        if is_rename:
                                            new_model_name = io.input_str(f"Enter new name of the model")

                                        # Rename/delete every file belonging to the model
                                        # (prefix before the first '_' is the model name).
                                        for filepath in pathex.get_paths(saved_models_path):
                                            filepath_name = filepath.name
                                            model_filename, remain_filename = filepath_name.split('_', 1)
                                            if model_filename == name:
                                                if is_rename:
                                                    new_filepath = filepath.parent / (new_model_name + '_' + remain_filename)
                                                    filepath.rename(new_filepath)
                                                elif is_delete:
                                                    filepath.unlink()
                                # Re-show the (updated) model list.
                                continue
                        # Arbitrary input becomes the name of a new model.
                        self.model_name = inp
                    else:
                        self.model_name = saved_models_names[model_idx]
                else:
                    self.model_name = io.input_str(f"No saved models found. Enter a name of a new model", "new")
                    # Underscore separates model name from class name in filenames,
                    # so it cannot appear inside the name itself.
                    self.model_name = self.model_name.replace('_', ' ')
                break

        self.model_name = self.model_name + '_' + self.model_class_name
    else:
        self.model_name = force_model_class_name

    self.iter = 0
    self.options = {}
    self.loss_history = []
    self.sample_for_preview = None
    self.choosed_gpu_indexes = None

    model_data = {}
    self.model_data_path = Path(self.get_strpath_storage_for_file('data.dat'))
    if self.model_data_path.exists():
        io.log_info(f"Loading {self.model_name} model...")
        # NOTE(review): pickle.loads on an app-generated model file; not safe
        # for untrusted input.
        model_data = pickle.loads(self.model_data_path.read_bytes())
        self.iter = model_data.get('iter', 0)
        if self.iter != 0:
            self.options = model_data['options']
            self.loss_history = model_data.get('loss_history', [])
            self.sample_for_preview = model_data.get('sample_for_preview', None)
            self.choosed_gpu_indexes = model_data.get('choosed_gpu_indexes', None)

    if self.is_first_run():
        io.log_info("\nModel first run.")

    # Explicit GPU indexes override interactive device selection; cpu_only wins over both.
    self.device_config = nn.DeviceConfig.GPUIndexes(force_gpu_idxs or nn.ask_choose_device_idxs(suggest_best_multi_gpu=True)) \
                         if not cpu_only else nn.DeviceConfig.CPU()

    nn.initialize(self.device_config)

    ####
    # Per-class default options, shared across models of the same class.
    self.default_options_path = saved_models_path / f'{self.model_class_name}_default_options.dat'
    self.default_options = {}
    if self.default_options_path.exists():
        try:
            self.default_options = pickle.loads(self.default_options_path.read_bytes())
        except:
            # Best-effort: unreadable defaults fall back to empty.
            pass

    self.choose_preview_history = False
    self.batch_size = self.load_or_def_option('batch_size', 1)
    #####

    io.input_skip_pending()
    self.on_initialize_options()

    if self.is_first_run():
        # save as default options only for first run model initialize
        self.default_options_path.write_bytes(pickle.dumps(self.options))

    self.autobackup_hour = self.options.get('autobackup_hour', 0)
    self.write_preview_history = self.options.get('write_preview_history', False)
    self.target_iter = self.options.get('target_iter', 0)
    self.random_flip = self.options.get('random_flip', True)

    self.on_initialize()
    self.options['batch_size'] = self.batch_size

    if self.is_training:
        self.preview_history_path = self.saved_models_path / (f'{self.get_model_name()}_history')
        self.autobackups_path = self.saved_models_path / (f'{self.get_model_name()}_autobackups')

        if self.write_preview_history or io.is_colab():
            if not self.preview_history_path.exists():
                self.preview_history_path.mkdir(exist_ok=True)
            else:
                # A fresh model (iter 0) starts with an empty preview history.
                if self.iter == 0:
                    for filename in pathex.get_image_paths(self.preview_history_path):
                        Path(filename).unlink()

        if self.generator_list is None:
            raise ValueError('You didnt set_training_data_generators()')
        else:
            for i, generator in enumerate(self.generator_list):
                if not isinstance(generator, SampleGeneratorBase):
                    raise ValueError('training data generator is not subclass of SampleGeneratorBase')

        self.update_sample_for_preview(choose_preview_history=self.choose_preview_history)

        if self.autobackup_hour != 0:
            self.autobackup_start_time = time.time()

            if not self.autobackups_path.exists():
                self.autobackups_path.mkdir(exist_ok=True)

    io.log_info(self.get_summary_text())