def train_one_iter(self):
    sample = self.generate_next_sample()
    iter_time = time.time()
    losses = self.onTrainOneIter(sample, self.generator_list)
    iter_time = time.time() - iter_time
    self.last_sample = sample

    self.loss_history.append([float(loss[1]) for loss in losses])

    if self.iter % 10 == 0:
        plist = []

        if io.is_colab():
            previews = self.get_previews()
            for i in range(len(previews)):
                name, bgr = previews[i]
                plist += [(bgr, self.get_strpath_storage_for_file('preview_%s.jpg' % (name)))]

        if self.write_preview_history:
            plist += [(self.get_static_preview(), str(self.preview_history_path / ('%.6d.jpg' % (self.iter))))]

        for preview, filepath in plist:
            preview_lh = ModelBase.get_loss_history_preview(self.loss_history, self.iter, preview.shape[1], preview.shape[2])
            img = (np.concatenate([preview_lh, preview], axis=0) * 255).astype(np.uint8)
            cv2_imwrite(filepath, img)

    self.iter += 1

    return self.iter, iter_time
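# Note on the hook contract: train_one_iter above only assumes that onTrainOneIter
# returns an iterable of (name, value) pairs -- it stores float(loss[1]) for every
# entry in loss_history. A minimal sketch of a subclass override satisfying that
# contract; the ae_train_* calls and the sample unpacking are hypothetical
# placeholders, not the actual model implementation:
def onTrainOneIter(self, generators_samples, generators_list):
    warped_src, target_src = generators_samples[0]
    warped_dst, target_dst = generators_samples[1]

    src_loss = self.ae_train_src([warped_src, target_src])  # hypothetical train step
    dst_loss = self.ae_train_dst([warped_dst, target_dst])  # hypothetical train step

    # train_one_iter reads index [1] of each pair and appends it to loss_history
    return (('src_loss', src_loss), ('dst_loss', dst_loss))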
def __init__(self, model_path,
             training_data_src_path=None,
             training_data_dst_path=None,
             debug=False,
             device_args=None,
             ask_write_preview_history=True,
             ask_target_iter=True,
             ask_batch_size=True,
             ask_sort_by_yaw=True,
             ask_random_flip=True,
             ask_src_scale_mod=True):

    device_args['force_gpu_idx'] = device_args.get('force_gpu_idx', -1)
    device_args['cpu_only'] = device_args.get('cpu_only', False)

    if device_args['force_gpu_idx'] == -1 and not device_args['cpu_only']:
        idxs_names_list = nnlib.device.getValidDevicesIdxsWithNamesList()
        if len(idxs_names_list) > 1:
            io.log_info("You have multiple GPUs in the system: ")
            for idx, name in idxs_names_list:
                io.log_info("[%d] : %s" % (idx, name))

            device_args['force_gpu_idx'] = io.input_int("Which GPU idx to choose? ( skip: best GPU ) : ", -1, [x[0] for x in idxs_names_list])
    self.device_args = device_args

    self.device_config = nnlib.DeviceConfig(allow_growth=False, **self.device_args)

    io.log_info("Loading model...")

    self.model_path = model_path
    self.model_data_path = Path(self.get_strpath_storage_for_file('data.dat'))

    self.training_data_src_path = training_data_src_path
    self.training_data_dst_path = training_data_dst_path

    self.src_images_paths = None
    self.dst_images_paths = None
    self.src_yaw_images_paths = None
    self.dst_yaw_images_paths = None
    self.src_data_generator = None
    self.dst_data_generator = None
    self.debug = debug
    self.is_training_mode = (training_data_src_path is not None and training_data_dst_path is not None)

    self.iter = 0
    self.options = {}
    self.loss_history = []
    self.sample_for_preview = None

    model_data = {}
    if self.model_data_path.exists():
        model_data = pickle.loads(self.model_data_path.read_bytes())
        self.iter = max(model_data.get('iter', 0), model_data.get('epoch', 0))
        if 'epoch' in self.options:
            self.options.pop('epoch')
        if self.iter != 0:
            self.options = model_data['options']
            self.loss_history = model_data['loss_history'] if 'loss_history' in model_data.keys() else []
            self.sample_for_preview = model_data['sample_for_preview'] if 'sample_for_preview' in model_data.keys() else None

    ask_override = self.is_training_mode and self.iter != 0 and io.input_in_time("Press enter in 2 seconds to override model settings.", 2)

    yn_str = {True: 'y', False: 'n'}

    if self.iter == 0:
        io.log_info("\nModel first run. Enter model options as default for each run.")

    if ask_write_preview_history and (self.iter == 0 or ask_override):
        default_write_preview_history = False if self.iter == 0 else self.options.get('write_preview_history', False)
        self.options['write_preview_history'] = io.input_bool(
            "Write preview history? (y/n ?:help skip:%s) : " % (yn_str[default_write_preview_history]),
            default_write_preview_history,
            help_message="Preview history will be written to the <ModelName>_history folder.")
    else:
        self.options['write_preview_history'] = self.options.get('write_preview_history', False)

    if ask_target_iter:
        if (self.iter == 0 or ask_override):
            self.options['target_iter'] = max(0, io.input_int("Target iteration (skip:unlimited/default) : ", 0))
        else:
            self.options['target_iter'] = max(model_data.get('target_iter', 0), self.options.get('target_epoch', 0))
            if 'target_epoch' in self.options:
                self.options.pop('target_epoch')

    if ask_batch_size and (self.iter == 0 or ask_override):
        default_batch_size = 0 if self.iter == 0 else self.options.get('batch_size', 0)
        self.options['batch_size'] = max(0, io.input_int(
            "Batch_size (?:help skip:%d) : " % (default_batch_size),
            default_batch_size,
            help_message="Larger batch size is better for NN's generalization, but it can cause an Out of Memory error. Tune this value for your videocard manually."))
    else:
        self.options['batch_size'] = self.options.get('batch_size', 0)

    if ask_sort_by_yaw:
        if (self.iter == 0):
            self.options['sort_by_yaw'] = io.input_bool(
                "Feed faces to network sorted by yaw? (y/n ?:help skip:n) : ", False,
                help_message="NN will not learn src face directions that don't match dst face directions. Do not use if the dst face has hair that covers the jaw.")
        else:
            self.options['sort_by_yaw'] = self.options.get('sort_by_yaw', False)

    if ask_random_flip:
        if (self.iter == 0):
            self.options['random_flip'] = io.input_bool(
                "Flip faces randomly? (y/n ?:help skip:y) : ", True,
                help_message="Predicted face will look more natural without this option, but the src faceset should cover all face directions that the dst faceset does.")
        else:
            self.options['random_flip'] = self.options.get('random_flip', True)

    if ask_src_scale_mod:
        if (self.iter == 0):
            self.options['src_scale_mod'] = np.clip(io.input_int(
                "Src face scale modifier % ( -30...30, ?:help skip:0) : ", 0,
                help_message="If the src face shape is wider than dst, try to decrease this value to get a better result."), -30, 30)
        else:
            self.options['src_scale_mod'] = self.options.get('src_scale_mod', 0)

    self.write_preview_history = self.options.get('write_preview_history', False)
    if not self.write_preview_history and 'write_preview_history' in self.options:
        self.options.pop('write_preview_history')

    self.target_iter = self.options.get('target_iter', 0)
    if self.target_iter == 0 and 'target_iter' in self.options:
        self.options.pop('target_iter')

    self.batch_size = self.options.get('batch_size', 0)
    self.sort_by_yaw = self.options.get('sort_by_yaw', False)
    self.random_flip = self.options.get('random_flip', True)

    self.src_scale_mod = self.options.get('src_scale_mod', 0)
    if self.src_scale_mod == 0 and 'src_scale_mod' in self.options:
        self.options.pop('src_scale_mod')

    self.onInitializeOptions(self.iter == 0, ask_override)

    nnlib.import_all(self.device_config)
    self.keras = nnlib.keras
    self.K = nnlib.keras.backend

    self.onInitialize()

    self.options['batch_size'] = self.batch_size

    if self.debug or self.batch_size == 0:
        self.batch_size = 1

    if self.is_training_mode:
        if self.device_args['force_gpu_idx'] == -1:
            self.preview_history_path = self.model_path / ('%s_history' % (self.get_model_name()))
        else:
            self.preview_history_path = self.model_path / ('%d_%s_history' % (self.device_args['force_gpu_idx'], self.get_model_name()))

        if self.write_preview_history or io.is_colab():
            if not self.preview_history_path.exists():
                self.preview_history_path.mkdir(exist_ok=True)
            else:
                if self.iter == 0:
                    for filename in Path_utils.get_image_paths(self.preview_history_path):
                        Path(filename).unlink()

        if self.generator_list is None:
            raise ValueError('You didnt set_training_data_generators()')
        else:
            for i, generator in enumerate(self.generator_list):
                if not isinstance(generator, SampleGeneratorBase):
                    raise ValueError('training data generator is not subclass of SampleGeneratorBase')

        if (self.sample_for_preview is None) or (self.iter == 0):
            self.sample_for_preview = self.generate_next_sample()

    model_summary_text = []

    model_summary_text += ["===== Model summary ====="]
    model_summary_text += ["== Model name: " + self.get_model_name()]
    model_summary_text += ["=="]
    model_summary_text += ["== Current iteration: " + str(self.iter)]
    model_summary_text += ["=="]
    model_summary_text += ["== Model options:"]
    for key in self.options.keys():
        model_summary_text += ["== |== %s : %s" % (key, self.options[key])]

    if self.device_config.multi_gpu:
        model_summary_text += ["== |== multi_gpu : True "]

    model_summary_text += ["== Running on:"]
    if self.device_config.cpu_only:
        model_summary_text += ["== |== [CPU]"]
    else:
        for idx in self.device_config.gpu_idxs:
            model_summary_text += ["== |== [%d : %s]" % (idx, nnlib.device.getDeviceName(idx))]

    if not self.device_config.cpu_only and self.device_config.gpu_vram_gb[0] == 2:
        model_summary_text += ["=="]
        model_summary_text += ["== WARNING: You are using a 2GB GPU. Result quality may be significantly decreased."]
        model_summary_text += ["== If training does not start, close all programs and try again."]
        model_summary_text += ["== You can also disable Windows Aero Desktop to get extra free VRAM."]
        model_summary_text += ["=="]

    model_summary_text += ["========================="]
    model_summary_text = "\r\n".join(model_summary_text)
    self.model_summary_text = model_summary_text
    io.log_info(model_summary_text)
def trainerThread(s2c, c2s, args, device_args):
    while True:
        try:
            start_time = time.time()

            training_data_src_path = Path(args.get('training_data_src_dir', ''))
            training_data_dst_path = Path(args.get('training_data_dst_dir', ''))
            model_path = Path(args.get('model_path', ''))
            model_name = args.get('model_name', '')
            save_interval_min = 15
            debug = args.get('debug', '')
            execute_programs = args.get('execute_programs', [])

            if not training_data_src_path.exists():
                io.log_err('Training data src directory does not exist.')
                break

            if not training_data_dst_path.exists():
                io.log_err('Training data dst directory does not exist.')
                break

            if not model_path.exists():
                model_path.mkdir(exist_ok=True)

            model = models.import_model(model_name)(
                model_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                debug=debug,
                device_args=device_args)

            is_reached_goal = model.is_reached_iter_goal()

            shared_state = {'after_save': False}
            loss_string = ""
            save_iter = model.get_iter()

            def model_save():
                if not debug and not is_reached_goal:
                    io.log_info("Saving....", end='\r')
                    model.save()
                    shared_state['after_save'] = True

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put({
                        'op': 'show',
                        'previews': previews,
                        'iter': model.get_iter(),
                        'loss_history': model.get_loss_history().copy()
                    })
                else:
                    previews = [('debug, press update for new', model.debug_one_iter())]
                    c2s.put({'op': 'show', 'previews': previews})

            if model.is_first_run():
                model_save()

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info('Model already trained to target iteration. You can use preview.')
                else:
                    io.log_info('Starting. Target iteration: %d. Press "Enter" to stop training and save model.' % (model.get_target_iter()))
            else:
                io.log_info('Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()

            for i in itertools.count(0, 1):
                if not debug:
                    cur_time = time.time()

                    for x in execute_programs:
                        prog_time, prog = x
                        if prog_time != 0 and (cur_time - start_time) >= prog_time:
                            x[0] = 0
                            try:
                                exec(prog)
                            except Exception as e:
                                print("Unable to execute program: %s" % (prog))

                    if not is_reached_goal:
                        iter, iter_time = model.train_one_iter()

                        loss_history = model.get_loss_history()
                        time_str = time.strftime("[%H:%M:%S]")
                        if iter_time >= 10:
                            loss_string = "{0}[#{1:06d}][{2:.5s}s]".format(time_str, iter, '{:0.4f}'.format(iter_time))
                        else:
                            loss_string = "{0}[#{1:06d}][{2:04d}ms]".format(time_str, iter, int(iter_time * 1000))

                        if shared_state['after_save']:
                            shared_state['after_save'] = False
                            last_save_time = time.time()  # upd last_save_time only after save+one_iter, because plaidML rebuilds programs after save https://github.com/plaidml/plaidml/issues/274

                            mean_loss = np.mean([np.array(loss_history[i]) for i in range(save_iter, iter)], axis=0)

                            for loss_value in mean_loss:
                                loss_string += "[%.4f]" % (loss_value)

                            io.log_info(loss_string)

                            save_iter = iter
                        else:
                            for loss_value in loss_history[-1]:
                                loss_string += "[%.4f]" % (loss_value)

                            if io.is_colab():
                                io.log_info('\r' + loss_string, end='')
                            else:
                                io.log_info(loss_string, end='\r')

                        if model.get_target_iter() != 0 and model.is_reached_iter_goal():
                            io.log_info('Reached target iteration.')
                            model_save()
                            is_reached_goal = True
                            io.log_info('You can use preview now.')

                if not is_reached_goal and (time.time() - last_save_time) >= save_interval_min * 60:
                    model_save()
                    send_preview()

                if i == 0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not s2c.empty():
                    input = s2c.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'preview':
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print('Error: %s' % (str(e)))
            traceback.print_exc()

        break

    c2s.put({'op': 'close'})
def __init__(self, model_path,
             training_data_src_path=None,
             training_data_dst_path=None,
             pretraining_data_path=None,
             debug=False,
             device_args=None,
             ask_enable_autobackup=True,
             ask_write_preview_history=True,
             ask_target_iter=True,
             ask_batch_size=True,
             ask_sort_by_yaw=True,
             ask_random_flip=True,
             ask_src_scale_mod=True):

    device_args['force_gpu_idx'] = device_args.get('force_gpu_idx', -1)
    device_args['cpu_only'] = device_args.get('cpu_only', False)

    if device_args['force_gpu_idx'] == -1 and not device_args['cpu_only']:
        idxs_names_list = nnlib.device.getValidDevicesIdxsWithNamesList()
        if len(idxs_names_list) > 1:
            io.log_info("You have multiple GPUs in the system: ")
            for idx, name in idxs_names_list:
                io.log_info("[%d] : %s" % (idx, name))

            device_args['force_gpu_idx'] = io.input_int("Which GPU idx to choose? ( skip: best GPU ) : ", -1, [x[0] for x in idxs_names_list])
    self.device_args = device_args

    self.device_config = nnlib.DeviceConfig(allow_growth=True, **self.device_args)

    io.log_info("Loading model...")

    self.model_path = model_path
    self.model_data_path = Path(self.get_strpath_storage_for_file('data.dat'))

    self.training_data_src_path = training_data_src_path
    self.training_data_dst_path = training_data_dst_path
    self.pretraining_data_path = pretraining_data_path

    self.src_images_paths = None
    self.dst_images_paths = None
    self.src_yaw_images_paths = None
    self.dst_yaw_images_paths = None
    self.src_data_generator = None
    self.dst_data_generator = None
    self.debug = debug
    self.is_training_mode = (training_data_src_path is not None and training_data_dst_path is not None)

    self.iter = 0
    self.options = {}
    self.loss_history = []
    self.sample_for_preview = None

    model_data = {}
    if self.model_data_path.exists():
        model_data = pickle.loads(self.model_data_path.read_bytes())
        self.iter = max(model_data.get('iter', 0), model_data.get('epoch', 0))
        if 'epoch' in self.options:
            self.options.pop('epoch')
        if self.iter != 0:
            self.options = model_data['options']
            self.loss_history = model_data.get('loss_history', [])
            self.sample_for_preview = model_data.get('sample_for_preview', None)

    ask_override = self.is_training_mode and self.iter != 0 and io.input_in_time("Press enter in 2 seconds to override model settings.", 5 if io.is_colab() else 2)

    yn_str = {True: 'y', False: 'n'}

    if self.iter == 0:
        io.log_info("\nModel first run. Enter model options as default for each run.")

    if ask_enable_autobackup and (self.iter == 0 or ask_override):
        default_autobackup = False if self.iter == 0 else self.options.get('autobackup', False)
        self.options['autobackup'] = io.input_bool(
            "Enable autobackup? (y/n ?:help skip:%s) : " % (yn_str[default_autobackup]), default_autobackup,
            help_message="Autobackup model files with preview every hour for the last 15 hours. The latest backup is located in model/<>_autobackups/01")
    else:
        self.options['autobackup'] = self.options.get('autobackup', False)

    if ask_write_preview_history and (self.iter == 0 or ask_override):
        default_write_preview_history = False if self.iter == 0 else self.options.get('write_preview_history', False)
        self.options['write_preview_history'] = io.input_bool(
            "Write preview history? (y/n ?:help skip:%s) : " % (yn_str[default_write_preview_history]), default_write_preview_history,
            help_message="Preview history will be written to the <ModelName>_history folder.")
    else:
        self.options['write_preview_history'] = self.options.get('write_preview_history', False)

    if (self.iter == 0 or ask_override) and self.options['write_preview_history'] and io.is_support_windows():
        choose_preview_history = io.input_bool("Choose image for the preview history? (y/n skip:%s) : " % (yn_str[False]), False)
    elif (self.iter == 0 or ask_override) and self.options['write_preview_history'] and io.is_colab():
        choose_preview_history = io.input_bool(
            "Randomly choose new image for preview history? (y/n ?:help skip:%s) : " % (yn_str[False]), False,
            help_message="Preview image history will stay stuck with old faces if you reuse the same model on different celebs. Choose no unless you are changing src/dst to a new person.")
    else:
        choose_preview_history = False

    if ask_target_iter:
        if (self.iter == 0 or ask_override):
            self.options['target_iter'] = max(0, io.input_int("Target iteration (skip:unlimited/default) : ", 0))
        else:
            self.options['target_iter'] = max(model_data.get('target_iter', 0), self.options.get('target_epoch', 0))
            if 'target_epoch' in self.options:
                self.options.pop('target_epoch')

    if ask_batch_size and (self.iter == 0 or ask_override):
        default_batch_size = 0 if self.iter == 0 else self.options.get('batch_size', 0)
        self.options['batch_size'] = max(0, io.input_int(
            "Batch_size (?:help skip:%d) : " % (default_batch_size), default_batch_size,
            help_message="Larger batch size is better for NN's generalization, but it can cause an Out of Memory error. Tune this value for your videocard manually."))
    else:
        self.options['batch_size'] = self.options.get('batch_size', 0)

    if ask_sort_by_yaw:
        if (self.iter == 0 or ask_override):
            default_sort_by_yaw = self.options.get('sort_by_yaw', False)
            self.options['sort_by_yaw'] = io.input_bool(
                "Feed faces to network sorted by yaw? (y/n ?:help skip:%s) : " % (yn_str[default_sort_by_yaw]), default_sort_by_yaw,
                help_message="NN will not learn src face directions that don't match dst face directions. Do not use if the dst face has hair that covers the jaw.")
        else:
            self.options['sort_by_yaw'] = self.options.get('sort_by_yaw', False)

    if ask_random_flip:
        if (self.iter == 0):
            self.options['random_flip'] = io.input_bool(
                "Flip faces randomly? (y/n ?:help skip:y) : ", True,
                help_message="Predicted face will look more natural without this option, but the src faceset should cover all face directions that the dst faceset does.")
        else:
            self.options['random_flip'] = self.options.get('random_flip', True)

    if ask_src_scale_mod:
        if (self.iter == 0):
            self.options['src_scale_mod'] = np.clip(io.input_int(
                "Src face scale modifier % ( -30...30, ?:help skip:0) : ", 0,
                help_message="If the src face shape is wider than dst, try to decrease this value to get a better result."), -30, 30)
        else:
            self.options['src_scale_mod'] = self.options.get('src_scale_mod', 0)

    self.autobackup = self.options.get('autobackup', False)
    if not self.autobackup and 'autobackup' in self.options:
        self.options.pop('autobackup')

    self.write_preview_history = self.options.get('write_preview_history', False)
    if not self.write_preview_history and 'write_preview_history' in self.options:
        self.options.pop('write_preview_history')

    self.target_iter = self.options.get('target_iter', 0)
    if self.target_iter == 0 and 'target_iter' in self.options:
        self.options.pop('target_iter')

    self.batch_size = self.options.get('batch_size', 0)
    self.sort_by_yaw = self.options.get('sort_by_yaw', False)
    self.random_flip = self.options.get('random_flip', True)

    self.src_scale_mod = self.options.get('src_scale_mod', 0)
    if self.src_scale_mod == 0 and 'src_scale_mod' in self.options:
        self.options.pop('src_scale_mod')

    self.onInitializeOptions(self.iter == 0, ask_override)

    nnlib.import_all(self.device_config)
    self.keras = nnlib.keras
    self.K = nnlib.keras.backend

    self.onInitialize()

    self.options['batch_size'] = self.batch_size

    if self.debug or self.batch_size == 0:
        self.batch_size = 1

    if self.is_training_mode:
        if self.device_args['force_gpu_idx'] == -1:
            self.preview_history_path = self.model_path / ('%s_history' % (self.get_model_name()))
            self.autobackups_path = self.model_path / ('%s_autobackups' % (self.get_model_name()))
        else:
            self.preview_history_path = self.model_path / ('%d_%s_history' % (self.device_args['force_gpu_idx'], self.get_model_name()))
            self.autobackups_path = self.model_path / ('%d_%s_autobackups' % (self.device_args['force_gpu_idx'], self.get_model_name()))

        if self.autobackup:
            self.autobackup_current_hour = time.localtime().tm_hour

            if not self.autobackups_path.exists():
                self.autobackups_path.mkdir(exist_ok=True)

        if self.write_preview_history or io.is_colab():
            if not self.preview_history_path.exists():
                self.preview_history_path.mkdir(exist_ok=True)
            else:
                if self.iter == 0:
                    for filename in Path_utils.get_image_paths(self.preview_history_path):
                        Path(filename).unlink()

        if self.generator_list is None:
            raise ValueError('You didnt set_training_data_generators()')
        else:
            for i, generator in enumerate(self.generator_list):
                if not isinstance(generator, SampleGeneratorBase):
                    raise ValueError('training data generator is not subclass of SampleGeneratorBase')

        if self.sample_for_preview is None or choose_preview_history:
            if choose_preview_history and io.is_support_windows():
                wnd_name = "[p] - next. [enter] - confirm."
                io.named_window(wnd_name)
                io.capture_keys(wnd_name)
                choosed = False
                while not choosed:
                    self.sample_for_preview = self.generate_next_sample()
                    preview = self.get_static_preview()
                    io.show_image(wnd_name, (preview * 255).astype(np.uint8))

                    while True:
                        key_events = io.get_key_events(wnd_name)
                        key, chr_key, ctrl_pressed, alt_pressed, shift_pressed = key_events[-1] if len(key_events) > 0 else (0, 0, False, False, False)
                        if key == ord('\n') or key == ord('\r'):
                            choosed = True
                            break
                        elif key == ord('p'):
                            break

                        try:
                            io.process_messages(0.1)
                        except KeyboardInterrupt:
                            choosed = True

                io.destroy_window(wnd_name)
            else:
                self.sample_for_preview = self.generate_next_sample()
            self.last_sample = self.sample_for_preview

    ### Generate text summary of model hyperparameters
    # Find the longest key name and value string. Used as column widths.
    width_name = max([len(k) for k in self.options.keys()] + [17]) + 1  # Single space buffer to left edge. Minimum of 17, the length of the longest static string used ("Current iteration")
    width_value = max([len(str(x)) for x in self.options.values()] + [len(str(self.iter)), len(self.get_model_name())]) + 1  # Single space buffer to right edge
    if not self.device_config.cpu_only:  # Check length of GPU names
        width_value = max([len(nnlib.device.getDeviceName(idx)) + 1 for idx in self.device_config.gpu_idxs] + [width_value])
    width_total = width_name + width_value + 2  # Plus 2 for ": "

    model_summary_text = []
    model_summary_text += [f'=={" Model Summary ":=^{width_total}}==']  # Model/status summary
    model_summary_text += [f'=={" "*width_total}==']
    model_summary_text += [f'=={"Model name": >{width_name}}: {self.get_model_name(): <{width_value}}==']  # Name
    model_summary_text += [f'=={" "*width_total}==']
    model_summary_text += [f'=={"Current iteration": >{width_name}}: {str(self.iter): <{width_value}}==']  # Iter
    model_summary_text += [f'=={" "*width_total}==']

    model_summary_text += [f'=={" Model Options ":-^{width_total}}==']  # Model options
    model_summary_text += [f'=={" "*width_total}==']
    for key in self.options.keys():
        model_summary_text += [f'=={key: >{width_name}}: {str(self.options[key]): <{width_value}}==']  # self.options key/value pairs
    model_summary_text += [f'=={" "*width_total}==']

    model_summary_text += [f'=={" Running On ":-^{width_total}}==']  # Training hardware info
    model_summary_text += [f'=={" "*width_total}==']
    if self.device_config.multi_gpu:
        model_summary_text += [f'=={"Using multi_gpu": >{width_name}}: {"True": <{width_value}}==']  # multi_gpu
        model_summary_text += [f'=={" "*width_total}==']
    if self.device_config.cpu_only:
        model_summary_text += [f'=={"Using device": >{width_name}}: {"CPU": <{width_value}}==']  # cpu_only
    else:
        for idx in self.device_config.gpu_idxs:
            model_summary_text += [f'=={"Device index": >{width_name}}: {idx: <{width_value}}==']  # GPU hardware device index
            model_summary_text += [f'=={"Name": >{width_name}}: {nnlib.device.getDeviceName(idx): <{width_value}}==']  # GPU name
            vram_str = f'{nnlib.device.getDeviceVRAMTotalGb(idx):.2f}GB'  # GPU VRAM - Formatted as #.## (or ##.##)
            model_summary_text += [f'=={"VRAM": >{width_name}}: {vram_str: <{width_value}}==']
    model_summary_text += [f'=={" "*width_total}==']
    model_summary_text += [f'=={"="*width_total}==']

    if not self.device_config.cpu_only and self.device_config.gpu_vram_gb[0] <= 2:  # Low VRAM warning
        model_summary_text += ["/!\\"]
        model_summary_text += ["/!\\ WARNING:"]
        model_summary_text += ["/!\\ You are using a GPU with 2GB or less VRAM. This may significantly reduce the quality of your result!"]
        model_summary_text += ["/!\\ If training does not start, close all programs and try again."]
        model_summary_text += ["/!\\ Also you can disable Windows Aero Desktop to increase available VRAM."]
        model_summary_text += ["/!\\"]

    model_summary_text = "\n".join(model_summary_text)
    self.model_summary_text = model_summary_text
    io.log_info(model_summary_text)
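# The summary above is laid out purely with f-string alignment specifiers:
# ':=^{w}' centers the text between '=' fill characters, ': >{w}' right-aligns and
# ': <{w}' left-aligns within a fixed column. A standalone illustration with
# arbitrary widths and values (not taken from a real model):
width_name, width_value = 18, 12
width_total = width_name + width_value + 2
print(f'=={" Model Summary ":=^{width_total}}==')
print(f'=={"Model name": >{width_name}}: {"SAE": <{width_value}}==')
print(f'=={"batch_size": >{width_name}}: {str(8): <{width_value}}==')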
def main(args, device_args):
    io.log_info("Running converter.\r\n")

    aligned_dir = args.get('aligned_dir', None)
    avaperator_aligned_dir = args.get('avaperator_aligned_dir', None)

    try:
        input_path = Path(args['input_dir'])
        output_path = Path(args['output_dir'])
        model_path = Path(args['model_dir'])

        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        if output_path.exists():
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
        else:
            output_path.mkdir(parents=True, exist_ok=True)

        if not model_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        is_interactive = io.input_bool("Use interactive converter? (y/n skip:y) : ", True) if not io.is_colab() else False

        import models
        model = models.import_model(args['model_name'])(model_path, device_args=device_args)
        cfg = model.get_ConverterConfig()

        if not is_interactive:
            cfg.ask_settings()

        input_path_image_paths = Path_utils.get_image_paths(input_path)

        if cfg.type == ConverterConfig.TYPE_MASKED:
            if aligned_dir is None:
                io.log_err('Aligned directory not found. Please ensure it exists.')
                return

            aligned_path = Path(aligned_dir)
            if not aligned_path.exists():
                io.log_err('Aligned directory not found. Please ensure it exists.')
                return

            alignments = {}
            multiple_faces_detected = False

            aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
            for filepath in io.progress_bar_generator(aligned_path_image_paths, "Collecting alignments"):
                filepath = Path(filepath)

                if filepath.suffix == '.png':
                    dflimg = DFLPNG.load(str(filepath))
                elif filepath.suffix == '.jpg':
                    dflimg = DFLJPG.load(str(filepath))
                else:
                    dflimg = None

                if dflimg is None:
                    io.log_err("%s is not a dfl image file" % (filepath.name))
                    continue

                source_filename_stem = Path(dflimg.get_source_filename()).stem
                if source_filename_stem not in alignments.keys():
                    alignments[source_filename_stem] = []

                alignments_ar = alignments[source_filename_stem]
                alignments_ar.append(dflimg.get_source_landmarks())
                if len(alignments_ar) > 1:
                    multiple_faces_detected = True

            if multiple_faces_detected:
                io.log_info("Warning: multiple faces detected. It is strongly recommended to process them separately.")

            frames = [ConvertSubprocessor.Frame(frame_info=FrameInfo(filename=p, landmarks_list=alignments.get(Path(p).stem, None)))
                      for p in input_path_image_paths]

            if multiple_faces_detected:
                io.log_info("Warning: multiple faces detected. Motion blur will not be used.")
            else:
                s = 256
                local_pts = [(s//2-1, s//2-1), (s//2-1, 0)]  # center + up
                frames_len = len(frames)
                for i in io.progress_bar_generator(range(len(frames)), "Computing motion vectors"):
                    fi_prev = frames[max(0, i-1)].frame_info
                    fi = frames[i].frame_info
                    fi_next = frames[min(i+1, frames_len-1)].frame_info
                    if len(fi_prev.landmarks_list) == 0 or \
                       len(fi.landmarks_list) == 0 or \
                       len(fi_next.landmarks_list) == 0:
                        continue

                    mat_prev = LandmarksProcessor.get_transform_mat(fi_prev.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat = LandmarksProcessor.get_transform_mat(fi.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat_next = LandmarksProcessor.get_transform_mat(fi_next.landmarks_list[0], s, face_type=FaceType.FULL)

                    pts_prev = LandmarksProcessor.transform_points(local_pts, mat_prev, True)
                    pts = LandmarksProcessor.transform_points(local_pts, mat, True)
                    pts_next = LandmarksProcessor.transform_points(local_pts, mat_next, True)

                    prev_vector = pts[0] - pts_prev[0]
                    next_vector = pts_next[0] - pts[0]

                    motion_vector = pts_next[0] - pts_prev[0]
                    fi.motion_power = npla.norm(motion_vector)

                    motion_vector = motion_vector / fi.motion_power if fi.motion_power != 0 else np.array([0, 0], dtype=np.float32)

                    fi.motion_deg = -math.atan2(motion_vector[1], motion_vector[0]) * 180 / math.pi

        elif cfg.type == ConverterConfig.TYPE_FACE_AVATAR:
            filesdata = []
            for filepath in io.progress_bar_generator(input_path_image_paths, "Collecting info"):
                filepath = Path(filepath)

                if filepath.suffix == '.png':
                    dflimg = DFLPNG.load(str(filepath))
                elif filepath.suffix == '.jpg':
                    dflimg = DFLJPG.load(str(filepath))
                else:
                    dflimg = None

                if dflimg is None:
                    io.log_err("%s is not a dfl image file" % (filepath.name))
                    continue
                filesdata += [(FrameInfo(filename=str(filepath), landmarks_list=[dflimg.get_landmarks()]), dflimg.get_source_filename())]

            filesdata = sorted(filesdata, key=operator.itemgetter(1))  # sort by source filename
            frames = []
            filesdata_len = len(filesdata)
            for i in range(len(filesdata)):
                frame_info = filesdata[i][0]

                prev_temporal_frame_infos = []
                next_temporal_frame_infos = []

                for t in range(cfg.temporal_face_count):
                    prev_frame_info = filesdata[max(i - t, 0)][0]
                    next_frame_info = filesdata[min(i + t, filesdata_len - 1)][0]

                    prev_temporal_frame_infos.insert(0, prev_frame_info)
                    next_temporal_frame_infos.append(next_frame_info)

                frames.append(ConvertSubprocessor.Frame(prev_temporal_frame_infos=prev_temporal_frame_infos,
                                                        frame_info=frame_info,
                                                        next_temporal_frame_infos=next_temporal_frame_infos))

        if len(frames) == 0:
            io.log_info("No frames to convert in input_dir.")
        else:
            ConvertSubprocessor(
                is_interactive=is_interactive,
                converter_config=cfg,
                frames=frames,
                output_path=output_path,
            ).run()

        model.finalize()

    except Exception as e:
        print('Error: %s' % (str(e)))
        traceback.print_exc()
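# The motion-vector pass in main() above reduces each frame's motion to a single vector
# between the aligned-face center points of the previous and next frames: its norm becomes
# motion_power and its (negated) atan2 angle in degrees becomes motion_deg. A standalone
# numeric sketch with made-up points:
import math
import numpy as np
import numpy.linalg as npla

pt_prev = np.array([120.0, 200.0])  # hypothetical face center in the previous frame
pt_next = np.array([150.0, 160.0])  # hypothetical face center in the next frame

motion_vector = pt_next - pt_prev
motion_power = npla.norm(motion_vector)  # 50.0 pixels of motion
motion_vector = motion_vector / motion_power if motion_power != 0 else np.array([0, 0], dtype=np.float32)
motion_deg = -math.atan2(motion_vector[1], motion_vector[0]) * 180 / math.pi  # ~53.13 degrees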
def relight(input_dir, lighten=None, random_one=None):
    if lighten is None:
        lighten = io.input_bool(
            "Lighten the faces? ( y/n default:n ?:help ) : ", False,
            help_message="Lighten the faces instead of shadowing them. May produce artifacts.")

    if io.is_colab():
        io.log_info("In the colab version you cannot choose light directions manually.")
        manual = False
    else:
        manual = io.input_bool("Choose light directions manually? ( y/n default:y ) : ", True)

    if not manual:
        if random_one is None:
            random_one = io.input_bool(
                "Relight the faces only with one random direction and random intensity? ( y/n default:y ?:help) : ", True,
                help_message="Otherwise the faceset will be relighted with 8 predefined light directions but with random intensity.")

    image_paths = [Path(x) for x in Path_utils.get_image_paths(input_dir)]
    filtered_image_paths = []
    for filepath in io.progress_bar_generator(image_paths, "Collecting fileinfo"):
        try:
            if filepath.suffix == '.png':
                dflimg = DFLPNG.load(str(filepath))
            elif filepath.suffix == '.jpg':
                dflimg = DFLJPG.load(str(filepath))
            else:
                dflimg = None

            if dflimg is None:
                io.log_err("%s is not a dfl image file" % (filepath.name))
            else:
                if not dflimg.get_relighted():
                    filtered_image_paths += [filepath]
        except:
            io.log_err(f"Exception occurred while processing file {filepath.name}. Error: {traceback.format_exc()}")
    image_paths = filtered_image_paths

    if len(image_paths) == 0:
        io.log_info("No files to process.")
        return

    dpr = DeepPortraitRelighting()

    if manual:
        alt_azi_ar = RelightEditor(image_paths, dpr, lighten).run()

    for filepath in io.progress_bar_generator(image_paths, "Relighting"):
        try:
            if filepath.suffix == '.png':
                dflimg = DFLPNG.load(str(filepath))
            elif filepath.suffix == '.jpg':
                dflimg = DFLJPG.load(str(filepath))
            else:
                dflimg = None

            if dflimg is None:
                io.log_err("%s is not a dfl image file" % (filepath.name))
                continue
            else:
                if dflimg.get_relighted():
                    continue
                img = cv2_imread(str(filepath))

                if random_one:
                    alt = np.random.randint(-90, 91)
                    azi = np.random.randint(-90, 91)
                    inten = np.random.random() * 0.3 + 0.3
                    relighted_imgs = [dpr.relight(img, alt=alt, azi=azi, intensity=inten, lighten=lighten)]
                else:
                    if not manual and not random_one:
                        inten = np.random.random() * 0.3 + 0.3
                        alt_azi_ar = [(60, 0, inten), (60, 60, inten), (0, 60, inten), (-60, 60, inten),
                                      (-60, 0, inten), (-60, -60, inten), (0, -60, inten), (60, -60, inten)]

                    relighted_imgs = [dpr.relight(img, alt=alt, azi=azi, intensity=inten, lighten=lighten)
                                      for (alt, azi, inten) in alt_azi_ar]

                i = 0
                for i, relighted_img in enumerate(relighted_imgs):
                    im_flags = []
                    if filepath.suffix == '.jpg':
                        im_flags += [int(cv2.IMWRITE_JPEG_QUALITY), 100]

                    # find a free output name, starting from the enumerate index
                    while True:
                        relighted_filepath = filepath.parent / (filepath.stem + f'_relighted_{i}' + filepath.suffix)
                        if not relighted_filepath.exists():
                            break
                        i += 1

                    cv2_imwrite(relighted_filepath, relighted_img)
                    dflimg.remove_source_filename()
                    dflimg.embed_and_set(relighted_filepath, relighted=True)
        except:
            io.log_err(f"Exception occurred while processing file {filepath.name}. Error: {traceback.format_exc()}")
def trainerThread(s2c, c2s, e, args, device_args):
    while True:
        try:
            start_time = time.time()

            training_data_src_path = Path(args.get("training_data_src_dir", ""))
            training_data_dst_path = Path(args.get("training_data_dst_dir", ""))

            pretraining_data_path = args.get("pretraining_data_dir", "")
            pretraining_data_path = Path(pretraining_data_path) if pretraining_data_path is not None else None

            model_path = Path(args.get("model_path", ""))
            model_name = args.get("model_name", "")
            save_interval_min = 15
            debug = args.get("debug", "")
            execute_programs = args.get("execute_programs", [])

            if not training_data_src_path.exists():
                io.log_err("Training data src directory does not exist.")
                break

            if not training_data_dst_path.exists():
                io.log_err("Training data dst directory does not exist.")
                break

            if not model_path.exists():
                model_path.mkdir(exist_ok=True)

            model = models.import_model(model_name)(
                model_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                pretraining_data_path=pretraining_data_path,
                debug=debug,
                device_args=device_args,
            )

            is_reached_goal = model.is_reached_iter_goal()

            shared_state = {"after_save": False}
            loss_string = ""
            save_iter = model.get_iter()

            def model_save():
                if not debug and not is_reached_goal:
                    io.log_info("Saving....", end="\r")
                    model.save()
                    shared_state["after_save"] = True

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put({
                        "op": "show",
                        "previews": previews,
                        "iter": model.get_iter(),
                        "loss_history": model.get_loss_history().copy(),
                    })
                else:
                    previews = [("debug, press update for new", model.debug_one_iter())]
                    c2s.put({"op": "show", "previews": previews})

            e.set()  # Set the GUI thread as ready

            if model.is_first_run():
                model_save()

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info("Model already trained to target iteration. You can use preview.")
                else:
                    io.log_info('Starting. Target iteration: %d. Press "Enter" to stop training and save model.' % (model.get_target_iter()))
            else:
                io.log_info('Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()

            execute_programs = [[x[0], x[1], time.time()] for x in execute_programs]

            for i in itertools.count(0, 1):
                if not debug:
                    cur_time = time.time()

                    for x in execute_programs:
                        prog_time, prog, last_time = x
                        exec_prog = False
                        if prog_time > 0 and (cur_time - start_time) >= prog_time:
                            x[0] = 0
                            exec_prog = True
                        elif prog_time < 0 and (cur_time - last_time) >= -prog_time:
                            x[2] = cur_time
                            exec_prog = True

                        if exec_prog:
                            try:
                                exec(prog)
                            except Exception as e:
                                print("Unable to execute program: %s" % (prog))

                    if not is_reached_goal:
                        iter, iter_time = model.train_one_iter()

                        loss_history = model.get_loss_history()
                        time_str = time.strftime("[%H:%M:%S]")
                        if iter_time >= 10:
                            loss_string = "{0}[#{1:06d}][{2:.5s}s]".format(time_str, iter, "{:0.4f}".format(iter_time))
                        else:
                            loss_string = "{0}[#{1:06d}][{2:04d}ms]".format(time_str, iter, int(iter_time * 1000))

                        if shared_state["after_save"]:
                            shared_state["after_save"] = False
                            last_save_time = time.time()  # upd last_save_time only after save+one_iter, because plaidML rebuilds programs after save https://github.com/plaidml/plaidml/issues/274

                            mean_loss = np.mean([np.array(loss_history[i]) for i in range(save_iter, iter)], axis=0)

                            for loss_value in mean_loss:
                                loss_string += "[%.4f]" % (loss_value)

                            io.log_info(loss_string)

                            save_iter = iter
                        else:
                            for loss_value in loss_history[-1]:
                                loss_string += "[%.4f]" % (loss_value)

                            if io.is_colab():
                                io.log_info("\r" + loss_string, end="")
                            else:
                                io.log_info(loss_string, end="\r")

                        if model.get_target_iter() != 0 and model.is_reached_iter_goal():
                            io.log_info("Reached target iteration.")
                            model_save()
                            is_reached_goal = True
                            io.log_info("You can use preview now.")
                            break

                if not is_reached_goal and (time.time() - last_save_time) >= save_interval_min * 60:
                    model_save()
                    send_preview()

                if i == 0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not s2c.empty():
                    input = s2c.get()
                    op = input["op"]
                    if op == "save":
                        model_save()
                    elif op == "preview":
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == "close":
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print("Error: %s" % (str(e)))
            traceback.print_exc()

        break

    c2s.put({"op": "close"})
def trainerThread(s2c, c2s, e, args, device_args):
    while True:
        try:
            start_time = time.time()

            training_data_src_path = Path(args.get('training_data_src_dir', ''))
            training_data_dst_path = Path(args.get('training_data_dst_dir', ''))

            pretraining_data_path = args.get('pretraining_data_dir', '')
            pretraining_data_path = Path(pretraining_data_path) if pretraining_data_path is not None else None

            model_path = Path(args.get('model_path', ''))
            model_name = args.get('model_name', '')
            save_interval_min = 5
            target_loss = args.get("target_loss", 0)
            debug = args.get('debug', '')
            execute_programs = args.get('execute_programs', [])

            if not training_data_src_path.exists():
                io.log_err('Training data src directory does not exist.')
                break

            if not training_data_dst_path.exists():
                io.log_err('Training data dst directory does not exist.')
                break

            if not model_path.exists():
                model_path.mkdir(exist_ok=True)

            model = models.import_model(model_name)(
                model_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                pretraining_data_path=pretraining_data_path,
                debug=debug,
                device_args=device_args)

            is_reached_goal = model.is_reached_iter_goal()

            shared_state = {'after_save': False}
            loss_string = ""
            save_iter = model.get_iter()

            def model_save():
                if not debug and not is_reached_goal:
                    io.log_info("Saving....", end='\r')
                    model.save()
                    backup()
                    shared_state['after_save'] = True

            def backup():
                import F

                if model.is_first_run():
                    return

                has_backup = F.has_backup(model_name, model_path)
                io.log_info("Backup....", end='\r')

                loss_src_mean, loss_dst_mean = np.mean([np.array(loss_history[i]) for i in range(save_iter, iter)], axis=0)
                loss_src, loss_dst = loss_history[-1]

                if has_backup and (iter > 20000 and loss_src_mean > 1 or loss_dst_mean > 1 or loss_src > 1 or loss_dst > 1):
                    if model_name == "SAE" and model.options['archi'] == 'df':
                        F.restore_model(model_name, model_path)
                        weights_to_load = [
                            [model.encoder, 'encoder.h5'],
                            [model.decoder_src, 'decoder_src.h5'],
                            [model.decoder_dst, 'decoder_dst.h5'],
                            [model.decoder_srcm, 'decoder_srcm.h5'],
                            [model.decoder_dstm, 'decoder_dstm.h5']
                        ]
                        model.load_weights_safe(weights_to_load)
                        io.log_info("Crash detected. Trying to restore from backup....")

                if loss_src_mean <= 1 and loss_dst_mean <= 1 and loss_src <= 1 and loss_dst <= 1:
                    F.backup_model_move(model_name, model_path)
                    F.backup_model(model_name, model_path)

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put({
                        'op': 'show',
                        'previews': previews,
                        'iter': model.get_iter(),
                        'loss_history': model.get_loss_history().copy()
                    })
                else:
                    previews = [('debug, press update for new', model.debug_one_iter())]
                    c2s.put({'op': 'show', 'previews': previews})

            e.set()  # Set the GUI thread as ready

            if model.is_first_run():
                model_save()

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info('Model already trained to target iteration. You can use preview.')
                else:
                    io.log_info('Starting. Target iteration: %d. Press "Enter" to stop training and save model.' % (model.get_target_iter()))
            else:
                io.log_info('Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()

            execute_programs = [[x[0], x[1], time.time()] for x in execute_programs]

            for i in itertools.count(0, 1):
                if not debug:
                    cur_time = time.time()

                    for x in execute_programs:
                        prog_time, prog, last_time = x
                        exec_prog = False
                        if prog_time > 0 and (cur_time - start_time) >= prog_time:
                            x[0] = 0
                            exec_prog = True
                        elif prog_time < 0 and (cur_time - last_time) >= -prog_time:
                            x[2] = cur_time
                            exec_prog = True

                        if exec_prog:
                            try:
                                exec(prog)
                            except Exception as e:
                                print("Unable to execute program: %s" % (prog))

                    if not is_reached_goal:
                        iter, iter_time = model.train_one_iter()

                        loss_history = model.get_loss_history()
                        time_str = time.strftime("[%H:%M:%S]")
                        if iter_time >= 10:
                            loss_string = "{0}[#{1:06d}][{2:.5s}s]".format(time_str, iter, '{:0.4f}'.format(iter_time))
                        else:
                            loss_string = "{0}[#{1:06d}][{2:04d}ms]".format(time_str, iter, int(iter_time * 1000))

                        if shared_state['after_save']:
                            shared_state['after_save'] = False
                            last_save_time = time.time()  # upd last_save_time only after save+one_iter, because plaidML rebuilds programs after save https://github.com/plaidml/plaidml/issues/274

                            mean_loss = np.mean([np.array(loss_history[i]) for i in range(save_iter, iter)], axis=0)

                            for loss_value in mean_loss:
                                loss_string += "[%.4f]" % (loss_value)

                            io.log_info(loss_string)

                            save_iter = iter

                            if mean_loss[0] <= target_loss and mean_loss[1] <= target_loss:
                                is_reached_goal = True
                                break
                        else:
                            for loss_value in loss_history[-1]:
                                loss_string += "[%.4f]" % (loss_value)

                            if io.is_colab():
                                io.log_info('\r' + loss_string, end='')
                            else:
                                io.log_info(loss_string, end='\r')

                        if model.get_target_iter() != 0 and model.is_reached_iter_goal():
                            io.log_info('Reached target iteration.')
                            model_save()
                            is_reached_goal = True
                            io.log_info('You can use preview now.')

                if not is_reached_goal and (time.time() - last_save_time) >= save_interval_min * 60:
                    model_save()
                    send_preview()

                if i == 0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not s2c.empty():
                    input = s2c.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'preview':
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print('Error: %s' % (str(e)))
            traceback.print_exc()

        break

    c2s.put({'op': 'close'})
def __init__(self, model_path,
             training_data_src_path=None,
             training_data_dst_path=None,
             pretraining_data_path=None,
             debug=False,
             device_args=None,
             ask_enable_autobackup=True,
             ask_write_preview_history=True,
             ask_target_iter=True,
             ask_batch_size=True,
             ask_sort_by_yaw=True,
             ask_random_flip=True,
             ask_src_scale_mod=True):

    device_args['force_gpu_idx'] = device_args.get('force_gpu_idx', -1)
    device_args['cpu_only'] = device_args.get('cpu_only', False)

    if device_args['force_gpu_idx'] == -1 and not device_args['cpu_only']:
        idxs_names_list = nnlib.device.getValidDevicesIdxsWithNamesList()
        if len(idxs_names_list) > 1:
            io.log_info("You have multiple GPUs in the system: ")
            for idx, name in idxs_names_list:
                io.log_info("[%d] : %s" % (idx, name))

            device_args['force_gpu_idx'] = io.input_int("Which GPU idx to choose? ( skip: best GPU ) : ", -1, [x[0] for x in idxs_names_list])
    self.device_args = device_args

    self.device_config = nnlib.DeviceConfig(allow_growth=False, **self.device_args)

    io.log_info("Loading model...")

    self.model_path = model_path
    self.model_data_path = Path(self.get_strpath_storage_for_file('data.dat'))

    self.training_data_src_path = training_data_src_path
    self.training_data_dst_path = training_data_dst_path
    self.pretraining_data_path = pretraining_data_path

    self.src_images_paths = None
    self.dst_images_paths = None
    self.src_yaw_images_paths = None
    self.dst_yaw_images_paths = None
    self.src_data_generator = None
    self.dst_data_generator = None
    self.debug = debug
    self.is_training_mode = (training_data_src_path is not None and training_data_dst_path is not None)

    self.iter = 0
    self.options = {}
    self.loss_history = []
    self.sample_for_preview = None

    model_data = {}
    if self.model_data_path.exists():
        model_data = pickle.loads(self.model_data_path.read_bytes())
        self.iter = max(model_data.get('iter', 0), model_data.get('epoch', 0))
        if 'epoch' in self.options:
            self.options.pop('epoch')
        if self.iter != 0:
            self.options = model_data['options']
            self.loss_history = model_data.get('loss_history', [])
            self.sample_for_preview = model_data.get('sample_for_preview', None)

    ask_override = self.is_training_mode and self.iter != 0 and io.input_in_time(
        "\nPress [Enter] within 2 seconds to override some model settings.\n\n", 5 if io.is_colab() else 2)

    yn_str = {True: 'y', False: 'n'}

    if self.iter == 0:
        io.log_info("\nModel first run. Enter model options; they will be loaded as defaults on the next run.\n")

    if ask_enable_autobackup and (self.iter == 0 or ask_override):
        default_autobackup = False if self.iter == 0 else self.options.get('autobackup', False)
        self.options['autobackup'] = io.input_bool(
            "Enable autobackup? (y/n ?:help skip:%s) : " % (yn_str[default_autobackup]), default_autobackup,
            help_message="Autobackup model files with preview every hour for the last 15 hours. Located in model/<>_autobackups/")
    else:
        self.options['autobackup'] = self.options.get('autobackup', False)

    if ask_write_preview_history and (self.iter == 0 or ask_override):
        default_write_preview_history = False if self.iter == 0 else self.options.get('write_preview_history', False)
        self.options['write_preview_history'] = io.input_bool(
            "Write preview history? (y/n ?:help skip:%s) : " % (yn_str[default_write_preview_history]), default_write_preview_history,
            help_message="Preview history will be written to the <ModelName>_history folder.")
    else:
        self.options['write_preview_history'] = self.options.get('write_preview_history', False)

    if (self.iter == 0 or ask_override) and self.options['write_preview_history'] and io.is_support_windows():
        choose_preview_history = io.input_bool("Choose image for the preview history? (y/n skip:%s) : " % (yn_str[False]), False)
    else:
        choose_preview_history = False

    if ask_target_iter:
        if (self.iter == 0 or ask_override):
            self.options['target_iter'] = max(0, io.input_int("Target iteration (skip:unlimited/default) : ", 0))
        else:
            self.options['target_iter'] = max(model_data.get('target_iter', 0), self.options.get('target_epoch', 0))
            if 'target_epoch' in self.options:
                self.options.pop('target_epoch')

    if ask_batch_size and (self.iter == 0 or ask_override):
        default_batch_size = 0 if self.iter == 0 else self.options.get('batch_size', 0)
        self.options['batch_size'] = max(0, io.input_int(
            "Batch_size (?:help skip:%d) : " % (default_batch_size), default_batch_size,
            help_message="Larger batch size is better for NN generalization, but it can cause an Out of Memory (OOM) error. Set this according to your videocard; the default is 4, 16 is recommended."))
    else:
        self.options['batch_size'] = self.options.get('batch_size', 0)

    if ask_sort_by_yaw:
        if (self.iter == 0 or ask_override):
            default_sort_by_yaw = self.options.get('sort_by_yaw', False)
            self.options['sort_by_yaw'] = io.input_bool(
                "Feed faces to network sorted by yaw? (y/n ?:help skip:%s) : " % (yn_str[default_sort_by_yaw]), default_sort_by_yaw,
                help_message="NN will not learn src face directions that don't match dst face directions. Do not enable if the dst face has hair that covers the jaw.")
        else:
            self.options['sort_by_yaw'] = self.options.get('sort_by_yaw', False)

    if ask_random_flip:
        if (self.iter == 0):
            self.options['random_flip'] = io.input_bool(
                "Flip faces randomly? (y/n ?:help skip:y) : ", True,
                help_message="Predicted face will look more natural without this option, but the src faceset should cover all face directions to match the dst faceset.")
        else:
            self.options['random_flip'] = self.options.get('random_flip', True)

    if ask_src_scale_mod:
        if (self.iter == 0):
            self.options['src_scale_mod'] = np.clip(io.input_int(
                "Src face scale modifier % ( -30...30, ?:help skip:0) : ", 0,
                help_message="If the src face shape is wider than dst, try to decrease this value to get a better result."), -30, 30)
        else:
            self.options['src_scale_mod'] = self.options.get('src_scale_mod', 0)

    self.autobackup = self.options.get('autobackup', False)
    if not self.autobackup and 'autobackup' in self.options:
        self.options.pop('autobackup')

    self.write_preview_history = self.options.get('write_preview_history', False)
    if not self.write_preview_history and 'write_preview_history' in self.options:
        self.options.pop('write_preview_history')

    self.target_iter = self.options.get('target_iter', 0)
    if self.target_iter == 0 and 'target_iter' in self.options:
        self.options.pop('target_iter')

    self.batch_size = self.options.get('batch_size', 0)
    self.sort_by_yaw = self.options.get('sort_by_yaw', False)
    self.random_flip = self.options.get('random_flip', True)

    self.src_scale_mod = self.options.get('src_scale_mod', 0)
    if self.src_scale_mod == 0 and 'src_scale_mod' in self.options:
        self.options.pop('src_scale_mod')

    self.onInitializeOptions(self.iter == 0, ask_override)

    nnlib.import_all(self.device_config)
    self.keras = nnlib.keras
    self.K = nnlib.keras.backend

    self.onInitialize()

    self.options['batch_size'] = self.batch_size

    if self.debug or self.batch_size == 0:
        self.batch_size = 1

    if self.is_training_mode:
        if self.device_args['force_gpu_idx'] == -1:
            self.preview_history_path = self.model_path / ('%s_history' % (self.get_model_name()))
            self.autobackups_path = self.model_path / ('%s_autobackups' % (self.get_model_name()))
        else:
            self.preview_history_path = self.model_path / ('%d_%s_history' % (self.device_args['force_gpu_idx'], self.get_model_name()))
            self.autobackups_path = self.model_path / ('%d_%s_autobackups' % (self.device_args['force_gpu_idx'], self.get_model_name()))

        if self.autobackup:
            self.autobackup_current_hour = time.localtime().tm_hour

            if not self.autobackups_path.exists():
                self.autobackups_path.mkdir(exist_ok=True)

        if self.write_preview_history or io.is_colab():
            if not self.preview_history_path.exists():
                self.preview_history_path.mkdir(exist_ok=True)
            else:
                if self.iter == 0:
                    for filename in Path_utils.get_image_paths(self.preview_history_path):
                        Path(filename).unlink()

        if self.generator_list is None:
            raise ValueError('You didnt set_training_data_generators()')
        else:
            for i, generator in enumerate(self.generator_list):
                if not isinstance(generator, SampleGeneratorBase):
                    raise ValueError('training data generator is not subclass of SampleGeneratorBase')

        if self.sample_for_preview is None or choose_preview_history:
            if choose_preview_history and io.is_support_windows():
                wnd_name = "[p] - next. [enter] - confirm."
                io.named_window(wnd_name)
                io.capture_keys(wnd_name)
                choosed = False
                while not choosed:
                    self.sample_for_preview = self.generate_next_sample()
                    preview = self.get_static_preview()
                    io.show_image(wnd_name, (preview * 255).astype(np.uint8))

                    while True:
                        key_events = io.get_key_events(wnd_name)
                        key, chr_key, ctrl_pressed, alt_pressed, shift_pressed = key_events[-1] if len(key_events) > 0 else (0, 0, False, False, False)
                        if key == ord('\n') or key == ord('\r'):
                            choosed = True
                            break
                        elif key == ord('p'):
                            break

                        try:
                            io.process_messages(0.1)
                        except KeyboardInterrupt:
                            choosed = True

                io.destroy_window(wnd_name)
            else:
                self.sample_for_preview = self.generate_next_sample()
            self.last_sample = self.sample_for_preview

    model_summary_text = []

    model_summary_text += ["\n===== Model summary =====\n"]
    model_summary_text += ["== Model name: " + self.get_model_name()]
    model_summary_text += ["=="]
    model_summary_text += ["== Current iteration: " + str(self.iter)]
    model_summary_text += ["=="]
    model_summary_text += ["== Model options:"]
    for key in self.options.keys():
        model_summary_text += ["== |== %s : %s" % (key, self.options[key])]

    if self.device_config.multi_gpu:
        model_summary_text += ["== |== multi_gpu : True "]

    model_summary_text += ["== Running on:"]
    if self.device_config.cpu_only:
        model_summary_text += ["== |== [CPU]"]
    else:
        for idx in self.device_config.gpu_idxs:
            model_summary_text += ["== |== [%d : %s]" % (idx, nnlib.device.getDeviceName(idx))]

    if not self.device_config.cpu_only and self.device_config.gpu_vram_gb[0] == 2:
        model_summary_text += ["=="]
        model_summary_text += ["== WARNING: You are using a 2GB GPU. Result quality may be significantly decreased."]
        model_summary_text += ["== If training does not start, close all programs and try again."]
        model_summary_text += ["== You can also disable Windows Aero Desktop to get extra free VRAM."]
        model_summary_text += ["=="]

    model_summary_text += ["========================="]
    model_summary_text = "\r\n".join(model_summary_text)
    self.model_summary_text = model_summary_text
    io.log_info(model_summary_text)