def process_train(arguments):
    """Entry point for the `train` command: forward CLI options to Trainer.main."""
    os_utils.set_process_lowest_prio()

    # Trainer.main takes two dicts: model/data options and device-selection options.
    train_opts = {
        'training_data_src_dir': arguments.training_data_src_dir,
        'training_data_dst_dir': arguments.training_data_dst_dir,
        'pretraining_data_dir': arguments.pretraining_data_dir,
        'model_path': arguments.model_dir,
        'model_name': arguments.model_name,
        'no_preview': arguments.no_preview,
        'debug': arguments.debug,
        # Each entry arrives as (delay, command); coerce the delay to int.
        'execute_programs': [[int(prog[0]), prog[1]] for prog in arguments.execute_program],
    }
    device_opts = {
        'cpu_only': arguments.cpu_only,
        'force_gpu_idx': arguments.force_gpu_idx,
    }

    from mainscripts import Trainer
    Trainer.main(train_opts, device_opts)
def process_train(arguments):
    """Entry point for the `train` command.

    Environment variables DFL_TARGET_EPOCH and DFL_BATCH_SIZE, when set,
    override the matching command-line options before Trainer.main is called.

    Raises:
        ValueError: if either override variable holds a non-integer string.
    """
    # Idiom fix: os.environ.get replaces `key in os.environ.keys()` followed
    # by a second indexed lookup — one access, and the idiomatic membership form.
    target_epoch = os.environ.get('DFL_TARGET_EPOCH')
    if target_epoch is not None:
        arguments.target_epoch = int(target_epoch)

    batch_size = os.environ.get('DFL_BATCH_SIZE')
    if batch_size is not None:
        arguments.batch_size = int(batch_size)

    from mainscripts import Trainer
    Trainer.main(training_data_src_dir=arguments.training_data_src_dir,
                 training_data_dst_dir=arguments.training_data_dst_dir,
                 model_path=arguments.model_dir,
                 model_name=arguments.model_name,
                 debug=arguments.debug,
                 # **options
                 batch_size=arguments.batch_size,
                 write_preview_history=arguments.write_preview_history,
                 target_epoch=arguments.target_epoch,
                 save_interval_min=arguments.save_interval_min,
                 choose_worst_gpu=arguments.choose_worst_gpu,
                 force_best_gpu_idx=arguments.force_best_gpu_idx,
                 multi_gpu=arguments.multi_gpu,
                 force_gpu_idxs=arguments.force_gpu_idxs,
                 cpu_only=arguments.cpu_only)
def process_train(arguments):
    """Entry point for the `train` command: resolve paths and launch Trainer.main."""
    osex.set_process_lowest_prio()

    def _opt_path(value):
        # Wrap an optional directory string in Path, passing None through.
        return Path(value) if value is not None else None

    # A comma-separated GPU index list becomes a list of ints, or stays None.
    gpu_idxs = None
    if arguments.force_gpu_idxs is not None:
        gpu_idxs = [int(idx) for idx in arguments.force_gpu_idxs.split(',')]

    from mainscripts import Trainer
    Trainer.main(model_class_name=arguments.model_name,
                 saved_models_path=Path(arguments.model_dir),
                 training_data_src_path=Path(arguments.training_data_src_dir),
                 training_data_dst_path=Path(arguments.training_data_dst_dir),
                 pretraining_data_path=_opt_path(arguments.pretraining_data_dir),
                 pretrained_model_path=_opt_path(arguments.pretrained_model_dir),
                 no_preview=arguments.no_preview,
                 force_model_name=arguments.force_model_name,
                 force_gpu_idxs=gpu_idxs,
                 cpu_only=arguments.cpu_only,
                 # Each entry arrives as (delay, command); coerce the delay to int.
                 execute_programs=[[int(prog[0]), prog[1]] for prog in arguments.execute_program],
                 debug=arguments.debug)
def SPTrainLab(context):
    """Run a DeepFaceLab training session for the SP pipeline.

    Seeds the model directory with any pretrained model files, launches
    Trainer.main headless (no preview, no debug), reaps leftover worker
    processes, and returns the model directory path.
    """
    args = context.args

    # Copy pretrained weights into the working model directory, if provided.
    if args.pretrainedModelDir:
        for src_file in get_all_files(args.pretrainedModelDir):
            dst_file = os.path.join(args.modelDir, os.path.split(src_file)[1])
            shutil.copy(src_file, dst_file)

    training_args = {
        "training_data_src_dir": args.trainingDataSrcDir,
        "training_data_dst_dir": args.trainingDataDstDir,
        "pretraining_data_dir": args.pretrainingDataDir,
        "model_path": args.modelDir,
        "model_name": args.modelName,
        "no_preview": True,
        "debug": False,
        "execute_programs": [],
    }
    device_args = {"cpu_only": args.cpuOnly, "force_gpu_idx": args.forceGpuIdx}

    # NOTE(review): `args.__edit` is read literally here (double-underscore name
    # mangling applies only inside class bodies) — confirm the attribute name
    # against the code that builds `context.args`.
    if not args.__edit:
        os.environ["SP_FaceLab_Edit"] = "False"
        os.environ["SP_FaceLab_Iterations"] = str(args.iterations)
        os.environ["SP_FaceLab_Batch_Size"] = str(args.batchSize)

    from mainscripts import Trainer
    Trainer.main(training_args, device_args)

    # The trainer may leave worker processes behind; terminate them so the
    # caller does not block on orphaned children.
    for proc in multiprocessing.active_children():
        if proc != multiprocessing.current_process():
            proc.kill()

    return args.modelDir
def process_train(arguments):
    """Entry point for the `train` command: dispatch to the selected training API.

    Supports two backends selected by `arguments.api`: 'dfl' (Trainer) and
    'tf1' (Trainer_tf1). Any other value is reported and the process exits
    with a failure status.

    Raises:
        SystemExit: non-zero when `arguments.api` names an unknown backend.
    """
    osex.set_process_lowest_prio()

    kwargs = {
        'model_class_name': arguments.model_name,
        'saved_models_path': Path(arguments.model_dir),
        'training_data_src_path': Path(arguments.training_data_src_dir),
        'training_data_dst_path': Path(arguments.training_data_dst_dir),
        'pretraining_data_path': Path(arguments.pretraining_data_dir) if arguments.pretraining_data_dir is not None else None,
        'pretrained_model_path': Path(arguments.pretrained_model_dir) if arguments.pretrained_model_dir is not None else None,
        'no_preview': arguments.no_preview,
        'force_model_name': arguments.force_model_name,
        'force_gpu_idxs': [int(x) for x in arguments.force_gpu_idxs.split(',')] if arguments.force_gpu_idxs is not None else None,
        'cpu_only': arguments.cpu_only,
        'silent_start': arguments.silent_start,
        'execute_programs': [[int(x[0]), x[1]] for x in arguments.execute_program],
        'debug': arguments.debug,
        'use_amp': arguments.use_amp,
        'opt': arguments.opt,
        'lr': arguments.lr,
        'decay_step': arguments.decay_step,
        'config_file': arguments.config_file,
        'bs_per_gpu': arguments.bs_per_gpu,
    }
    if arguments.api == 'dfl':
        from mainscripts import Trainer
        Trainer.main(**kwargs)
    elif arguments.api == 'tf1':
        from mainscripts import Trainer_tf1
        Trainer_tf1.main(**kwargs)
    else:
        print('Training API {} is invalid'.format(arguments.api))
        # Bug fix: an unknown API is an error, so exit with a failure status.
        # The original exited with 0, signalling success to the calling shell.
        exit(1)
def process_train(arguments):
    """Entry point for the `train` command: hand the CLI options to Trainer.main."""
    # Collect keyword options first so the call site stays flat.
    options = dict(training_data_src_dir=arguments.training_data_src_dir,
                   training_data_dst_dir=arguments.training_data_dst_dir,
                   model_path=arguments.model_dir,
                   model_name=arguments.model_name,
                   debug=arguments.debug,
                   force_gpu_idx=arguments.force_gpu_idx,
                   cpu_only=arguments.cpu_only)
    from mainscripts import Trainer
    Trainer.main(**options)
def process_train(arguments):
    """Entry point for the `train` command: forward CLI options to Trainer.main."""
    # Trainer.main takes two dicts: model/data options and device-selection options.
    train_opts = {
        'training_data_src_dir': arguments.training_data_src_dir,
        'training_data_dst_dir': arguments.training_data_dst_dir,
        'model_path': arguments.model_dir,
        'model_name': arguments.model_name,
        'no_preview': arguments.no_preview,
        'debug': arguments.debug,
    }
    device_opts = {
        'cpu_only': arguments.cpu_only,
        'force_gpu_idx': arguments.force_gpu_idx,
    }
    from mainscripts import Trainer
    Trainer.main(train_opts, device_opts)
def process_train(arguments):
    """Entry point for the `train` command: hand the CLI options to Trainer.main."""
    # Collect keyword options first so the call site stays flat.
    options = dict(training_data_src_dir=arguments.training_data_src_dir,
                   training_data_dst_dir=arguments.training_data_dst_dir,
                   model_path=arguments.model_dir,
                   model_name=arguments.model_name,
                   debug=arguments.debug,
                   batch_size=arguments.batch_size,
                   write_preview_history=arguments.write_preview_history,
                   target_epoch=arguments.target_epoch,
                   save_interval_min=arguments.save_interval_min,
                   force_best_gpu_idx=arguments.force_best_gpu_idx,
                   multi_gpu=arguments.multi_gpu)
    from mainscripts import Trainer
    Trainer.main(**options)
def process_train(arguments):
    """Entry point for the `train` command.

    Environment variables DFL_TARGET_EPOCH, DFL_BATCH_SIZE and DFL_WORST_GPU
    override the matching command-line options before Trainer.main is called.

    Raises:
        ValueError: if DFL_TARGET_EPOCH or DFL_BATCH_SIZE holds a
            non-integer string.
    """
    # Idiom fix: os.environ.get replaces `key in os.environ.keys()` followed
    # by a second indexed lookup — one access, and the idiomatic membership form.
    target_epoch = os.environ.get('DFL_TARGET_EPOCH')
    if target_epoch is not None:
        arguments.session_target_epoch = int(target_epoch)

    batch_size = os.environ.get('DFL_BATCH_SIZE')
    if batch_size is not None:
        arguments.batch_size = int(batch_size)

    # Presence alone enables worst-GPU selection; the value is ignored.
    if 'DFL_WORST_GPU' in os.environ:
        arguments.choose_worst_gpu = True

    from mainscripts import Trainer
    Trainer.main(training_data_src_dir=arguments.training_data_src_dir,
                 training_data_dst_dir=arguments.training_data_dst_dir,
                 model_path=arguments.model_dir,
                 model_name=arguments.model_name,
                 debug=arguments.debug,
                 # **options
                 choose_worst_gpu=arguments.choose_worst_gpu,
                 force_best_gpu_idx=arguments.force_best_gpu_idx,
                 force_gpu_idxs=arguments.force_gpu_idxs,
                 cpu_only=arguments.cpu_only)