def main():
    """Parse CLI options, load the JSON config, and smoke-test the Generator.

    Builds a Generator from the config and pushes one random batch through it,
    printing the output shape.
    """
    print('Initializing Training Process..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--group_name', default=None)
    parser.add_argument('--checkpoint_path', default='cp_hifigan')
    parser.add_argument('--config', default='config_8k.json')
    parser.add_argument('--training_epochs', default=3100, type=int)
    parser.add_argument('--stdout_interval', default=5, type=int)
    parser.add_argument('--checkpoint_interval', default=5000, type=int)
    parser.add_argument('--summary_interval', default=100, type=int)
    parser.add_argument('--validation_interval', default=1000, type=int)
    # BUG FIX: `type=bool` is an argparse trap -- bool('False') is True, so
    # *any* value passed on the command line enabled fine-tuning.  Parse the
    # string explicitly instead; the flag name and default are unchanged.
    parser.add_argument('--fine_tuning', default=False,
                        type=lambda s: str(s).lower() in ('true', '1', 'yes'))
    a = parser.parse_args()

    # Load hyper-parameters from the JSON config file.
    with open(a.config) as f:
        data = f.read()
    json_config = json.loads(data)
    h = AttrDict(json_config)
    build_env(a.config, 'config.json', a.checkpoint_path)

    model = Generator(h)
    # Smoke test with a random (10, 80, 80) tensor -- presumably
    # (batch, n_mels, frames); verify against Generator's expected input.
    inputs = torch.randn(10, 80, 80)
    output = model(inputs)
    print(output.shape)
def main():
    """Parse CLI options, load the config, seed RNGs, and launch training.

    Spawns one training process per GPU when several are available,
    otherwise runs a single process (GPU or CPU).
    """
    print('Initializing Training Process..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--group_name', default=None)
    parser.add_argument('--input_wavs_dir', default='LJSpeech-1.1/wavs', help='')
    parser.add_argument('--input_mels_dir', default='ft_dataset', help='')
    parser.add_argument('--input_training_file',
                        default='LJSpeech-1.1/training.txt', help='')
    parser.add_argument('--input_validation_file',
                        default='LJSpeech-1.1/validation.txt', help='')
    parser.add_argument('--checkpoint_path', default='cp_hifigan')
    parser.add_argument('--config', default='')
    parser.add_argument('--training_epochs', default=3100, type=int)
    parser.add_argument('--stdout_interval', default=5, type=int)
    parser.add_argument('--checkpoint_interval', default=5000, type=int)
    parser.add_argument('--summary_interval', default=100, type=int)
    parser.add_argument('--validation_interval', default=1000, type=int)
    # BUG FIX: `type=bool` is an argparse trap -- bool('False') is True, so
    # *any* value passed on the command line enabled fine-tuning.
    parser.add_argument('--fine_tuning', default=False,
                        type=lambda s: str(s).lower() in ('true', '1', 'yes'))
    a = parser.parse_args()

    with open(a.config) as f:
        data = f.read()
    json_config = json.loads(data)
    h = AttrDict(json_config)
    build_env(a.config, 'config.json', a.checkpoint_path)

    torch.manual_seed(h.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(h.seed)
        h.num_gpus = torch.cuda.device_count()
        # Global batch size is split evenly across the available GPUs.
        h.batch_size = int(h.batch_size / h.num_gpus)
        print('Batch size per GPU :', h.batch_size)
    else:
        # BUG FIX: the original `else: pass` left h.num_gpus unset on
        # CPU-only machines, so the `h.num_gpus > 1` check below blew up
        # (unless the config happened to define it).  Default to a single
        # process.
        h.num_gpus = 1

    if h.num_gpus > 1:
        mp.spawn(train, nprocs=h.num_gpus, args=(a, h,))
    else:
        train(0, a, h)
def launch(
        executable, arguments=None, env_configs=None, from_current_env=True,
        keys_to_remove=None, dry=False):
    """Run *executable* with *arguments* in an environment from build_env().

    A dry run only assembles (and verbosely reports) the environment without
    launching anything.  On failure the environment / command are dumped
    before the exception is re-raised.
    """
    # Normalize the arguments into a token list.
    if is_string(arguments):
        argv = shlex.split(arguments)
    else:
        argv = [] if arguments is None else arguments
    cmd = [executable] + argv

    # Assemble the child environment.
    environment = build_env(
        env_configs,
        from_current_env=from_current_env,
        keys_to_remove=keys_to_remove,
        override_warnings=True,
        verbose=dry)

    if dry:
        return

    try:
        if PLATFORM == 'linux':
            subprocess.call(cmd, env=environment)
        elif any(shell in executable for shell in ('cmd', 'powershell', 'pwsh')):
            # Non-linux shell executables are invoked through the shell.
            subprocess.call(cmd, env=environment, shell=True)
        else:
            # Other non-linux executables are started without blocking.
            subprocess.Popen(cmd, env=environment)
    except TypeError:
        # Likely a malformed environment mapping -- dump it for inspection.
        pprint.pprint(environment)
        raise
    except FileNotFoundError:
        print('Failed to launch following command:')
        print(cmd)
        print('PATH =', environment['PATH'].split(os.pathsep))
        raise
def main():
    """Parse CLI options, load the config, seed RNGs, train, and plot losses."""
    print('Initializing the Training Process..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_wavs_dir', default='data/recordings')
    parser.add_argument('--input_mels_dir', default='processed_spokenDigits_np')
    parser.add_argument('--config', default='processed_spokenDigits_np')
    # BUG FIX: a.checkpoint_path was read below but never declared on the
    # parser, which raised AttributeError at runtime.
    parser.add_argument('--checkpoint_path', default='cp_hifigan')
    # BUG FIX: the epoch count should be an int, not the string '1000'.
    parser.add_argument('--training_epochs', default=1000, type=int)
    a = parser.parse_args()

    with open(a.config) as f:
        data = f.read()
    json_config = json.loads(data)
    h = AttrDict(json_config)
    build_env(a.config, 'config.json', a.checkpoint_path)

    # BUG FIX: the original line ended in a stray ':' (SyntaxError) --
    # `torch.manual_seed(h.seed):`.
    torch.manual_seed(h.seed)
    # BUG FIX: typos `is_availale(h.seed)` (is_available takes no arguments)
    # and `manual_seeed`; `h.num_gpu` replaced with the actual GPU count,
    # matching how sibling launchers split the batch size -- confirm the
    # config does not rely on a `num_gpu` key instead.
    if torch.cuda.is_available():
        torch.cuda.manual_seed(h.seed)
        h.batch_size = int(h.batch_size / torch.cuda.device_count())
    else:
        print('\nRunning on cpu')

    # train now--
    g_losses, d_losses, generated_mels = train(h)

    # Visualize the loss as the network trained.
    # BUG FIX: plt.plot(g_losses, d_losses) plotted the discriminator loss
    # *against* the generator loss; plot each curve over batches instead.
    plt.plot(g_losses, label='generator loss')
    plt.plot(d_losses, label='discriminator loss')
    plt.xlabel('100\'s of batches')
    plt.ylabel('loss')
    plt.grid(True)
    plt.legend()
    # plt.ylim(0, 2.5)  # consistent scale
    plt.show()
def main():
    """Parse CLI options, load hyper-parameters, pick a device, and train."""
    print('Initializing Training Process..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--rank', default=0, type=int)
    parser.add_argument('--group_name', default=None)
    parser.add_argument('--input_wavs_dir', default='data/LJSpeech-1.1/wavs')
    parser.add_argument('--input_train_metafile',
                        default='data/LJSpeech-1.1/metadata_ljspeech.csv')
    parser.add_argument('--input_valid_metafile',
                        default='data/LJSpeech-1.1/metadata_test_ljspeech.csv')
    parser.add_argument('--inference', default=False, action='store_true')
    parser.add_argument('--cps', default='cp_melgan')
    parser.add_argument('--cp_g', default='')  # ex) cp_mgt_01/g_100.pth
    parser.add_argument('--cp_d', default='')  # ex) cp_mgt_01/d_100.pth
    parser.add_argument('--config', default='hparams.json')
    parser.add_argument('--training_epochs', default=5000, type=int)
    parser.add_argument('--stdout_interval', default=1, type=int)
    parser.add_argument('--checkpoint_interval', default=5000, type=int)
    parser.add_argument('--summary_interval', default=100, type=int)
    parser.add_argument('--validation_interval', default=1000, type=int)
    a = parser.parse_args()

    # Hyper-parameters are shared module-wide via the global `h`.
    global h
    with open(a.config) as config_file:
        h = AttrDict(json.loads(config_file.read()))
    build_env(a.config, 'config.json', a.cps)

    torch.manual_seed(h.seed)

    # The chosen device is likewise published as a module-level global.
    global device
    if not torch.cuda.is_available():
        device = torch.device('cpu')
    else:
        torch.cuda.manual_seed(h.seed)
        device = torch.device('cuda')
        h.num_gpus = torch.cuda.device_count()

    fit(a, a.training_epochs)
import os

from common.argparser import args
from env import build_env
from models.a2c.a2c import learn

if __name__ == '__main__':
    # Build the graph environment from CLI-configured dimensions.
    graph_env = build_env(
        n_vertices=args.n_vertices,
        n_edges=args.n_edges,
        n_actions=args.n_actions,
    )

    # Checkpoints live under logs/<run-name>/<file>.
    def ckpt(run_name, filename):
        return os.path.join('logs', run_name, filename)

    # Train the defender/attacker pair with A2C.
    d_model, a_model = learn(
        env=graph_env,
        defender=args.d_model,
        attacker=args.a_model,
        seed=args.seed,
        nsteps=args.batchsize,
        total_epoches=args.total_epoches,
        vf_coef=args.vf_coef,
        ent_coef=args.ent_coef,
        max_grad_norm=args.max_grad_norm,
        lr=args.lr,
        gamma=args.gamma,
        d_load_path=ckpt(args.d_load, 'd_model.ckpt') if args.d_load else None,
        a_load_path=ckpt(args.a_load, 'a_model.ckpt') if args.a_load else None,
        d_save_path=ckpt(args.note, 'd_model.ckpt'),
        a_save_path=ckpt(args.note, 'a_model.ckpt'),
    )