def main():
    args = get_args()
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s')
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    # Set random seed
    torch.manual_seed(777)
    print(args)
    with open(args.config, 'r') as fin:
        configs = yaml.load(fin, Loader=yaml.FullLoader)
    if len(args.override_config) > 0:
        configs = override_config(configs, args.override_config)

    distributed = args.world_size > 1
    if distributed:
        logging.info('training on multiple gpus, this gpu {}'.format(args.gpu))
        dist.init_process_group(args.dist_backend,
                                init_method=args.init_method,
                                world_size=args.world_size,
                                rank=args.rank)

    symbol_table = read_symbol_table(args.symbol_table)

    train_conf = configs['dataset_conf']
    cv_conf = copy.deepcopy(train_conf)
    # Disable all data augmentation for the cross-validation set
    cv_conf['speed_perturb'] = False
    cv_conf['spec_aug'] = False
    cv_conf['shuffle'] = False
    cv_conf['apply_alaw_codec'] = False
    cv_conf['add_noise'] = False
    cv_conf['add_babble'] = False
    cv_conf['add_reverb'] = False
    cv_conf['apply_codec'] = False
    cv_conf['volume_perturb'] = False
    cv_conf['pitch_shift'] = False
    non_lang_syms = read_non_lang_symbols(args.non_lang_syms)

    train_dataset = Dataset(args.data_type, args.train_data, symbol_table,
                            train_conf, args.bpe_model, non_lang_syms, True)
    cv_dataset = Dataset(args.data_type,
                         args.cv_data,
                         symbol_table,
                         cv_conf,
                         args.bpe_model,
                         non_lang_syms,
                         partition=False)

    train_data_loader = DataLoader(train_dataset,
                                   batch_size=None,
                                   pin_memory=args.pin_memory,
                                   num_workers=args.num_workers,
                                   prefetch_factor=args.prefetch)
    cv_data_loader = DataLoader(cv_dataset,
                                batch_size=None,
                                pin_memory=args.pin_memory,
                                num_workers=args.num_workers,
                                prefetch_factor=args.prefetch)

    if 'fbank_conf' in configs['dataset_conf']:
        input_dim = configs['dataset_conf']['fbank_conf']['num_mel_bins']
    else:
        input_dim = configs['dataset_conf']['mfcc_conf']['num_mel_bins']
    vocab_size = len(symbol_table)

    # Save configs to model_dir/train.yaml for inference and export
    configs['input_dim'] = input_dim
    configs['output_dim'] = vocab_size
    configs['cmvn_file'] = args.cmvn
    configs['is_json_cmvn'] = True
    if args.rank == 0:
        saved_config_path = os.path.join(args.model_dir, 'train.yaml')
        with open(saved_config_path, 'w') as fout:
            data = yaml.dump(configs)
            fout.write(data)

    # Init asr model from configs
    model = init_asr_model(configs)
    if args.rank == 0:
        print(model)
        num_params = sum(p.numel() for p in model.parameters())
        print('the number of model params: {}'.format(num_params))

    # !!!IMPORTANT!!!
    # Try to export the model with TorchScript; if it fails, refine the code
    # to satisfy the script export requirements
    if args.rank == 0:
        script_model = torch.jit.script(model)
        script_model.save(os.path.join(args.model_dir, 'init.zip'))

    executor = Executor()
    # If a checkpoint is specified, load training info from it
    if args.checkpoint is not None:
        infos = load_checkpoint(model, args.checkpoint)
    elif args.enc_init is not None:
        logging.info('load pretrained encoders: {}'.format(args.enc_init))
        infos = load_trained_modules(model, args)
    else:
        infos = {}
    start_epoch = infos.get('epoch', -1) + 1
    cv_loss = infos.get('cv_loss', 0.0)
    step = infos.get('step', -1)

    num_epochs = configs.get('max_epoch', 100)
    model_dir = args.model_dir
    writer = None
    if args.rank == 0:
        os.makedirs(model_dir, exist_ok=True)
        exp_id = os.path.basename(model_dir)
        writer = SummaryWriter(os.path.join(args.tensorboard_dir, exp_id))

    if distributed:
        # cuda model is required for nn.parallel.DistributedDataParallel
        assert torch.cuda.is_available()
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(
            model, find_unused_parameters=True)
        device = torch.device("cuda")
        if args.fp16_grad_sync:
            from torch.distributed.algorithms.ddp_comm_hooks import (
                default as comm_hooks,
            )
            model.register_comm_hook(
                state=None, hook=comm_hooks.fp16_compress_hook
            )
    else:
        use_cuda = args.gpu >= 0 and torch.cuda.is_available()
        device = torch.device('cuda' if use_cuda else 'cpu')
        model = model.to(device)

    if configs['optim'] == 'adam':
        print('optimizer is adam')
        optimizer = optim.Adam(model.parameters(), **configs['optim_conf'])
    elif configs['optim'] == 'sgd':
        print('optimizer is sgd')
        optimizer = optim.SGD(model.parameters(), **configs['optim_conf'])
    else:
        raise ValueError('unknown optimizer: ' + configs['optim'])
    scheduler = WarmupLR(optimizer, **configs['scheduler_conf'])

    final_epoch = None
    configs['rank'] = args.rank
    configs['is_distributed'] = distributed
    configs['use_amp'] = args.use_amp
    if start_epoch == 0 and args.rank == 0:
        save_model_path = os.path.join(model_dir, 'init.pt')
        save_checkpoint(model, save_model_path)

    # Start training loop
    executor.step = step
    scheduler.set_step(step)
    # Used for PyTorch AMP mixed precision training
    scaler = None
    if args.use_amp:
        scaler = torch.cuda.amp.GradScaler()

    for epoch in range(start_epoch, num_epochs):
        train_dataset.set_epoch(epoch)
        configs['epoch'] = epoch
        lr = optimizer.param_groups[0]['lr']
        logging.info('Epoch {} TRAIN info lr {}'.format(epoch, lr))
        executor.train(model, optimizer, scheduler, train_data_loader, device,
                       writer, configs, scaler)
        total_loss, total_loss_att, total_loss_ctc, num_seen_utts = executor.cv(
            model, cv_data_loader, device, configs)
        cv_loss = total_loss / num_seen_utts
        cv_loss_att = total_loss_att / num_seen_utts
        cv_loss_ctc = total_loss_ctc / num_seen_utts
        logging.info('Epoch {} CV info cv_loss {}'.format(epoch, cv_loss))

        if args.rank == 0:
            save_model_path = os.path.join(model_dir, '{}.pt'.format(epoch))
            save_checkpoint(
                model, save_model_path, {
                    'epoch': epoch,
                    'lr': lr,
                    'cv_loss': cv_loss,
                    'cv_loss_att': cv_loss_att,
                    'cv_loss_ctc': cv_loss_ctc,
                    'step': executor.step
                })
            writer.add_scalar('epoch/cv_loss', cv_loss, epoch)
            # Log the attention and CTC components, not cv_loss again
            writer.add_scalar('epoch/cv_loss_att', cv_loss_att, epoch)
            writer.add_scalar('epoch/cv_loss_ctc', cv_loss_ctc, epoch)
            writer.add_scalar('epoch/lr', lr, epoch)
        final_epoch = epoch

    if final_epoch is not None and args.rank == 0:
        final_model_path = os.path.join(model_dir, 'final.pt')
        os.symlink('{}.pt'.format(final_epoch), final_model_path)
        writer.close()
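

# Minimal sketch of the script entry point and of how it might be launched.
# The __main__ guard is the standard pattern; it is not part of the excerpt
# above. The flag names below are inferred from the args.* attributes used in
# main() and assume get_args() exposes them verbatim; the paths and values are
# purely hypothetical.
#
# Single-GPU run (hypothetical paths):
#   python train.py --gpu 0 --config conf/train.yaml --data_type raw \
#       --symbol_table data/dict/lang_char.txt \
#       --train_data data/train/data.list --cv_data data/dev/data.list \
#       --model_dir exp/my_model --tensorboard_dir tensorboard
#
# Multi-GPU run (hypothetical 2-GPU setup): launch one process per rank with
#   --world_size 2 --rank {0,1} --dist_backend nccl \
#   --init_method "tcp://127.0.0.1:23456"
if __name__ == '__main__':
    main()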