def training(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    #===================================#
    #==============Logging==============#
    #===================================#

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    handler = TqdmLoggingHandler()
    handler.setFormatter(
        logging.Formatter(" %(asctime)s - %(message)s", "%Y-%m-%d %H:%M:%S"))
    logger.addHandler(handler)
    logger.propagate = False

    #===================================#
    #============Data Load==============#
    #===================================#

    # 1) Dataloader setting
    write_log(logger, "Load data...")
    gc.disable()
    dataset_dict = {
        'train': CustomDataset(data_path=args.preprocessed_path, phase='train'),
        'valid': CustomDataset(data_path=args.preprocessed_path, phase='valid'),
        'test': CustomDataset(data_path=args.preprocessed_path, phase='test')
    }
    unique_menu_count = dataset_dict['train'].unique_count()
    dataloader_dict = {
        'train': DataLoader(dataset_dict['train'], drop_last=True,
                            batch_size=args.batch_size, shuffle=True,
                            pin_memory=True, num_workers=args.num_workers,
                            collate_fn=PadCollate()),
        'valid': DataLoader(dataset_dict['valid'], drop_last=False,
                            batch_size=args.batch_size, shuffle=False,
                            pin_memory=True, num_workers=args.num_workers,
                            collate_fn=PadCollate()),
        'test': DataLoader(dataset_dict['test'], drop_last=False,
                           batch_size=args.batch_size, shuffle=False,
                           pin_memory=True, num_workers=args.num_workers,
                           collate_fn=PadCollate())
    }
    gc.enable()
    write_log(
        logger,
        f"Total number of training set iterations - {len(dataset_dict['train'])}, {len(dataloader_dict['train'])}"
    )

    #===================================#
    #===========Model setting===========#
    #===================================#

    # 1) Model initiating
    write_log(logger, "Instantiating model...")
    model = Transformer(model_type=args.model_type,
                        input_size=unique_menu_count,
                        d_model=args.d_model,
                        d_embedding=args.d_embedding,
                        n_head=args.n_head,
                        dim_feedforward=args.dim_feedforward,
                        num_encoder_layer=args.num_encoder_layer,
                        dropout=args.dropout)
    model = model.train()
    model = model.to(device)

    # 2) Optimizer setting
    optimizer = optimizer_select(model, args)
    scheduler = shceduler_select(optimizer, dataloader_dict, args)
    criterion = nn.MSELoss()
    # Native AMP (autocast + GradScaler) is used below; the original call to
    # apex.amp.initialize conflicted with it and has been removed.
    scaler = GradScaler(enabled=True)

    # 3) Model resume
    start_epoch = 0
    if args.resume:
        checkpoint = torch.load(os.path.join(args.model_path, 'checkpoint.pth.tar'),
                                map_location='cpu')
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        scaler.load_state_dict(checkpoint['scaler'])
        model = model.train()
        model = model.to(device)
        del checkpoint

    #===================================#
    #=========Model Train Start=========#
    #===================================#

    best_val_rmse = float('inf')
    write_log(logger, 'Train start!')

    for epoch in range(start_epoch, args.num_epochs):

        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()
                train_start_time = time.time()
                freq = 0
            elif phase == 'valid':
                model.eval()
                val_loss = 0
                val_rmse = 0

            for i, (src_menu, label_lunch, label_supper) in enumerate(dataloader_dict[phase]):

                # Optimizer setting
                optimizer.zero_grad()

                # Input, output setting
                src_menu = src_menu.to(device, non_blocking=True)
                label_lunch = label_lunch.float().to(device, non_blocking=True)
                label_supper = label_supper.float().to(device, non_blocking=True)

                # Model
                with torch.set_grad_enabled(phase == 'train'):
                    with autocast(enabled=True):
                        if args.model_type == 'sep':
                            # Separate models each emit a single value
                            logit = model(src_menu)
                            logit_lunch = logit[:, 0]
                            logit_supper = logit[:, 0]
                        elif args.model_type == 'total':
                            # One model emits both lunch and supper predictions
                            logit = model(src_menu)
                            logit_lunch = logit[:, 0]
                            logit_supper = logit[:, 1]

                    # Loss calculate
                    loss_lunch = criterion(logit_lunch, label_lunch)
                    loss_supper = criterion(logit_supper, label_supper)
                    loss = loss_lunch + loss_supper

                # Back-propagation
                if phase == 'train':
                    scaler.scale(loss).backward()
                    scaler.unscale_(optimizer)
                    clip_grad_norm_(model.parameters(), args.clip_grad_norm)
                    scaler.step(optimizer)
                    scaler.update()

                    # Scheduler setting
                    if args.scheduler in ['constant', 'warmup']:
                        scheduler.step()
                    if args.scheduler == 'reduce_train':
                        scheduler.step(loss)

                # Print loss value
                rmse_loss = torch.sqrt(loss)
                if phase == 'train':
                    # enumerate is 0-indexed, so the last batch is len - 1
                    if i == 0 or freq == args.print_freq or i == len(dataloader_dict['train']) - 1:
                        batch_log = "[Epoch:%d][%d/%d] train_MSE_loss:%2.3f | train_RMSE_loss:%2.3f | learning_rate:%3.6f | spend_time:%3.2fmin" \
                            % (epoch + 1, i, len(dataloader_dict['train']),
                               loss.item(), rmse_loss.item(),
                               optimizer.param_groups[0]['lr'],
                               (time.time() - train_start_time) / 60)
                        write_log(logger, batch_log)
                        freq = 0
                    freq += 1
                elif phase == 'valid':
                    val_loss += loss.item()
                    val_rmse += rmse_loss.item()

            if phase == 'valid':
                val_loss /= len(dataloader_dict['valid'])
                val_rmse /= len(dataloader_dict['valid'])
                write_log(logger, 'Validation Loss: %3.3f' % val_loss)
                write_log(logger, 'Validation RMSE: %3.3f' % val_rmse)

                if val_rmse < best_val_rmse:
                    write_log(logger, 'Checkpoint saving...')
                    if not os.path.exists(args.save_path):
                        os.mkdir(args.save_path)
                    torch.save(
                        {
                            'epoch': epoch,
                            'model': model.state_dict(),
                            'optimizer': optimizer.state_dict(),
                            'scheduler': scheduler.state_dict(),
                            'scaler': scaler.state_dict()
                        }, os.path.join(args.save_path, 'checkpoint_cap.pth.tar'))
                    best_val_rmse = val_rmse
                    best_epoch = epoch
                else:
                    else_log = f'Still epoch {best_epoch + 1} RMSE({round(best_val_rmse, 3)}) is better...'
                    write_log(logger, else_log)

    # 3) Print results
    write_log(logger, f'Best Epoch: {best_epoch + 1}')
    write_log(logger, f'Best RMSE: {round(best_val_rmse, 3)}')
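# The training functions above and below rely on two logging helpers that are
# defined elsewhere in the repository. A minimal sketch of what they might
# look like, assuming write_log is a None-safe logger wrapper (logger is None
# on non-logging ranks in the distributed script further down) and
# TqdmLoggingHandler routes records through tqdm.write so log lines do not
# break progress bars; the exact originals may differ:

class TqdmLoggingHandler(logging.Handler):
    """Logging handler that emits records via tqdm.write."""

    def __init__(self, level=logging.NOTSET):
        super().__init__(level)

    def emit(self, record):
        try:
            msg = self.format(record)
            tqdm.write(msg)
            self.flush()
        except Exception:
            self.handleError(record)


def write_log(logger, message):
    """Log a message if a logger is configured; no-op otherwise."""
    if logger is not None:
        logger.info(message)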
def training(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    #===================================#
    #==============Logging==============#
    #===================================#

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    handler = TqdmLoggingHandler()
    handler.setFormatter(
        logging.Formatter(" %(asctime)s - %(message)s", "%Y-%m-%d %H:%M:%S"))
    logger.addHandler(handler)
    logger.propagate = False

    #===================================#
    #============Data Load==============#
    #===================================#

    # 1) Data open
    write_log(logger, "Load data...")
    gc.disable()
    with open(os.path.join(args.preprocess_path, 'processed.pkl'), 'rb') as f:
        data_ = pickle.load(f)
        train_src_indices = data_['train_src_indices']
        valid_src_indices = data_['valid_src_indices']
        train_trg_indices = data_['train_trg_indices']
        valid_trg_indices = data_['valid_trg_indices']
        src_word2id = data_['src_word2id']
        trg_word2id = data_['trg_word2id']
        src_vocab_num = len(src_word2id)
        trg_vocab_num = len(trg_word2id)
        del data_
    gc.enable()
    write_log(logger, "Finished loading data!")

    # 2) Dataloader setting
    dataset_dict = {
        'train': CustomDataset(train_src_indices, train_trg_indices,
                               min_len=args.min_len,
                               src_max_len=args.src_max_len,
                               trg_max_len=args.trg_max_len),
        'valid': CustomDataset(valid_src_indices, valid_trg_indices,
                               min_len=args.min_len,
                               src_max_len=args.src_max_len,
                               trg_max_len=args.trg_max_len),
    }
    dataloader_dict = {
        'train': DataLoader(dataset_dict['train'], drop_last=True,
                            batch_size=args.batch_size, shuffle=True,
                            pin_memory=True, num_workers=args.num_workers),
        'valid': DataLoader(dataset_dict['valid'], drop_last=False,
                            batch_size=args.batch_size, shuffle=False,
                            pin_memory=True, num_workers=args.num_workers)
    }
    write_log(
        logger,
        f"Total number of training set iterations - {len(dataset_dict['train'])}, {len(dataloader_dict['train'])}"
    )

    #===================================#
    #===========Train setting===========#
    #===================================#

    # 1) Model initiating
    write_log(logger, 'Instantiating model...')
    model = Transformer(
        src_vocab_num=src_vocab_num, trg_vocab_num=trg_vocab_num,
        pad_idx=args.pad_id, bos_idx=args.bos_id, eos_idx=args.eos_id,
        d_model=args.d_model, d_embedding=args.d_embedding,
        n_head=args.n_head, dim_feedforward=args.dim_feedforward,
        num_common_layer=args.num_common_layer,
        num_encoder_layer=args.num_encoder_layer,
        num_decoder_layer=args.num_decoder_layer,
        src_max_len=args.src_max_len, trg_max_len=args.trg_max_len,
        dropout=args.dropout, embedding_dropout=args.embedding_dropout,
        trg_emb_prj_weight_sharing=args.trg_emb_prj_weight_sharing,
        emb_src_trg_weight_sharing=args.emb_src_trg_weight_sharing,
        parallel=args.parallel)
    model.train()
    model = model.to(device)
    tgt_mask = model.generate_square_subsequent_mask(args.trg_max_len - 1, device)

    # 2) Optimizer & Learning rate scheduler setting
    optimizer = optimizer_select(model, args)
    scheduler = shceduler_select(optimizer, dataloader_dict, args)
    scaler = GradScaler()

    # 3) Model resume
    start_epoch = 0
    if args.resume:
        write_log(logger, 'Resume model...')
        # Load from the same path the checkpoint is saved to below
        checkpoint = torch.load(
            os.path.join(args.save_path, f'checkpoint_{args.parallel}.pth.tar'))
        # Epochs are 1-indexed below, so resuming from checkpoint['epoch']
        # makes the loop restart at the following epoch.
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        scaler.load_state_dict(checkpoint['scaler'])
        del checkpoint

    #===================================#
    #=========Model Train Start=========#
    #===================================#

    best_val_acc = 0
    write_log(logger, 'Training start!')

    for epoch in range(start_epoch + 1, args.num_epochs + 1):
        start_time_e = time()
        freq = 0

        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()
            if phase == 'valid':
                write_log(logger, 'Validation start...')
                val_loss = 0
                val_acc = 0
                model.eval()

            for i, (src, trg) in enumerate(
                    tqdm(dataloader_dict[phase],
                         bar_format='{l_bar}{bar:30}{r_bar}{bar:-2b}')):

                # Optimizer setting
                optimizer.zero_grad(set_to_none=True)

                # Input, output setting
                src = src.to(device, non_blocking=True)
                trg = trg.to(device, non_blocking=True)
                trg_sequences_target = trg[:, 1:]
                non_pad = trg_sequences_target != args.pad_id
                trg_sequences_target = trg_sequences_target[non_pad].contiguous().view(-1)

                # Train
                if phase == 'train':

                    # Loss calculate
                    with autocast():
                        predicted = model(src, trg[:, :-1], tgt_mask,
                                          non_pad_position=non_pad)
                        predicted = predicted.view(-1, predicted.size(-1))
                        loss = label_smoothing_loss(predicted, trg_sequences_target,
                                                    args.pad_id)

                    scaler.scale(loss).backward()
                    scaler.unscale_(optimizer)
                    clip_grad_norm_(model.parameters(), args.clip_grad_norm)
                    scaler.step(optimizer)
                    scaler.update()

                    if args.scheduler in ['constant', 'warmup']:
                        scheduler.step()
                    if args.scheduler == 'reduce_train':
                        scheduler.step(loss)

                    # Print loss value only while training
                    if i == 0 or freq == args.print_freq or i == len(dataloader_dict['train']) - 1:
                        acc = (predicted.max(dim=1)[1] == trg_sequences_target).sum() / len(trg_sequences_target)
                        iter_log = "[Epoch:%03d][%03d/%03d] train_loss:%03.3f | train_acc:%03.2f%% | learning_rate:%1.6f | spend_time:%02.2fmin" % \
                            (epoch, i, len(dataloader_dict['train']),
                             loss.item(), acc * 100,
                             optimizer.param_groups[0]['lr'],
                             (time() - start_time_e) / 60)
                        write_log(logger, iter_log)
                        freq = 0
                    freq += 1

                # Validation
                if phase == 'valid':
                    with torch.no_grad():
                        predicted = model(src, trg[:, :-1], tgt_mask,
                                          non_pad_position=non_pad)
                        loss = F.cross_entropy(predicted, trg_sequences_target)
                        val_loss += loss.item()
                        val_acc += (predicted.max(dim=1)[1] == trg_sequences_target).sum() / len(trg_sequences_target)

            if phase == 'valid':
                val_loss /= len(dataloader_dict[phase])
                val_acc /= len(dataloader_dict[phase])
                write_log(logger, 'Validation Loss: %3.3f' % val_loss)
                write_log(logger, 'Validation Accuracy: %3.2f%%' % (val_acc * 100))

                # Validation-based schedulers step once per epoch, on the
                # averaged validation loss
                if args.scheduler == 'reduce_valid':
                    scheduler.step(val_loss)
                if args.scheduler == 'lambda':
                    scheduler.step()

                if val_acc > best_val_acc:
                    write_log(logger, 'Checkpoint saving...')
                    torch.save(
                        {
                            'epoch': epoch,
                            'model': model.state_dict(),
                            'optimizer': optimizer.state_dict(),
                            'scheduler': scheduler.state_dict(),
                            'scaler': scaler.state_dict()
                        }, os.path.join(args.save_path,
                                        f'checkpoint_{args.parallel}.pth.tar'))
                    best_val_acc = val_acc
                    best_epoch = epoch
                else:
                    else_log = f'Still epoch {best_epoch} accuracy({round(best_val_acc.item() * 100, 2)}%) is better...'
                    write_log(logger, else_log)

    # 3) Print results
    print(f'Best Epoch: {best_epoch}')
    print(f'Best Accuracy: {round(best_val_acc.item() * 100, 2)}%')
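# label_smoothing_loss is imported from the project's utilities and is also
# used by the distributed script further down (there without a pad id, since
# pad positions are already filtered out before the call). A minimal sketch
# under the usual definition - distribute eps probability mass uniformly over
# the non-target classes and take the cross entropy against the smoothed
# targets. The signature, the optional pad_id, and eps=0.1 are assumptions,
# not the repository's exact implementation; torch and F are assumed imported
# as in the surrounding code:

def label_smoothing_loss(pred, gold, pad_id=None, eps=0.1):
    """Cross entropy with label smoothing over raw logits `pred` (N, C)."""
    n_class = pred.size(1)
    one_hot = torch.zeros_like(pred).scatter(1, gold.unsqueeze(1), 1)
    smoothed = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
    log_prb = F.log_softmax(pred, dim=1)
    loss = -(smoothed * log_prb).sum(dim=1)
    if pad_id is not None:
        loss = loss.masked_select(gold != pad_id)
    return loss.mean()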
class Trainer:
    def __init__(self, params, mode, train_iter=None, valid_iter=None, test_iter=None):
        self.params = params

        # Train mode
        if mode == 'train':
            self.train_iter = train_iter
            self.valid_iter = valid_iter
        # Test mode
        else:
            self.test_iter = test_iter

        self.model = Transformer(self.params)
        self.model.to(self.params.device)

        # Scheduling Optimizer
        self.optimizer = ScheduledAdam(optim.Adam(self.model.parameters(),
                                                  betas=(0.9, 0.98),
                                                  eps=1e-9),
                                       hidden_dim=params.hidden_dim,
                                       warm_steps=params.warm_steps)

        self.criterion = nn.CrossEntropyLoss(ignore_index=self.params.pad_idx)
        self.criterion.to(self.params.device)

    def train(self):
        print(self.model)
        print(f'The model has {self.model.count_params():,} trainable parameters')

        best_valid_loss = float('inf')

        for epoch in range(self.params.num_epoch):
            self.model.train()
            epoch_loss = 0
            start_time = time.time()

            for batch in self.train_iter:
                # For each batch, first zero the gradients
                self.optimizer.zero_grad()
                source = batch.kor
                target = batch.eng

                # decoder input consists of <sos> and the following tokens (except the <eos> token)
                output = self.model(source, target[:, :-1])[0]

                # ground truth consists of the tokens and the <eos> token (except the <sos> token)
                output = output.contiguous().view(-1, output.shape[-1])
                target = target[:, 1:].contiguous().view(-1)
                # output = [(batch size * (target length - 1)), output dim]
                # target = [(batch size * (target length - 1))]
                loss = self.criterion(output, target)
                loss.backward()

                # clip the gradients to prevent exploding gradients
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.params.clip)

                self.optimizer.step()

                # 'item' extracts a scalar from a tensor containing a single value
                epoch_loss += loss.item()

            train_loss = epoch_loss / len(self.train_iter)
            valid_loss = self.evaluate()

            end_time = time.time()
            epoch_mins, epoch_secs = epoch_time(start_time, end_time)

            if valid_loss < best_valid_loss:
                best_valid_loss = valid_loss
                torch.save(self.model.state_dict(), self.params.save_model)

            print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
            print(f'\tTrain Loss: {train_loss:.3f} | Val. Loss: {valid_loss:.3f}')

    def evaluate(self):
        self.model.eval()
        epoch_loss = 0

        with torch.no_grad():
            for batch in self.valid_iter:
                source = batch.kor
                target = batch.eng

                output = self.model(source, target[:, :-1])[0]

                output = output.contiguous().view(-1, output.shape[-1])
                target = target[:, 1:].contiguous().view(-1)

                loss = self.criterion(output, target)
                epoch_loss += loss.item()

        return epoch_loss / len(self.valid_iter)

    def inference(self):
        self.model.load_state_dict(torch.load(self.params.save_model))
        self.model.eval()
        epoch_loss = 0

        with torch.no_grad():
            for batch in self.test_iter:
                source = batch.kor
                target = batch.eng

                output = self.model(source, target[:, :-1])[0]

                output = output.contiguous().view(-1, output.shape[-1])
                target = target[:, 1:].contiguous().view(-1)

                loss = self.criterion(output, target)
                epoch_loss += loss.item()

        test_loss = epoch_loss / len(self.test_iter)
        print(f'Test Loss: {test_loss:.3f}')
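# ScheduledAdam and epoch_time above come from the project's utilities. A
# plausible sketch, assuming ScheduledAdam implements the inverse-square-root
# warmup schedule from "Attention Is All You Need"; the constructor arguments
# match the call site, but the internals are an assumption:

class ScheduledAdam:
    """Adam wrapper that sets the Noam learning rate before each step."""

    def __init__(self, optimizer, hidden_dim, warm_steps):
        self.optimizer = optimizer
        self.init_lr = hidden_dim ** -0.5
        self.warm_steps = warm_steps
        self.current_steps = 0

    def step(self):
        # Update the learning rate, then take the underlying Adam step
        self.current_steps += 1
        lr = self.init_lr * self.get_scale()
        for group in self.optimizer.param_groups:
            group['lr'] = lr
        self.optimizer.step()

    def zero_grad(self):
        self.optimizer.zero_grad()

    def get_scale(self):
        # Linear warmup for warm_steps, then inverse-square-root decay
        return min(self.current_steps ** -0.5,
                   self.current_steps * self.warm_steps ** -1.5)


def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock interval into whole minutes and seconds."""
    elapsed = end_time - start_time
    elapsed_mins = int(elapsed / 60)
    elapsed_secs = int(elapsed - elapsed_mins * 60)
    return elapsed_mins, elapsed_secs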
        for n, p in model.named_parameters():
            if p.dim() > 1 and (n != "embedding.lut.weight"
                                and config.pretrain_emb):
                xavier_uniform_(p)
    elif config.model == "experts":
        model = Transformer_experts(vocab, decoder_number=program_number)
        for n, p in model.named_parameters():
            if p.dim() > 1 and (n != "embedding.lut.weight"
                                and config.pretrain_emb):
                xavier_uniform_(p)

    print("MODEL USED", config.model)
    print("TRAINABLE PARAMETERS", count_parameters(model))

    check_iter = 2000

    try:
        if config.USE_CUDA:
            model.cuda()
        model = model.train()
        best_ppl = 1000
        patient = 0
        writer = SummaryWriter(log_dir=config.save_path)
        weights_best = deepcopy(model.state_dict())
        data_iter = make_infinite(data_loader_tra)

        for n_iter in tqdm(range(1000000)):
            loss, ppl, bce, acc = model.train_one_batch(next(data_iter), n_iter)
            writer.add_scalars('loss', {'loss_train': loss}, n_iter)
            writer.add_scalars('ppl', {'ppl_train': ppl}, n_iter)
            writer.add_scalars('bce', {'bce_train': bce}, n_iter)
            writer.add_scalars('accuracy', {'acc_train': acc}, n_iter)
            if config.noam:
                writer.add_scalars('lr', {'learning_rate': model.optimizer._rate}, n_iter)
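# make_infinite above is a small utility from the same codebase; a minimal
# sketch, assuming it simply re-creates the DataLoader iterator forever so
# the training loop can draw batches with next() indefinitely:

def make_infinite(dataloader):
    """Yield batches from a DataLoader endlessly, restarting at each epoch."""
    while True:
        for batch in dataloader:
            yield batch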
def main(args):
    comm = MPI.COMM_WORLD
    world_size = comm.Get_size()
    rank = comm.Get_rank()

    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = str(args.master_port)
    torch.cuda.set_device(rank)
    dist.init_process_group(backend="nccl", world_size=world_size, rank=rank)
    device = torch.device("cuda")

    logger = None
    tb_logger = None
    if rank == 0:
        if not os.path.exists(args.save_path):
            os.mkdir(args.save_path)
        if not os.path.exists(args.tensorboard_log_dir):
            os.mkdir(args.tensorboard_log_dir)
        tb_logger = SummaryWriter(f"{args.tensorboard_log_dir}/{args.model_name}")

        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        handler = TqdmLoggingHandler()
        handler.setFormatter(logging.Formatter(" %(asctime)s - %(message)s"))
        logger.addHandler(handler)
        logger.propagate = False

    write_log(logger, "Load data")

    def load_data(args):
        gc.disable()
        with open(f"{args.preprocessed_data_path}/hanja_korean_word2id.pkl", "rb") as f:
            data = pickle.load(f)
            hanja_word2id = data['hanja_word2id']
            korean_word2id = data['korean_word2id']

        with open(f"{args.preprocessed_data_path}/preprocessed_train.pkl", "rb") as f:
            data = pickle.load(f)
            train_hanja_indices = data['hanja_indices']
            train_korean_indices = data['korean_indices']
            train_additional_hanja_indices = data['additional_hanja_indices']

        with open(f"{args.preprocessed_data_path}/preprocessed_valid.pkl", "rb") as f:
            data = pickle.load(f)
            valid_hanja_indices = data['hanja_indices']
            valid_korean_indices = data['korean_indices']
            valid_additional_hanja_indices = data['additional_hanja_indices']

        gc.enable()
        write_log(logger, "Finished loading data!")
        return (hanja_word2id, korean_word2id, train_hanja_indices,
                train_korean_indices, train_additional_hanja_indices,
                valid_hanja_indices, valid_korean_indices,
                valid_additional_hanja_indices)

    # load data
    (hanja_word2id, korean_word2id, train_hanja_indices, train_korean_indices,
     train_additional_hanja_indices, valid_hanja_indices, valid_korean_indices,
     valid_additional_hanja_indices) = load_data(args)
    hanja_vocab_num = len(hanja_word2id)
    korean_vocab_num = len(korean_word2id)

    hk_dataset = HanjaKoreanDataset(train_hanja_indices, train_korean_indices,
                                    min_len=args.min_len,
                                    src_max_len=args.src_max_len,
                                    trg_max_len=args.trg_max_len)
    hk_sampler = DistributedSampler(hk_dataset, num_replicas=world_size, rank=rank)
    hk_loader = DataLoader(hk_dataset, drop_last=True,
                           batch_size=args.hk_batch_size, sampler=hk_sampler,
                           num_workers=args.num_workers, prefetch_factor=4,
                           pin_memory=True)
    write_log(logger, f"hanja-korean: {len(hk_dataset)}, {len(hk_loader)}")

    h_dataset = HanjaDataset(train_hanja_indices, train_additional_hanja_indices,
                             hanja_word2id, min_len=args.min_len,
                             src_max_len=args.src_max_len)
    h_sampler = DistributedSampler(h_dataset, num_replicas=world_size, rank=rank)
    h_loader = DataLoader(h_dataset, drop_last=True,
                          batch_size=args.h_batch_size, sampler=h_sampler,
                          num_workers=args.num_workers, prefetch_factor=4,
                          pin_memory=True)
    write_log(logger, f"hanja: {len(h_dataset)}, {len(h_loader)}")

    hk_valid_dataset = HanjaKoreanDataset(valid_hanja_indices, valid_korean_indices,
                                          min_len=args.min_len,
                                          src_max_len=args.src_max_len,
                                          trg_max_len=args.trg_max_len)
    hk_valid_sampler = DistributedSampler(hk_valid_dataset,
                                          num_replicas=world_size, rank=rank)
    hk_valid_loader = DataLoader(hk_valid_dataset, drop_last=True,
                                 batch_size=args.hk_batch_size,
                                 sampler=hk_valid_sampler)
    write_log(logger,
              f"hanja-korean-valid: {len(hk_valid_dataset)}, {len(hk_valid_loader)}")

    h_valid_dataset = HanjaDataset(valid_hanja_indices,
                                   valid_additional_hanja_indices,
                                   hanja_word2id, min_len=args.min_len,
                                   src_max_len=args.src_max_len)
    h_valid_sampler = DistributedSampler(h_valid_dataset,
                                         num_replicas=world_size, rank=rank)
    h_valid_loader = DataLoader(h_valid_dataset, drop_last=True,
                                batch_size=args.h_batch_size,
                                sampler=h_valid_sampler)
    write_log(logger, f"hanja-valid: {len(h_valid_dataset)}, {len(h_valid_loader)}")

    del (train_hanja_indices, train_korean_indices,
         train_additional_hanja_indices, valid_hanja_indices,
         valid_korean_indices, valid_additional_hanja_indices)

    write_log(logger, "Build model")
    model = Transformer(hanja_vocab_num, korean_vocab_num,
                        pad_idx=args.pad_idx, bos_idx=args.bos_idx,
                        eos_idx=args.eos_idx, src_max_len=args.src_max_len,
                        trg_max_len=args.trg_max_len, d_model=args.d_model,
                        d_embedding=args.d_embedding, n_head=args.n_head,
                        dropout=args.dropout,
                        dim_feedforward=args.dim_feedforward,
                        num_encoder_layer=args.num_encoder_layer,
                        num_decoder_layer=args.num_decoder_layer,
                        num_mask_layer=args.num_mask_layer).to(device)
    # DDP expects the local device index here, not a bare torch.device("cuda")
    model = nn.parallel.DistributedDataParallel(model, device_ids=[rank],
                                                find_unused_parameters=True)
    for param in model.parameters():
        dist.broadcast(param.data, 0)
    dist.barrier()
    write_log(logger,
              f"Total Parameters: {sum(p.nelement() for p in model.parameters())}")

    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters()
                       if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay
        },
        {
            "params": [p for n, p in model.named_parameters()
                       if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0
        },
    ]
    optimizer = Ralamb(params=optimizer_grouped_parameters, lr=args.lr)
    total_iters = round(len(hk_loader) / args.num_grad_accumulate * args.epochs)
    scheduler = get_cosine_schedule_with_warmup(
        optimizer, round(total_iters * args.warmup_ratio), total_iters)
    scaler = GradScaler()

    start_epoch = 0
    if args.resume:
        def load_states():
            checkpoint = torch.load(f'{args.save_path}/{args.model_name}_ckpt.pt',
                                    map_location='cpu')
            start_epoch = checkpoint['epoch'] + 1
            # The checkpoint stores the unwrapped module's state dict,
            # so it must be loaded through model.module
            model.module.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            scaler.load_state_dict(checkpoint['scaler'])
            return start_epoch

        start_epoch = load_states()

    write_log(logger, f"Training start - Total iter: {total_iters}\n")
    iter_num = round(len(hk_loader) / args.num_grad_accumulate)
    global_step = start_epoch * iter_num
    hk_iter = iter(hk_loader)
    h_iter = iter(h_loader)
    model.train()
    tgt_mask = Transformer.generate_square_subsequent_mask(args.trg_max_len - 1, device)

    # validation
    validate(model, tgt_mask, h_valid_loader, hk_valid_loader, rank, logger,
             tb_logger, 0, device)

    for epoch in range(start_epoch + 1, args.epochs + 1):
        while True:
            start = time.time()
            finish_epoch = False
            trans_top5, trans_loss, mask_top5, mask_loss = 0.0, 0.0, 0.0, 0.0

            if args.train_reconstruct:
                optimizer.zero_grad(set_to_none=True)
                for _ in range(args.num_grad_accumulate):
                    try:
                        src_sequences, trg_sequences = next(h_iter)
                    except StopIteration:
                        h_sampler.set_epoch(epoch)
                        h_iter = iter(h_loader)
                        src_sequences, trg_sequences = next(h_iter)

                    trg_sequences = trg_sequences.to(device)
                    src_sequences = src_sequences.to(device)
                    non_pad = trg_sequences != args.pad_idx
                    trg_sequences = trg_sequences[non_pad].contiguous().view(-1)

                    with autocast():
                        predicted = model.module.reconstruct_predict(
                            src_sequences, masked_position=non_pad)
                        predicted = predicted.view(-1, predicted.size(-1))
                        loss = label_smoothing_loss(
                            predicted, trg_sequences) / args.num_grad_accumulate

                    scaler.scale(loss).backward()

                    if global_step % args.print_freq == 0:
                        mask_top5 += accuracy(predicted, trg_sequences,
                                              5) / args.num_grad_accumulate
                        mask_loss += loss.detach().item()

                # The reconstruction branch calls model.module directly and
                # bypasses DDP's backward hooks, so gradients are
                # synchronized manually here
                for param in model.parameters():
                    if param.grad is not None:
                        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
                        param.grad.data = param.grad.data / world_size
                scaler.step(optimizer)
                scaler.update()

            if args.train_translate:
                optimizer.zero_grad(set_to_none=True)
                for _ in range(args.num_grad_accumulate):
                    try:
                        src_sequences, trg_sequences = next(hk_iter)
                    except StopIteration:
                        hk_sampler.set_epoch(epoch)
                        hk_iter = iter(hk_loader)
                        src_sequences, trg_sequences = next(hk_iter)
                        finish_epoch = True

                    trg_sequences = trg_sequences.to(device)
                    trg_sequences_target = trg_sequences[:, 1:]
                    src_sequences = src_sequences.to(device)
                    non_pad = trg_sequences_target != args.pad_idx
                    trg_sequences_target = trg_sequences_target[non_pad].contiguous().view(-1)

                    with autocast():
                        predicted = model(src_sequences, trg_sequences[:, :-1],
                                          tgt_mask, non_pad_position=non_pad)
                        predicted = predicted.view(-1, predicted.size(-1))
                        loss = label_smoothing_loss(
                            predicted, trg_sequences_target) / args.num_grad_accumulate

                    scaler.scale(loss).backward()

                    if global_step % args.print_freq == 0:
                        trans_top5 += accuracy(predicted, trg_sequences_target,
                                               5) / args.num_grad_accumulate
                        trans_loss += loss.detach().item()

                for param in model.parameters():
                    if param.grad is not None:
                        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
                        param.grad.data = param.grad.data / world_size
                scaler.step(optimizer)
                scaler.update()

            scheduler.step()

            # Print status
            if global_step % args.print_freq == 0:
                if args.train_reconstruct:
                    mask_top5 = torch.tensor([mask_top5], device=device)
                    mask_loss = torch.tensor([mask_loss], device=device)
                    dist.all_reduce(mask_top5, op=dist.ReduceOp.SUM)
                    dist.all_reduce(mask_loss, op=dist.ReduceOp.SUM)
                    mask_top5 = (mask_top5 / world_size).item()
                    mask_loss = (mask_loss / world_size).item()
                if args.train_translate:
                    trans_top5 = torch.tensor([trans_top5], device=device)
                    trans_loss = torch.tensor([trans_loss], device=device)
                    dist.all_reduce(trans_top5, op=dist.ReduceOp.SUM)
                    dist.all_reduce(trans_loss, op=dist.ReduceOp.SUM)
                    trans_top5 = (trans_top5 / world_size).item()
                    trans_loss = (trans_loss / world_size).item()

                if rank == 0:
                    batch_time = time.time() - start
                    write_log(
                        logger,
                        f'[{global_step}/{total_iters}, {epoch}]\tIter time: {batch_time:.3f}\t'
                        f'Trans loss: {trans_loss:.3f}\tMask_loss: {mask_loss:.3f}\t'
                        f'Trans@5: {trans_top5:.3f}\tMask@5: {mask_top5:.3f}')
                    tb_logger.add_scalar('loss/translate', trans_loss, global_step)
                    tb_logger.add_scalar('loss/mask', mask_loss, global_step)
                    tb_logger.add_scalar('top5/translate', trans_top5, global_step)
                    tb_logger.add_scalar('top5/mask', mask_top5, global_step)
                    tb_logger.add_scalar('batch/time', batch_time, global_step)
                    tb_logger.add_scalar('batch/lr',
                                         optimizer.param_groups[0]['lr'],
                                         global_step)

            global_step += 1
            if finish_epoch:
                break

        # validation
        validate(model, tgt_mask, h_valid_loader, hk_valid_loader, rank, logger,
                 tb_logger, epoch, device)

        # save model
        if rank == 0:
            torch.save(
                {
                    'epoch': epoch,
                    'model': model.module.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'scheduler': scheduler.state_dict(),
                    'scaler': scaler.state_dict()
                }, f'{args.save_path}/{args.model_name}_ckpt.pt')
            write_log(logger, f"***** {epoch}th model updated! *****")
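# accuracy(predicted, target, k) above is assumed to be a standard top-k
# accuracy helper. A minimal sketch; the repository's version may return a
# percentage rather than a fraction, which would only change the scale of the
# logged Trans@5/Mask@5 values:

def accuracy(output, target, k=5):
    """Fraction of rows whose target class is among the top-k logits."""
    with torch.no_grad():
        _, pred = output.topk(k, dim=1)            # (N, k) top-k class indices
        correct = pred.eq(target.view(-1, 1))      # broadcast compare against targets
        return correct.any(dim=1).float().mean().item()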