def main(_):
    # Load configuration.
    with open(FLAGS.config, 'r') as f:
        config = yaml.safe_load(f)  # safe_load avoids executing arbitrary YAML tags.

    # Initialize CoNLL dataset.
    dataset = CoNLLDataset(fname=config['data']['train'], target='lm')

    # Initialize model.
    language_model = LanguageModel(
        vocab_size=len(dataset.token_vocab),
        embedding_dim=config['model']['embedding_dim'],
        hidden_size=config['model']['hidden_size'],
        num_layers=config['model']['num_layers'])
    if torch.cuda.is_available():
        language_model = language_model.cuda()

    # Initialize loss function. NOTE: The weight of the padding token (index 0)
    # is manually set to 0 so that padding does not contribute to the loss.
    weight = torch.ones(len(dataset.token_vocab))
    weight[0] = 0
    if torch.cuda.is_available():
        weight = weight.cuda()
    loss_function = torch.nn.NLLLoss(weight)
    optimizer = torch.optim.Adam(language_model.parameters())

    # Main training loop.
    data_loader = DataLoader(
        dataset,
        batch_size=config['training']['batch_size'],
        shuffle=True,
        collate_fn=collate_annotations)
    losses = []
    i = 0
    for epoch in range(config['training']['num_epochs']):
        for batch in data_loader:
            inputs, targets, lengths = batch
            if torch.cuda.is_available():
                # Assumes the collate function returns CPU tensors; move the
                # batch to the GPU to match the model.
                inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs, _ = language_model(inputs, lengths=lengths)
            outputs = outputs.view(-1, len(dataset.token_vocab))
            targets = targets.view(-1)
            loss = loss_function(outputs, targets)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())  # .item() replaces the deprecated loss.data[0].
            if (i % 100) == 0:
                average_loss = np.mean(losses)
                losses = []
                print('Iteration %i - Loss: %0.6f' % (i, average_loss), end='\r')
            if (i % 1000) == 0:
                torch.save(language_model, config['data']['checkpoint'])
            i += 1
    torch.save(language_model, config['data']['checkpoint'])
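
# The collate function used above is project-specific and not shown. Below is
# a minimal sketch of what `collate_annotations` plausibly does, assuming each
# dataset item is a 1-D LongTensor of token ids with index 0 reserved for
# padding (matching the zeroed NLLLoss weight). The name and the shift-by-one
# target construction are assumptions, not the project's actual implementation.
def collate_annotations_sketch(batch):
    lengths = torch.tensor([len(seq) for seq in batch])
    padded = torch.zeros(len(batch), int(lengths.max()), dtype=torch.long)
    for row, seq in enumerate(batch):
        padded[row, :len(seq)] = seq
    # Next-token LM objective: inputs are tokens 0..n-2, targets are tokens 1..n-1.
    return padded[:, :-1], padded[:, 1:], lengths - 1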
def train(opt):
    # Read preprocessed data. Each file must exist and follow the
    # *.prep.{train,val}.pt naming convention produced by preprocessing.
    print_line()
    print('Loading training data ...')
    check_name = re.compile(r'.*\.prep\.train\.pt')
    assert os.path.exists(opt.train_data) and check_name.match(opt.train_data) is not None
    train_dataset = torch.load(opt.train_data)
    train_dataset.set_batch_size(opt.batch_size)
    print('Done.')
    print_line()
    print('Loading validation data ...')
    check_name = re.compile(r'.*\.prep\.val\.pt')
    assert os.path.exists(opt.val_data) and check_name.match(opt.val_data) is not None
    val_dataset = torch.load(opt.val_data)
    val_dataset.set_batch_size(opt.batch_size)
    print('Done.')

    # Build / load model. Start indices default to 0 for a new model so they
    # are always defined below.
    model_start_epoch = 0
    model_start_batch = 0
    if opt.model_reload is None:
        print_line()
        print('Building new model ...')
        model = LanguageModel(train_dataset.num_vocb,
                              dim_word=opt.dim_word,
                              dim_rnn=opt.dim_rnn,
                              num_layers=opt.num_layers,
                              dropout_rate=opt.dropout_rate)
        model.dictionary = train_dataset.dictionary
        print('Done.')
        train_dataset.describe_dataset()
        val_dataset.describe_dataset()
    else:
        print_line()
        print('Loading existing model ...')
        model = torch.load(opt.model_reload)
        print('Done.')
        train_dataset.change_dict(model.dictionary)
        val_dataset.change_dict(model.dictionary)
        model_start_epoch = model.train_info['epoch idx'] - 1
        model_start_batch = model.train_info['batch idx'] - 1

    # Use GPU / CPU
    print_line()
    if opt.cuda:
        model.cuda()
        print('Using GPU %d' % torch.cuda.current_device())
    else:
        print('Using CPU')

    # Criterion: mask padding by zeroing its class weight. reduction='sum'
    # replaces the deprecated size_average=False; the summed loss is divided
    # by the token count when reported.
    criterion_weight = torch.ones(train_dataset.num_vocb + 1)
    criterion_weight[const.PAD] = 0
    criterion = nn.CrossEntropyLoss(weight=criterion_weight, reduction='sum')
    if opt.cuda:
        criterion = criterion.cuda()

    # Optimizer
    lr = opt.lr
    optimizer = getattr(optim, opt.optimizer)(model.parameters(), lr=lr)

    if model_start_epoch > opt.epoch:
        print('This model has already been trained for more than %d epochs; '
              'increase the epoch parameter if you want to continue.'
              % (opt.epoch + 1))
        return

    print_line()
    print('')
    if opt.model_reload is None:
        print('Start training new model, will go through %d epochs' % opt.epoch)
    else:
        print('Continue existing model, from epoch %d, batch %d to epoch %d'
              % (model_start_epoch, model_start_batch, opt.epoch))
    print('')

    # Copy the dict so later in-place updates to model.train_info do not
    # silently overwrite the best-model record.
    best_model = dict(model.train_info)
    best_model.setdefault('val loss', float('inf'))

    if opt.save_freq == 0:
        opt.save_freq = train_dataset.num_batch - 1

    # Train
    model.train()
    for epoch_idx in range(model_start_epoch, opt.epoch):
        # New epoch
        acc_loss = 0.0
        acc_count = 0
        start_time = time.time()
        train_dataset.shuffle()
        print_line()
        print('Start epoch %d, learning rate %f' % (epoch_idx + 1, lr))
        print_line('-')
        epoch_start_time = start_time

        # If resuming a loaded model, skip the batches it has already seen.
        if epoch_idx == model_start_epoch and model_start_batch > 0:
            start_batch = model_start_batch
        else:
            start_batch = 0

        for batch_idx in range(start_batch, train_dataset.num_batch):
            # Generate batch data
            batch_data, batch_lengths, target_words = train_dataset[batch_idx]
            if opt.cuda:
                batch_data = batch_data.cuda()
                batch_lengths = batch_lengths.cuda()
                target_words = target_words.cuda()

            optimizer.zero_grad()
            # Forward
            output_flat = model(batch_data, batch_lengths)
            # Calculate loss
            loss = criterion(output_flat, target_words.view(-1))
            # Backward
            loss.backward()
            # Clip gradients to prevent them from exploding.
            torch.nn.utils.clip_grad_norm_(model.parameters(), opt.clip)
            # Update parameters
            optimizer.step()

            # Accumulate loss; .item() replaces indexing into loss.data.
            acc_loss += loss.item()
            acc_count += int(batch_lengths.sum())

            # Display progress
            if batch_idx % opt.display_freq == 0:
                average_loss = acc_loss / acc_count
                print('Epoch : %d, Batch : %d / %d, Loss : %f, Perplexity : %f, Time : %f'
                      % (epoch_idx + 1, batch_idx, train_dataset.num_batch,
                         average_loss, math.exp(average_loss),
                         time.time() - start_time))
                acc_loss = 0.0
                acc_count = 0
                start_time = time.time()

            # Save and validate when necessary
            if (1 + batch_idx) % opt.save_freq == 0:
                print_line('-')
                print('Pause training to save and validate.')
                model.eval()
                val_loss = evaluate(model=model,
                                    eval_dataset=val_dataset,
                                    cuda=opt.cuda,
                                    criterion=criterion)
                model.train()
                print('Validation Loss : %f' % val_loss)
                print('Validation Perplexity : %f' % math.exp(val_loss))
                model_savename = (opt.model_name
                                  + '-e_' + str(epoch_idx + 1)
                                  + '-b_' + str(batch_idx + 1)
                                  + '-ppl_' + str(int(math.exp(val_loss))) + '.pt')
                model.val_loss = val_loss
                model.val_ppl = math.exp(val_loss)
                model.epoch_idx = epoch_idx + 1
                model.batch_idx = batch_idx + 1
                model.train_info['val loss'] = val_loss
                model.train_info['epoch idx'] = epoch_idx + 1
                model.train_info['batch idx'] = batch_idx + 1
                model.train_info['val ppl'] = math.exp(model.val_loss)
                model.train_info['save name'] = model_savename
                try:
                    torch.save(model, model_savename)
                except Exception:
                    print('Failed to save model!')
                if model.val_loss < best_model['val loss']:
                    print_line('-')
                    print('New best model on validation set')
                    best_model = dict(model.train_info)
                    shutil.copy2(best_model['save name'], opt.model_name + '.best.pt')
                print_line('-')
                print('Saved model at %s' % model_savename)
                print_line('-')
                print('Continue training ...')
                print_line('-')

        print('Epoch %d finished, took %d s'
              % (epoch_idx + 1, time.time() - epoch_start_time))
        # Decay the learning rate and rebuild the optimizer with the new value.
        lr *= opt.lr_decay
        optimizer = getattr(optim, opt.optimizer)(model.parameters(), lr=lr)

    # Finish training
    print_line()
    print(' ')
    print('Finished training %d epochs!' % opt.epoch)
    print(' ')
    print_line()
    print('Best model:')
    print('Epoch : %d, Batch : %d, Loss : %f, Perplexity : %f'
          % (best_model['epoch idx'], best_model['batch idx'],
             best_model['val loss'], best_model['val ppl']))
    print_line('-')
    print('Saved best model at %s' % (opt.model_name + '.best.pt'))
    shutil.copy2(best_model['save name'], opt.model_name + '.best.pt')
    print_line()
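
# The `evaluate` helper called above is not shown. Below is a minimal sketch
# consistent with how train() uses it -- it must return an average per-token
# loss so that math.exp(val_loss) is a perplexity -- assuming the dataset is
# indexed exactly as in the training loop and the criterion uses
# reduction='sum'. The function name and body are assumptions.
def evaluate_sketch(model, eval_dataset, cuda, criterion):
    total_loss = 0.0
    total_tokens = 0
    with torch.no_grad():
        for batch_idx in range(eval_dataset.num_batch):
            batch_data, batch_lengths, target_words = eval_dataset[batch_idx]
            if cuda:
                batch_data = batch_data.cuda()
                batch_lengths = batch_lengths.cuda()
                target_words = target_words.cuda()
            output_flat = model(batch_data, batch_lengths)
            total_loss += criterion(output_flat, target_words.view(-1)).item()
            total_tokens += int(batch_lengths.sum())
    return total_loss / total_tokens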
def detach_hidden(h):
    """Detach hidden states from their history."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(detach_hidden(v) for v in h)


# Let cuDNN benchmark and cache the fastest kernels for fixed-size inputs.
torch.backends.cudnn.benchmark = True

# Seed PyTorch and NumPy for reproducibility.
torch.manual_seed(0)
np.random.seed(0)

labels = Labels()
num_labels = len(labels)

model = LanguageModel(128, 512, 256, num_labels, n_layers=3, dropout=0.3)
model.cuda()

bptt = 8
batch_size = 32

train = [
    '/media/lytic/STORE/ru_open_stt_wav/text/public_youtube1120_hq.txt',
    '/media/lytic/STORE/ru_open_stt_wav/text/public_youtube1120.txt',
    '/media/lytic/STORE/ru_open_stt_wav/text/public_youtube700.txt'
]

test = [
    '/media/lytic/STORE/ru_open_stt_wav/text/asr_calls_2_val.txt',
    '/media/lytic/STORE/ru_open_stt_wav/text/buriy_audiobooks_2_val.txt',
    '/media/lytic/STORE/ru_open_stt_wav/text/public_youtube700_val.txt'
]
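
# detach_hidden() is typically applied between truncated-BPTT windows so that
# backpropagation stops at the chunk boundary instead of unrolling through the
# whole text stream. Below is a minimal sketch of that pattern; the
# (output, hidden) return signature, the chunk iterator, and the function name
# are assumptions about this model, not code from the snippet above.
def train_chunks_sketch(model, optimizer, criterion, chunks, hidden=None):
    for inputs, targets in chunks:  # each chunk covers `bptt` timesteps
        if hidden is not None:
            # Keep the hidden state's values but drop its autograd history.
            hidden = detach_hidden(hidden)
        optimizer.zero_grad()
        output, hidden = model(inputs, hidden)
        loss = criterion(output.view(-1, output.size(-1)), targets.view(-1))
        loss.backward()
        optimizer.step()
    return hidden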