class Train(object):
    """Trainer for a pointer-generator summarizer with an abstractiveness-based
    loss mask and a decoder that additionally returns a temperature `tau`
    (used when config.adaptive_sparsemax is set).

    NOTE(review): relies on module-level names defined elsewhere in this file
    (config, Vocab, Batcher, Model, Adagrad, use_cuda, tf, torch, debug,
    calc_running_avg_loss, get_input_from_batch, get_output_from_batch, ...).
    """

    def __init__(self, model_file_path=None):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        # Give the batcher's background workers time to fill their queues
        # before training starts.
        time.sleep(15)
        if not model_file_path:
            # Fresh run: create a new timestamped log directory.
            train_dir = os.path.join(config.log_root,
                                     'train_%d' % (int(time.time())))
            if not os.path.exists(train_dir):
                os.mkdir(train_dir)
        else:
            # Resuming: recover the original run directory by stripping the
            # trailing '/model/model...' checkpoint suffix from the path.
            train_dir = re.sub('/model/model.*', '', model_file_path)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)
        # TF2-style summary writer.
        self.summary_writer = tf.summary.create_file_writer(train_dir)

    def save_model(self, running_avg_loss, iter):
        """Checkpoint model/optimizer state plus the running loss to model_dir."""
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(
            self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        """Build the model and Adagrad optimizer; optionally resume from a
        checkpoint.

        Returns:
            (start_iter, start_loss): iteration and running loss to resume at
            (both 0 for a fresh run).
        """
        self.model = Model(model_file_path)
        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
            list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params, lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)
        # self.optimizer = Adam(params)
        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']
            # The optimizer state is only restored when NOT switching to the
            # coverage objective; the coverage phase starts a fresh optimizer.
            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    # Move restored optimizer tensors onto the GPU.
                    # NOTE(review): the loop variable shadows the `state`
                    # checkpoint dict loaded above — works, but fragile.
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()
        return start_iter, start_loss

    def f(self, x, alpha):
        """Map an abstractiveness score x to a loss weight: 1 - x**alpha."""
        # Earlier (disabled) linear scheme:
        # k = utils.EPOCH / (utils.MAX_EPOCH / 2) - 1
        # return k * x + (1 - k)/2
        return 1 - x**alpha

    def get_loss_mask(self, src, tgt, absts, alpha=config.alpha):
        """Return a per-example loss-weight tensor derived from the
        precomputed abstractiveness scores `absts`.

        `src`/`tgt` are only used for their length here; the disabled code
        below shows how abstractiveness used to be computed from them
        (fraction of target tokens absent from the source).

        NOTE(review): `alpha=config.alpha` is evaluated once at class-creation
        time; later changes to config.alpha will not affect the default.
        """
        loss_mask = []
        for i in range(len(src)):
            # Former on-the-fly computation (token id 1 filtered as padding):
            #   tgt_i = [t for t in tgt[i] if t != 1]
            #   src_i = set([s for s in src[i] if s != 1])
            #   cnt = len([t for t in tgt_i if t not in src_i])
            #   abst = round(cnt / len(tgt_i), 4)
            abst = absts[i]
            loss_factor = self.f(abst, alpha)
            loss_mask.append(loss_factor)
        # NOTE(review): unconditional .cuda() — will fail on CPU-only runs;
        # the rest of the class gates GPU use on `use_cuda`. Confirm intent.
        return torch.Tensor(loss_mask).cuda()

    def train_one_batch(self, batch):
        """Run one teacher-forced training step on `batch`.

        Returns:
            (loss_value, tau): scalar loss and the `tau` value returned by the
            decoder at the last decoding step.
        """
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)
        self.optimizer.zero_grad()
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        # The reduced final encoder state seeds the decoder state at t=0.
        s_t_1 = self.model.reduce_state(encoder_hidden)
        # Per-example loss weights from abstractiveness scores.
        loss_mask = self.get_loss_mask(enc_batch, dec_batch, batch.absts)
        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage, tau = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)
            target = target_batch[:, di]
            # Probability the model assigned to each gold token.
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                # Coverage penalty (sum of elementwise min of attention and
                # accumulated coverage).
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            if config.loss_mask:
                # Re-weight each example by its abstractiveness factor.
                step_loss = step_loss * loss_mask
            step_losses.append(step_loss)
            if config.DEBUG:
                # break
                pass
        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)
        # In DEBUG mode the backward/step is skipped entirely.
        if not config.DEBUG:
            loss.backward()
            self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                        config.max_grad_norm)
            clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
            clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)
            self.optimizer.step()
        return loss.item(), tau

    def trainIters(self, n_iters, model_file_path=None):
        """Main training loop up to `n_iters` iterations.

        Logs every 100 steps, flushes summaries every 100 steps, and saves a
        checkpoint every 5000 steps. In DEBUG mode, stops after
        config.BREAK_POINT iterations.
        """
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        start_iter = iter
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss, tau = self.train_one_batch(batch)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            iter += 1
            if config.DEBUG:
                debug('iter', iter)
                if iter - start_iter > config.BREAK_POINT:
                    break
            if iter % 100 == 0:
                self.summary_writer.flush()
            print_interval = 100
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                if config.adaptive_sparsemax:
                    # tau is a column tensor; print its entries rounded.
                    print('tau + eps', [
                        round(e[0], 4)
                        for e in (tau + config.eps).detach().cpu().numpy().tolist()
                    ])
                start = time.time()
            if iter % 5000 == 0:
                self.save_model(running_avg_loss, iter)
class Train(object):
    """Trainer for the pointer-generator model (verbose, print-heavy variant).

    Uses a TF1-style summary writer (tf.summary.FileWriter) and checkpoints
    every 500 iterations.
    """

    def __init__(self):
        #config("print.vocab_path ",config.vocab_path)
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        # Give the batcher's background workers time to fill their queues.
        time.sleep(15)
        train_dir = os.path.join(config.log_root,
                                 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)
        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)
        # TF1 API — under TensorFlow 2 this needs tf.compat.v1.
        self.summary_writer = tf.summary.FileWriter(train_dir)

    def save_model(self, running_avg_loss, iter):
        """Checkpoint model/optimizer state plus the running loss to model_dir."""
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(
            self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        """Build the model and Adagrad optimizer; optionally resume from a
        checkpoint.

        Returns:
            (start_iter, start_loss) to resume at (0, 0 for a fresh run).
        """
        self.model = Model(model_file_path)
        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
            list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params, lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)
        start_iter, start_loss = 0, 0
        # Loading the state where training stopped earlier so we can resume.
        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']
            # Optimizer state is only restored when NOT switching to coverage.
            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                # Make the restored optimizer tensors GPU-resident.
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()
        return start_iter, start_loss

    def train_one_batch(self, batch):
        """Run one teacher-forced training step on `batch`; returns the loss."""
        # The two calls below unpack encoder/decoder tensors (token ids,
        # masks, lengths, extended vocab, initial context/coverage).
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)
        self.optimizer.zero_grad()
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        # Reduced final encoder state becomes the decoder state at t=0.
        s_t_1 = self.model.reduce_state(encoder_hidden)
        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            # Negative log-likelihood of the gold token (Eqn. 6).
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                # Coverage penalty (Eqn. 13a/13b).
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)
        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)
        loss.backward()
        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)
        self.optimizer.step()
        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        """Main training loop to `n_iters`; logs every 100 iterations and
        saves a checkpoint every 500."""
        print("trainIters__Started___model_file_path is : ", model_file_path)
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        print("Max iteration : n_iters = ", n_iters)
        print("going to start running iter NO : ", iter)
        print("\n******************************\n")
        while iter < n_iters:
            print("\n###################################\n")
            print("iter : ", iter)
            batch = self.batcher.next_batch()
            print("batch data loading : ", batch)
            loss = self.train_one_batch(batch)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            print("running_avg_loss : ", running_avg_loss)
            iter += 1
            if iter % 100 == 0:  ##100
                self.summary_writer.flush()
            print_interval = 100  #1000
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()
            if iter % 500 == 0:  ##5000
                self.save_model(running_avg_loss, iter)
class Train(object):
    """Trainer supporting both a flat and a hierarchical pointer-generator.

    When config.is_hierarchical is set, sentence-level positions (derived
    from '.' and '[PAD]' token ids) are fed to a sentence-aware encoder and
    decoder; otherwise the flat `forward1` paths are used.
    """

    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        # print("MODE MUST BE train")
        # time.sleep(15)
        self.print_interval = config.print_interval
        train_dir = config.train_dir
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)
        # Checkpoints go directly into the training directory here.
        self.model_dir = train_dir
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)
        # self.summary_writer = tf.compat.v1.summary.FileWriter(train_dir)

    def save_model(self, running_avg_loss, iter):
        """Checkpoint model/optimizer state to <model_dir>/iter<N>.pt."""
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(self.model_dir, 'iter{}.pt'.format(iter))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        """Build the model and Adagrad optimizer; optionally resume from a
        checkpoint. Returns (start_iter, start_loss)."""
        self.model = Model(model_file_path)
        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
            list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(params, lr=initial_lr,
                                 initial_accumulator_value=config.adagrad_init_acc)
        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']
            # Optimizer state is only restored when NOT switching to coverage.
            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    # Move restored optimizer tensors onto the GPU.
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()
        return start_iter, start_loss

    def train_one_batch(self, batch):
        """One teacher-forced step (flat or hierarchical); returns the loss."""
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)
        self.optimizer.zero_grad()
        if not config.is_hierarchical:
            encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)
            s_t_1 = self.model.reduce_state.forward1(encoder_hidden)
        else:
            # Sentence boundaries are inferred from '.'; padding from '[PAD]'.
            stop_id = self.vocab.word2id('.')
            pad_id = self.vocab.word2id('[PAD]')
            enc_sent_pos = get_sent_position(enc_batch, stop_id, pad_id)
            # NOTE(review): dec_sent_pos is computed but never used below.
            dec_sent_pos = get_sent_position(dec_batch, stop_id, pad_id)
            encoder_outputs, encoder_feature, encoder_hidden, sent_enc_outputs, sent_enc_feature, sent_enc_hidden, sent_enc_padding_mask, sent_lens, seq_lens2 = \
                self.model.encoder(enc_batch, enc_lens, enc_sent_pos)
            s_t_1, sent_s_t_1 = self.model.reduce_state(encoder_hidden, sent_enc_hidden)
        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            if not config.is_hierarchical:
                # start = datetime.now()
                final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder.forward1(
                    y_t_1, s_t_1, encoder_outputs, encoder_feature,
                    enc_padding_mask, c_t_1, extra_zeros,
                    enc_batch_extend_vocab, coverage, di)
            else:
                # start = datetime.now()
                max_doc_len = enc_batch.size(1)
                final_dist, sent_s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                    y_t_1, sent_s_t_1, encoder_outputs, encoder_feature,
                    enc_padding_mask, seq_lens2, sent_s_t_1, sent_enc_outputs,
                    sent_enc_feature, sent_enc_padding_mask, sent_lens,
                    max_doc_len, c_t_1, extra_zeros, enc_batch_extend_vocab,
                    coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                # Coverage penalty from the pointer-generator paper.
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)
        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses/dec_lens_var
        loss = torch.mean(batch_avg_loss)
        loss.backward()
        clip_grad_norm_(self.model.encoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)
        self.optimizer.step()
        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        """Main loop to `n_iters`; logs every print_interval iterations and
        saves every config.save_every iterations."""
        iter, running_avg_loss = self.setup_train(model_file_path)
        sys.stdout.flush()
        # Previously the loop could replay pre-pickled batches instead of the
        # live batcher:
        # data_path = "lib/data/batches_train.vocab50000.batch16.pk.bin"
        # with open(data_path, 'rb') as f:
        #     stored_batches = pickle.load(f, encoding="bytes")
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss, iter)
            iter += 1
            if iter % self.print_interval == 0:
                print("[{}] iter {}, loss: {:.5f}".format(str(datetime.now()), iter, loss))
                sys.stdout.flush()
            if iter % config.save_every == 0:
                self.save_model(running_avg_loss, iter)
        print("Finished training!")
class Train(object):
    """Trainer for the pointer-generator summarization model.

    Builds the vocabulary and batcher, owns the model and optimizer, and
    runs the teacher-forced training loop with optional coverage loss.
    """

    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        # Give the batcher's background workers time to fill their queues.
        time.sleep(15)

        run_dir = os.path.join(config.log_root, 'train_%d' % (int(time.time())))
        if not os.path.exists(run_dir):
            os.mkdir(run_dir)
        self.model_dir = os.path.join(run_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)
        self.summary_writer = tf.summary.FileWriter(run_dir)

    def save_model(self, running_avg_loss, iter):
        """Write a checkpoint (weights, optimizer state, loss) to model_dir."""
        checkpoint = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss,
        }
        save_path = os.path.join(self.model_dir,
                                 'model_%d_%d' % (iter, int(time.time())))
        torch.save(checkpoint, save_path)

    def setup_train(self, model_file_path=None):
        """Create the model and Adagrad optimizer; optionally resume.

        Returns:
            (start_iter, start_loss): resume point, (0, 0) for a fresh run.
        """
        self.model = Model(model_file_path)
        trainable = (list(self.model.encoder.parameters())
                     + list(self.model.decoder.parameters())
                     + list(self.model.reduce_state.parameters()))
        lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(trainable, lr=lr,
                                 initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            checkpoint = torch.load(model_file_path,
                                    map_location=lambda storage, location: storage)
            start_iter = checkpoint['iter']
            start_loss = checkpoint['current_loss']
            # When switching on coverage, training starts with a fresh
            # optimizer state; otherwise restore it from the checkpoint.
            if not config.is_coverage:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                if use_cuda:
                    # Move the restored optimizer tensors onto the GPU.
                    for slot in self.optimizer.state.values():
                        for key, val in slot.items():
                            if torch.is_tensor(val):
                                slot[key] = val.cuda()
        return start_iter, start_loss

    def train_one_batch(self, batch):
        """Run one teacher-forced optimization step; return the scalar loss."""
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, \
            extra_zeros, c_t_1, coverage = get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = \
            self.model.encoder(enc_batch, enc_lens)
        # Reduced final encoder state seeds the decoder state at t=0.
        s_t_1 = self.model.reduce_state(encoder_hidden)

        per_step_losses = []
        n_steps = min(max_dec_len, config.max_dec_steps)
        for t in range(n_steps):
            y_t_1 = dec_batch[:, t]  # teacher forcing: feed the gold token
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = \
                self.model.decoder(y_t_1, s_t_1, encoder_outputs, encoder_feature,
                                   enc_padding_mask, c_t_1, extra_zeros,
                                   enc_batch_extend_vocab, coverage, t)
            gold = target_batch[:, t]
            # Probability assigned to each gold token at this step.
            gold_probs = torch.gather(final_dist, 1, gold.unsqueeze(1)).squeeze()
            nll = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                # Coverage penalty from the pointer-generator paper.
                cov_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                nll = nll + config.cov_loss_wt * cov_loss
                coverage = next_coverage
            # Zero out loss on padded decoder positions.
            per_step_losses.append(nll * dec_padding_mask[:, t])

        total = torch.sum(torch.stack(per_step_losses, 1), 1)
        loss = torch.mean(total / dec_lens_var)

        loss.backward()
        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)
        self.optimizer.step()

        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        """Train up to `n_iters`, logging every 50 steps and saving every 5000."""
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            iter += 1

            if iter % 100 == 0:
                self.summary_writer.flush()

            print_interval = 50
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()

            if iter % 5000 == 0:
                self.save_model(running_avg_loss, iter)
class Train(object):
    """Trainer for the pointer-generator on AMI meeting data, with periodic
    validation and early stopping.

    Batches are materialized directly with `get_a_batch` from a preloaded
    list of examples instead of a background Batcher.
    """

    def __init__(self):
        if config.is_hierarchical:
            raise Exception("Hierarchical PGN-AMI not supported!")
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        # Special token ids used when assembling batches.
        self.pad_id = self.vocab.word2id(PAD_TOKEN)
        self.start_id = self.vocab.word2id(START_DECODING)
        self.stop_id = self.vocab.word2id(STOP_DECODING)
        self.print_interval = config.print_interval
        train_dir = config.train_dir
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)
        # Checkpoints go directly into the training directory.
        self.model_dir = train_dir
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

    def save_model(self, running_avg_loss, iter):
        """Checkpoint model/optimizer state to <model_dir>/iter<N>.pt."""
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(self.model_dir, 'iter{}.pt'.format(iter))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        """Build the model and Adagrad optimizer; optionally resume from a
        checkpoint. Returns (start_iter, start_loss)."""
        self.model = Model(model_file_path)
        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
            list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params, lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)
        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']
            # Optimizer state is only restored when NOT switching to coverage.
            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    # Move restored optimizer tensors onto the GPU.
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()
        return start_iter, start_loss

    def train_one_batch(self, ami_data, idx):
        """One teacher-forced step on the batch starting at `idx` in
        `ami_data`; returns the scalar loss."""
        # Former Batcher-based unpacking:
        # enc_... = get_ami_input_from_batch(batch, use_cuda)
        # dec_... = get_ami_output_from_batch(batch, use_cuda)
        enc_pack, dec_pack = get_a_batch(ami_data, idx, self.vocab,
                                         config.batch_size,
                                         config.max_enc_steps,
                                         config.max_dec_steps, self.start_id,
                                         self.stop_id, self.pad_id,
                                         sum_type='short', use_cuda=use_cuda)
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = enc_pack
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = dec_pack
        self.optimizer.zero_grad()
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        # Reduced final encoder state seeds the decoder state at t=0.
        s_t_1 = self.model.reduce_state.forward1(encoder_hidden)
        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder.forward1(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                # Coverage penalty from the pointer-generator paper.
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)
        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)
        loss.backward()
        clip_grad_norm_(self.model.encoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)
        self.optimizer.step()
        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        """Main loop with periodic validation and early stopping.

        A few validation meetings are folded into the training data; training
        stops early once validation loss fails to improve `config.stop_after`
        evaluations in a row.
        """
        iter, running_avg_loss = self.setup_train(model_file_path)
        sys.stdout.flush()
        ami_data = load_ami_data('train')
        valid_data = load_ami_data('valid')
        # Top up the training data with a few shuffled validation meetings.
        random.shuffle(valid_data)
        ami_data.extend(valid_data[:6])
        valid_data = valid_data[6:]
        num_batches = len(ami_data)
        idx = 0
        # validation & stopping
        best_valid_loss = 1000000000
        stop_counter = 0
        while iter < n_iters:
            if idx == 0:
                print("shuffle training data")
                random.shuffle(ami_data)
            loss = self.train_one_batch(ami_data, idx)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss, iter)
            iter += 1
            idx += config.batch_size
            # NOTE(review): if num_batches is not a multiple of batch_size,
            # idx steps past num_batches and is never reset — confirm the
            # data size guarantees exact divisibility.
            if idx == num_batches:
                idx = 0
            if iter % self.print_interval == 0:
                print("[{}] iter {}, loss: {:.5f}".format(
                    str(datetime.now()), iter, loss))
                sys.stdout.flush()
            if iter % config.save_every == 0:
                self.save_model(running_avg_loss, iter)
            if iter % config.eval_every == 0:
                valid_loss = self.run_eval(valid_data)
                print("valid_loss = {:.5f}".format(valid_loss))
                if valid_loss < best_valid_loss:
                    stop_counter = 0
                    best_valid_loss = valid_loss
                    print("VALID better")
                else:
                    stop_counter += 1
                    print(
                        "VALID NOT better, counter = {}".format(stop_counter))
                    if stop_counter == config.stop_after:
                        print("Stop training")
                        return
        print("Finished training!")

    def eval_one_batch(self, eval_data, idx):
        """Compute the (no-grad-step) loss of one size-1 validation batch."""
        enc_pack, dec_pack = get_a_batch(eval_data, idx, self.vocab, 1,
                                         config.max_enc_steps,
                                         config.max_dec_steps, self.start_id,
                                         self.stop_id, self.pad_id,
                                         sum_type='short', use_cuda=use_cuda)
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = enc_pack
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = dec_pack
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state.forward1(encoder_hidden)
        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder.forward1(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros,
                enc_batch_extend_vocab, coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, dim=1,
                                      index=target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)
        sum_step_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_step_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)
        return loss.data.item()

    def run_eval(self, eval_data):
        """Evaluate on all of `eval_data`; return the mean per-batch loss."""
        # NOTE(review): `iter` stays 0 throughout, so calc_running_avg_loss is
        # always called with iter == 0 here — confirm this is intended.
        running_avg_loss, iter = 0, 0
        batch_losses = []
        num_batches = len(eval_data)
        print("valid data size = {}".format(num_batches))
        for idx in range(num_batches):
            loss = self.eval_one_batch(eval_data, idx)
            batch_losses.append(loss)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss, iter)
            # Progress marker, one '#' per batch.
            print("#", end="")
            sys.stdout.flush()
        print()
        avg_loss = sum(batch_losses) / len(batch_losses)
        return avg_loss
class Train:
    """Driver for pointer-generator training with an optional coverage stage
    (config.do_coverage) and a tqdm progress bar."""

    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        # Let the batcher's worker threads pre-fill their queues.
        time.sleep(15)

        run_dir = os.path.join(config.log_root, 'train_%d' % (int(time.time())))
        if not os.path.exists(run_dir):
            os.mkdir(run_dir)
        self.model_dir = os.path.join(run_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)
        self.summary_writer = tf.summary.FileWriter(run_dir)

    def save_model(self, moving_avg_loss, iter):
        """Checkpoint model weights, optimizer state and the moving loss."""
        snapshot = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': moving_avg_loss,
        }
        destination = os.path.join(self.model_dir,
                                   'model_%d_%d' % (iter, int(time.time())))
        torch.save(snapshot, destination)

    def setup_train(self, model_file_path=None):
        """Build model + Adagrad; resume (iter, loss) from a checkpoint if given.

        Returns:
            (start_iter, start_loss): resume point, (0, 0) for a fresh run.
        """
        self.model = Model(model_file_path)
        all_params = (list(self.model.encoder.parameters())
                      + list(self.model.decoder.parameters())
                      + list(self.model.reduce_state.parameters()))
        lr0 = config.lr_coverage if config.do_coverage else config.lr
        self.optimizer = Adagrad(all_params, lr=lr0,
                                 initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            ckpt = torch.load(model_file_path,
                              map_location=lambda storage, location: storage)
            start_iter = ckpt['iter']
            start_loss = ckpt['current_loss']
            # At the epoch where the run switches to the coverage objective,
            # a fresh optimizer state is used instead of restoring the old
            # one; this condition controls that switch.
            if not config.do_coverage:
                self.optimizer.load_state_dict(ckpt['optimizer'])
                if use_cuda:
                    # Make the restored optimizer tensors GPU-resident.
                    for slot in self.optimizer.state.values():
                        for key, val in slot.items():
                            if torch.is_tensor(val):
                                slot[key] = val.cuda()
        return start_iter, start_loss

    def train_one_batch(self, batch):
        """One teacher-forced optimization step; return the scalar batch loss."""
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, \
            extra_zeros, context_v, coverage = get_encoder_variables(batch, use_cuda)
        # dec_lens_var holds the gold summary length for every batch element.
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_decoder_variables(batch, use_cuda)

        self.optimizer.zero_grad()

        # Diagnostic dump for degenerate batches with a zero-length source.
        if 0 in enc_lens:
            print('=================')
            print(enc_batch.shape)
            print(enc_lens)
            print(enc_batch)
            print('=================')

        encoder_outputs, encoder_feature, encoder_hidden = \
            self.model.encoder(enc_batch, enc_lens)
        d_hc = self.model.reduce_state(encoder_hidden)  # initial decoder (h, c)

        step_losses = []
        for step in range(min(max_dec_len, config.max_dec_steps)):
            d_inp = dec_batch[:, step]  # Teacher forcing
            final_dist, d_hc, context_v, attn_dist, p_gen, next_coverage = \
                self.model.decoder(d_inp, d_hc, encoder_outputs, encoder_feature,
                                   enc_padding_mask, context_v, extra_zeros,
                                   enc_batch_extend_vocab, coverage, step)
            target = target_batch[:, step]
            # Gather the predicted probability of each gold token.
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.do_coverage:
                # Accumulated-attention penalty; see the original paper.
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            # Mask out padded decoder positions.
            step_losses.append(step_loss * dec_padding_mask[:, step])

        summed = torch.sum(torch.stack(step_losses, 1), 1)
        loss = torch.mean(summed / dec_lens_var)

        loss.backward()
        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)
        self.optimizer.step()

        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        """Train to `n_iters` with a progress bar, periodic logs and saves."""
        iter, moving_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        pbar = tqdm.tqdm(total=n_iters)
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)
            moving_avg_loss = calc_moving_avg_loss(loss, moving_avg_loss,
                                                   self.summary_writer, iter)
            iter += 1
            pbar.update(1)

            if iter % 100 == 0:
                self.summary_writer.flush()

            print_interval = 100
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()

            if iter % 5000 == 0:
                self.save_model(moving_avg_loss, iter)
        pbar.close()
class Trainer:
    """Pointer-generator trainer (attribute-style config).

    Builds vocab/datasets, then runs the train / validate / checkpoint
    loop; per-batch loss is computed in train_one() and backpropagated
    in train().
    """

    def __init__(self, config):
        # NOTE: this class reads `config.attr` (attribute style), unlike the
        # dict-style `config['key']` trainer elsewhere in this file.
        self.config = config
        self.step = 0  # global step counter; restored from checkpoint in setup()
        self.vocab = Vocab(config.vocab_file, config.vocab_size)
        self.train_data = CNNDMDataset('train', config.data_path, config, self.vocab)
        self.validate_data = CNNDMDataset('val', config.data_path, config, self.vocab)
        # self.model = Model(config).to(device)
        # self.optimizer = None
        self.setup(config)

    def setup(self, config):
        """Build model + Adagrad optimizer; resume from config.train_from if set."""
        model = Model(config)
        checkpoint = None
        if config.train_from != '':
            logging('Train from %s' % config.train_from)
            checkpoint = torch.load(config.train_from, map_location='cpu')
            model.load_state_dict(checkpoint['model'])
            self.step = checkpoint['step']
        # Module.to() is in-place for nn.Module, so the optimizer below sees
        # the on-device parameters even though it is given `model.parameters()`.
        self.model = model.to(device)
        self.optimizer = Adagrad(model.parameters(),
                                 lr=config.learning_rate,
                                 initial_accumulator_value=config.initial_acc)
        if checkpoint is not None:
            self.optimizer.load_state_dict(checkpoint['optimizer'])

    def train_one(self, batch):
        """Compute the mean NLL (+ optional coverage) loss for one batch.

        Does NOT call backward() or step() — train() owns the optimizer.
        Returns a scalar loss tensor.
        """
        config = self.config
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, config, device)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, device)
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)
        step_losses = []
        for di in range(max_dec_len):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature, enc_padding_mask,
                c_t_1, extra_zeros, enc_batch_extend_vocab, coverage, di)
            target = target_batch[:, di]
            # Probability the model assigned to each gold target token.
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)  # eps avoids log(0)
            if config.is_coverage:
                # Coverage penalty: overlap of current attention with the
                # accumulated attention so far (See et al. style).
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            # Mask out padded target positions so they do not contribute.
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)
        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var  # normalize by true target length
        loss = torch.mean(batch_avg_loss)
        return loss

    def train(self):
        """Epoch loop: backward + clipped Adagrad step per batch, with
        periodic logging, validation and checkpointing."""
        config = self.config
        train_loader = DataLoader(self.train_data,
                                  batch_size=config.batch_size,
                                  shuffle=True,
                                  collate_fn=Collate())
        running_avg_loss = 0
        self.model.train()
        for e in range(config.train_epoch):
            for batch in train_loader:
                self.step += 1
                self.optimizer.zero_grad()
                loss = self.train_one(batch)
                loss.backward()
                clip_grad_norm_(self.model.parameters(), config.max_grad_norm)
                self.optimizer.step()
                #print(loss.item())
                running_avg_loss = calc_running_avg_loss(
                    loss.item(), running_avg_loss)
                if self.step % config.report_every == 0:
                    logging("Step %d Train loss %.3f" % (self.step, running_avg_loss))
                if self.step % config.validate_every == 0:
                    self.validate()
                if self.step % config.save_every == 0:
                    self.save(self.step)
                if self.step % config.test_every == 0:
                    pass  # test hook not implemented

    @torch.no_grad()
    def validate(self):
        """Average train_one() loss over the validation set (no gradients);
        restores train mode before returning."""
        self.model.eval()
        validate_loader = DataLoader(self.validate_data,
                                     batch_size=self.config.batch_size,
                                     shuffle=False,
                                     collate_fn=Collate())
        losses = []
        for batch in validate_loader:
            loss = self.train_one(batch)
            losses.append(loss.item())
        self.model.train()
        # NOTE(review): raises ZeroDivisionError if the loader is empty.
        ave_loss = sum(losses) / len(losses)
        logging('Validate loss : %f' % ave_loss)

    def save(self, step):
        """Checkpoint model, optimizer and step to config.model_path."""
        state = {
            'model': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'step': step
        }
        save_path = os.path.join(self.config.model_path, 'model_s%d.pt' % step)
        logging('Saving model step %d to %s...' % (step, save_path))
        torch.save(state, save_path)
class Trainer:
    """Trainer variant with dict-style config, a cached vocab file,
    gradient accumulation and label-smoothing loss (coverage not used)."""

    def __init__(self, config):
        self.config = config
        self.device = config['device']
        self.step = 0  # global step; restored from checkpoint in setup()
        # Cache the (expensive to build) vocab on disk and reuse it.
        if os.path.exists('../vocab.pt'):
            self.vocab = torch.load('../vocab.pt')
        else:
            self.vocab = Vocab(config['vocab_file'], config['vocab_size'])
            torch.save(self.vocab, '../vocab.pt')
        self.train_data = CNNDMDataset('train', config['data_path'], config, self.vocab)
        self.validate_data = CNNDMDataset('val', config['data_path'], config, self.vocab)
        self.setup(config)

    def setup(self, config):
        """Build model + Adagrad optimizer; resume from config['train_from'] if set."""
        self.model = Model(config).to(config['device'])
        self.optimizer = Adagrad(self.model.parameters(),
                                 lr=config['learning_rate'],
                                 initial_accumulator_value=0.1)
        # self.optimizer = Adam(self.model.parameters(),lr = config['learning_rate'],betas = config['betas'])
        checkpoint = None
        # Original note (translated): Counter may emit equal-frequency elements
        # in a different order between most_common() calls — hence the vocab is
        # restored from the checkpoint to stay consistent with the saved model.
        if config['train_from'] != '':
            logging('Train from %s' % config['train_from'])
            checkpoint = torch.load(config['train_from'], map_location='cpu')
            self.model.load_state_dict(checkpoint['model'])
            self.step = checkpoint['step']
            self.vocab = checkpoint['vocab']
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            # print('State dict parameters:')
            # for n in model.state_dict().keys():
            #     print(n)
        #self.optimizer = Adam(self.model.parameters(),lr = config['learning_rate'],betas = config['betas'])

    def train_one(self, batch):
        """Forward one batch through the model and return the label-smoothing
        loss (scalar tensor). coverage not implemented. No backward() here —
        train() handles gradients (with accumulation)."""
        config = self.config
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros = \
            get_input_from_batch(batch, config, self.device)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, self.device)
        pred = self.model(enc_batch, dec_batch, enc_padding_mask,
                          dec_padding_mask, enc_batch_extend_vocab, extra_zeros)
        # >>>>>>>> DEBUG Session <<<<<<<<<
        # print("ENC\n")
        # print(enc_batch)
        # print("DEC\n")
        # print(dec_batch)
        # print("TGT\n")
        # print(target_batch)
        # print("ENCP\n")
        # print(enc_padding_mask)
        # print("DECP\n")
        # print(dec_padding_mask)
        # encs = [self.vocab.id2word(int(v)) for v in enc_batch[:, 0]]
        # decs = [self.vocab.id2word(int(v)) for v in dec_batch[:, 0]]
        # print(' '.join(encs))
        # print(' '.join(decs))
        #print(pred.max(dim=-1)[1][:,0])
        #
        #loss = self.model.nll_loss(pred, target_batch, dec_lens_var)
        loss = self.model.label_smoothing_loss(pred, target_batch)
        return loss

    def train(self):
        """Epoch loop with gradient accumulation: each batch contributes
        loss / gradient_accum; optimizer steps (and zeroes grads) every
        gradient_accum batches."""
        config = self.config
        train_loader = DataLoader(self.train_data,
                                  batch_size=config['batch_size'],
                                  shuffle=True,
                                  collate_fn=Collate())
        running_avg_loss = 0
        self.model.train()
        for _ in range(config['train_epoch']):
            for batch in train_loader:
                self.step += 1
                loss = self.train_one(batch)
                running_avg_loss = calc_running_avg_loss(
                    loss.item(), running_avg_loss)
                # Scale so accumulated gradients average over the accum window.
                loss.div(float(config['gradient_accum'])).backward()
                if self.step % config['gradient_accum'] == 0:  # gradient accumulation
                    clip_grad_norm_(self.model.parameters(), config['max_grad_norm'])
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                if self.step % config['report_every'] == 0:
                    logging("Step %d Train loss %.3f" % (self.step, running_avg_loss))
                if self.step % config['save_every'] == 0:
                    self.save()
                if self.step % config['validate_every'] == 0:
                    self.validate()

    @torch.no_grad()
    def validate(self):
        """Average train_one() loss over the validation set (no gradients);
        restores train mode before returning."""
        self.model.eval()
        validate_loader = DataLoader(self.validate_data,
                                     batch_size=self.config['batch_size'],
                                     shuffle=False,
                                     collate_fn=Collate())
        losses = []
        for batch in tqdm(validate_loader):
            loss = self.train_one(batch)
            losses.append(loss.item())
        self.model.train()
        # NOTE(review): raises ZeroDivisionError if the loader is empty.
        ave_loss = sum(losses) / len(losses)
        logging('Validate loss : %f' % ave_loss)

    def save(self):
        """Checkpoint model, optimizer, step and vocab (vocab is saved so a
        restore keeps ids consistent — see note in setup())."""
        state = {
            'model': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'step': self.step,
            'vocab': self.vocab
        }
        save_path = os.path.join(self.config['model_path'],
                                 'model_s%d.pt' % self.step)
        logging('Saving model step %d to %s...' % (self.step, save_path))
        torch.save(state, save_path)
class Train(object):
    """Pointer-generator trainer with optional self-critical RL fine-tuning.

    Depending on `opt`, each batch is trained with maximum likelihood
    (teacher forcing), with self-critical policy gradient (ROUGE reward
    against a greedy baseline, eq. 15 of
    https://arxiv.org/pdf/1705.04304.pdf), or a weighted mix of both.
    """

    def __init__(self, opt):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        time.sleep(15)  # let the batcher's background threads fill their queues
        train_dir = os.path.join(config.log_root, 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)
        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)
        self.opt = opt
        self.summary_writer = tf.summary.FileWriter(train_dir)  # TF1-style writer

    def save_model(self, running_avg_loss, iter):
        """Checkpoint the model sub-modules, optimizer state and running loss."""
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(self.model_dir,
                                       'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        """Build model and Adagrad optimizer, optionally resuming from the
        checkpoint named by opt.load_model.

        Returns:
            (start_iter, start_loss) for resuming the training loop.
        """
        # Resolve the checkpoint path from the --load_model option
        # (the incoming model_file_path argument is overridden either way).
        if self.opt.load_model is not None:  # was `!= None`
            model_file_path = os.path.join(self.model_dir, self.opt.load_model)
        else:
            model_file_path = None
        self.model = Model(model_file_path)
        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
            list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params, lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)
        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']
            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    # Move restored optimizer tensors onto the GPU.
                    # (Loop variable renamed from `state`, which shadowed the
                    # checkpoint dict above.)
                    for opt_state in self.optimizer.state.values():
                        for k, v in opt_state.items():
                            if torch.is_tensor(v):
                                opt_state[k] = v.cuda()
        return start_iter, start_loss

    def train_one_batch(self, batch):
        """Train on one batch with MLE and/or self-critical RL.

        Returns:
            (mle_loss_value, batch_reward): scalar MLE loss (0 if MLE is
            disabled) and mean sampled-sequence reward (0 if RL is disabled).
        """
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)
        self.optimizer.zero_grad()
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)
        # --------------MLE training------------------------------------------
        if self.opt.train_mle == "yes":
            step_losses = []
            for di in range(min(max_dec_len, config.max_dec_steps)):
                y_t_1 = dec_batch[:, di]  # Teacher forcing
                final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                    y_t_1, s_t_1, encoder_outputs, encoder_feature,
                    enc_padding_mask, c_t_1, extra_zeros,
                    enc_batch_extend_vocab, coverage, di)
                target = target_batch[:, di]
                # Probability assigned to each gold target token.
                gold_probs = torch.gather(final_dist, 1,
                                          target.unsqueeze(1)).squeeze()
                step_loss = -torch.log(gold_probs + config.eps)
                if config.is_coverage:
                    # Accumulated-attention overlap as coverage penalty.
                    step_coverage_loss = torch.sum(
                        torch.min(attn_dist, coverage), 1)
                    step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                    coverage = next_coverage
                step_mask = dec_padding_mask[:, di]
                step_loss = step_loss * step_mask  # ignore padded targets
                step_losses.append(step_loss)
            sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
            batch_avg_loss = sum_losses / dec_lens_var
            mle_loss = torch.mean(batch_avg_loss)
        else:
            mle_loss = get_cuda(torch.FloatTensor([0]))
        # --------------RL training-------------------------------------------
        if self.opt.train_rl == "yes":
            # Multinomial sampling for the learned policy...
            sample_sents, RL_log_probs = self.train_batch_RL(
                encoder_outputs, encoder_hidden, enc_padding_mask,
                encoder_feature, enc_batch_extend_vocab, extra_zeros, c_t_1,
                batch.art_oovs, coverage, greedy=False)
            with torch.autograd.no_grad():
                # ...greedy decoding as the self-critical baseline.
                greedy_sents, _ = self.train_batch_RL(
                    encoder_outputs, encoder_hidden, enc_padding_mask,
                    encoder_feature, enc_batch_extend_vocab, extra_zeros,
                    c_t_1, batch.art_oovs, coverage, greedy=True)
            sample_reward = self.reward_function(sample_sents,
                                                 batch.original_abstracts)
            baseline_reward = self.reward_function(greedy_sents,
                                                   batch.original_abstracts)
            # if iter%200 == 0:
            #     self.write_to_file(sample_sents, greedy_sents, batch.original_abstracts, sample_reward, baseline_reward, iter)
            # Self-critic policy gradient (eq 15 in https://arxiv.org/pdf/1705.04304.pdf)
            rl_loss = -(sample_reward - baseline_reward) * RL_log_probs
            rl_loss = torch.mean(rl_loss)
            batch_reward = torch.mean(sample_reward).item()
        else:
            rl_loss = get_cuda(torch.FloatTensor([0]))
            batch_reward = 0
        #loss.backward()
        (self.opt.mle_weight * mle_loss + self.opt.rl_weight * rl_loss).backward()
        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)
        self.optimizer.step()
        return mle_loss.item(), batch_reward

    def train_batch_RL(self, encoder_outputs, encoder_hidden, enc_padding_mask,
                       encoder_feature, enc_batch_extend_vocab, extra_zeros,
                       c_t_1, article_oovs, coverage, greedy):
        '''Generate sentences from the decoder entirely from sampled tokens.
        These sentences are used for ROUGE evaluation.

        Args:
            encoder_outputs: encoder outputs for all time steps
                (batch_size, length_input_sequence, 2*hidden_size)
            encoder_hidden: tuple of final hidden & cell state of the encoder;
                shape of h & c: (batch_size, hidden_size)
            enc_padding_mask: (batch_size, length_input_sequence) mask,
                0 for pad tokens, 1 otherwise
            c_t_1: encoder context vector for time_step=0
                (eq 5 in https://arxiv.org/pdf/1705.04304.pdf)
            extra_zeros: tensor extending the vocab distribution for the
                pointer mechanism
            enc_batch_extend_vocab: input batch storing OOV ids
            article_oovs: per-example lists of article OOV words
            greedy: if True, greedy argmax sampling; else multinomial sampling

        Returns:
            decoded_strs: list of decoded sentences
            log_probs: normalized log probabilities of the sampled words
                (empty list when greedy=True)
        '''
        s_t_1 = self.model.reduce_state(encoder_hidden)  # Decoder hidden states
        y_t_1 = get_cuda(
            torch.LongTensor(len(encoder_outputs)).fill_(
                self.vocab.word2id(data.START_DECODING)))  # Input to the decoder
        # Used for intra-temporal attention (section 2.1 in https://arxiv.org/pdf/1705.04304.pdf)
        inds = []  # Sampled indices for each time step
        decoder_padding_mask = []  # Padding masks of generated samples
        log_probs = []  # Log probabilities of generated samples
        mask = get_cuda(torch.LongTensor(len(encoder_outputs)).fill_(1))
        # mask: 1 => [STOP] not yet encountered, 0 otherwise
        for t in range(config.max_dec_steps):
            probs, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab,
                coverage, t)
            if greedy is False:
                multi_dist = Categorical(probs)
                y_t_1 = multi_dist.sample()  # multinomial sampling
                log_prob = multi_dist.log_prob(y_t_1)
                log_probs.append(log_prob)
            else:
                _, y_t_1 = torch.max(probs, dim=1)  # greedy sampling
            y_t_1 = y_t_1.detach()
            inds.append(y_t_1)
            mask_t = get_cuda(torch.zeros(len(encoder_outputs)))
            # If [STOP] was not seen before this step, this position is live.
            mask_t[mask == 1] = 1
            # BUGFIX: use boolean AND instead of `(a) + (b) == 2`; adding two
            # bool tensors no longer yields integer counts on modern PyTorch.
            mask[(mask == 1)
                 & (y_t_1 == self.vocab.word2id(data.STOP_DECODING))] = 0
            decoder_padding_mask.append(mask_t)
            is_oov = (y_t_1 >= config.vocab_size).long()  # sampled word is OOV?
            # Replace OOVs with [UNK] before feeding back to the decoder.
            y_t_1 = (1 - is_oov) * y_t_1 + (is_oov) * self.vocab.word2id(
                data.UNKNOWN_TOKEN)
        inds = torch.stack(inds, dim=1)
        decoder_padding_mask = torch.stack(decoder_padding_mask, dim=1)
        if greedy is False:
            # Compute length-normalized log probability per sampled sentence.
            log_probs = torch.stack(log_probs, dim=1)
            log_probs = log_probs * decoder_padding_mask  # drop padded steps
            lens = torch.sum(decoder_padding_mask, dim=1)  # sentence lengths
            log_probs = torch.sum(log_probs, dim=1) / lens  # (bs,)
        decoded_strs = []
        for i in range(len(encoder_outputs)):
            id_list = inds[i].cpu().numpy()
            oovs = article_oovs[i]
            S = data.outputids2words(id_list, self.vocab, oovs)
            try:
                end_idx = S.index(data.STOP_DECODING)
                S = S[:end_idx]
            except ValueError:
                pass  # no [STOP] emitted; keep the full sequence
            if len(S) < 2:
                # Avoid degenerate outputs like "." which break ROUGE scoring.
                S = ["xxx"]
            S = " ".join(S)
            decoded_strs.append(S)
        return decoded_strs, log_probs

    def reward_function(self, decoded_sents, original_sents):
        """ROUGE-1 precision reward per example, as a cuda FloatTensor.

        Falls back to per-pair scoring when batch scoring fails; a pair that
        still fails scores 0.
        """
        rouge = Rouge()
        try:
            scores = rouge.get_scores(decoded_sents, original_sents)
        except Exception:
            print("Rouge failed for multi sentence evaluation.. Finding exact pair")
            scores = []
            for i in range(len(decoded_sents)):
                try:
                    score = rouge.get_scores(decoded_sents[i], original_sents[i])
                except Exception:
                    print("Error occured at:")
                    print("decoded_sents:", decoded_sents[i])
                    print("original_sents:", original_sents[i])
                    score = [{"rouge-1": {"p": 0.0}}]
                scores.append(score[0])
        # NOTE: despite the variable name, this is ROUGE-1 precision.
        rouge_l_p1 = [score["rouge-1"]["p"] for score in scores]
        rouge_l_p1 = get_cuda(torch.FloatTensor(rouge_l_p1))
        return rouge_l_p1

    def trainIters(self, n_iters, model_file_path=None):
        """Training loop: n_iters batches with periodic logging, summary
        flushing and checkpointing."""
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter < n_iters:
            batch = self.batcher.next_batch()
            # BUGFIX: train_one_batch returns (mle_loss, batch_reward); the
            # original bound the whole tuple to `loss`, which breaks the
            # running-average update and the '%f' formatting below.
            loss, batch_reward = self.train_one_batch(batch)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            iter += 1
            if iter % 50 == 0:
                self.summary_writer.flush()
            print_interval = 50
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()
            if iter % 100 == 0:
                self.save_model(running_avg_loss, iter)
class Train(object):
    """Pointer-generator trainer with scheduled sampling.

    The teacher-forcing probability decays over steps (linear / exp /
    sigmoid schedule); the loop periodically evaluates on a dev set,
    keeps the best checkpoint and early-stops on stagnating eval loss.
    """

    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        time.sleep(15)  # let the batcher's background threads fill their queues
        # (sic) `ouput_root` is the attribute name exposed by the config module.
        train_dir = os.path.join(config.ouput_root,
                                 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.makedirs(train_dir)
        self.checkpoint_dir = os.path.join(train_dir, 'checkpoints')
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)
        # Separate TF2 summary writers for train and eval curves.
        self.train_summary_writer = tf.summary.create_file_writer(
            os.path.join(train_dir, 'log', 'train'))
        self.eval_summary_writer = tf.summary.create_file_writer(
            os.path.join(train_dir, 'log', 'eval'))

    def save_model(self, model_path, running_avg_loss, iter):
        """Checkpoint the model sub-modules, optimizer state and loss
        to `model_path`."""
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        torch.save(state, model_path)

    def setup_train(self, model_file_path=None):
        """Build model and Adagrad optimizer; restore a checkpoint if given.

        Returns:
            (start_iter, start_loss) for resuming the training loop.
        """
        self.model = Model(device, model_file_path)
        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
            list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params, lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)
        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']
            if not config.is_coverage:
                # Optimizer state is only reused when not switching to coverage.
                self.optimizer.load_state_dict(state['optimizer'])
            # Move restored optimizer tensors to the training device.
            # (Loop variable renamed from `state`, which shadowed the
            # checkpoint dict above.)
            for opt_state in self.optimizer.state.values():
                for k, v in opt_state.items():
                    if torch.is_tensor(v):
                        opt_state[k] = v.to(device)
        return start_iter, start_loss

    def train_one_batch(self, batch, forcing_ratio=1):
        """Train on one batch with scheduled sampling.

        Args:
            batch: a Batcher batch.
            forcing_ratio: probability of feeding the gold token (teacher
                forcing) at each decoder step; otherwise the model's previous
                top-1 prediction is fed back.

        Returns:
            The scalar batch loss (float).
        """
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, device)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, device)
        self.optimizer.zero_grad()
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)
        step_losses = []
        y_t_1_hat = None  # model's previous top-1 prediction, (B, 1)
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]
            # decide the next input
            if di == 0 or random.random() < forcing_ratio:
                x_t = y_t_1  # teacher forcing: gold label from last time step
            else:
                # BUGFIX: ids >= vocab size belong to the extended (pointer)
                # vocabulary and have no embedding; the original used `>`,
                # letting id == vocab size through. Map them to UNK first.
                y_t_1_hat[y_t_1_hat >= self.vocab.size()] = self.vocab.word2id(
                    UNKNOWN_TOKEN)
                x_t = y_t_1_hat.flatten()  # use last prediction as input
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                x_t, s_t_1, encoder_outputs, encoder_feature, enc_padding_mask,
                c_t_1, extra_zeros, enc_batch_extend_vocab, coverage, di)
            _, y_t_1_hat = final_dist.data.topk(1)
            target = target_batch[:, di].unsqueeze(1)
            step_loss = cal_NLLLoss(target, final_dist)
            if config.is_coverage:
                # if not using coverage, coverage stays None
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            step_mask = dec_padding_mask[:, di]
            # padding in target should not count toward the loss
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)
        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)
        loss.backward()
        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)
        self.optimizer.step()
        return loss.item()

    def _scheduled_forcing_ratio(self, step):
        """Teacher-forcing probability for `step` under the configured decay
        schedule ('linear' / 'exp' / 'sig'); constant config.forcing_ratio
        when no decay type is configured; 0 once step reaches decay_to_0_iter.

        Raises:
            ValueError: for an unrecognized config.forcing_decay_type.
        """
        if not config.forcing_decay_type:
            return config.forcing_ratio
        s = config.forcing_ratio      # initial forcing ratio
        k = config.decay_to_0_iter    # step at which the ratio reaches ~0
        near_zero = 0.0001            # residual ratio targeted at step k (was `nere_zero`)
        if step >= config.decay_to_0_iter:
            return 0
        if config.forcing_decay_type == 'linear':
            return s * (k - step) / k
        if config.forcing_decay_type == 'exp':
            p = pow(near_zero, 1 / k)
            return s * (p ** step)
        if config.forcing_decay_type == 'sig':
            r = math.log((1 / near_zero) - 1) / k
            return s / (1 + pow(math.e, r * (step - k / 2)))
        raise ValueError('Unrecognized forcing_decay_type: ' +
                         config.forcing_decay_type)

    def train(self, n_iters, init_model_path=None):
        """Main loop: up to n_iters batches with scheduled sampling,
        periodic logging/eval, best-model checkpointing and early stopping."""
        iter, avg_loss = self.setup_train(init_model_path)
        start = time.time()
        cnt = 0                      # eval intervals without improvement
        best_model_path = None
        min_eval_loss = float('inf')
        while iter < n_iters:
            forcing_ratio = self._scheduled_forcing_ratio(iter)
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch, forcing_ratio=forcing_ratio)
            model_path = os.path.join(self.checkpoint_dir,
                                      'model_step_%d' % (iter + 1))
            avg_loss = calc_avg_loss(loss, avg_loss)
            if (iter + 1) % config.print_interval == 0:
                with self.train_summary_writer.as_default():
                    tf.summary.scalar(name='loss', data=loss, step=iter)
                self.train_summary_writer.flush()
                logger.info('steps %d, took %.2f seconds, train avg loss: %f' %
                            (iter + 1, time.time() - start, avg_loss))
                start = time.time()
            if config.eval_interval is not None and (iter + 1) % config.eval_interval == 0:
                start = time.time()
                logger.info("Start Evaluation on model %s" % model_path)
                eval_processor = Evaluate(self.model, self.vocab)
                eval_loss = eval_processor.run_eval()
                logger.info(
                    "Evaluation finished, took %.2f seconds, eval loss: %f" %
                    (time.time() - start, eval_loss))
                with self.eval_summary_writer.as_default():
                    tf.summary.scalar(name='eval_loss', data=eval_loss, step=iter)
                self.eval_summary_writer.flush()
                if eval_loss < min_eval_loss:
                    logger.info("This is the best model so far, saving it to disk.")
                    min_eval_loss = eval_loss
                    best_model_path = model_path
                    self.save_model(model_path, eval_loss, iter)
                    cnt = 0
                else:
                    cnt += 1
                    if cnt > config.patience:
                        logger.info(
                            "Eval loss doesn't drop for %d straight times, early stopping.\n"
                            "Best model: %s (Eval loss %f: )" %
                            (config.patience, best_model_path, min_eval_loss))
                        break
                start = time.time()
            elif (iter + 1) % config.save_interval == 0:
                self.save_model(model_path, avg_loss, iter)
            iter += 1
        else:
            # while/else: reached only when the loop was NOT broken early.
            logger.info(
                "Training finished, best model: %s, with train loss %f: " %
                (best_model_path, min_eval_loss))
class Train(object):
    """Pointer-generator trainer (timestamped log dirs, TF1-compat summary
    writer); comments translated from the original Chinese."""

    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        time.sleep(15)  # give the batcher's background threads time to fill queues
        stamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        train_dir = os.path.join(config.log_root, 'train_{}'.format(stamp))
        if not os.path.exists(train_dir):
            os.makedirs(train_dir)
        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)
        self.summary_writer = tf.compat.v1.summary.FileWriter(train_dir)

    def save_model(self, running_avg_loss, iter_step):
        """Save the model: checkpoint sub-modules, optimizer and loss under a
        timestamped file name."""
        state = {
            'iter': iter_step,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        stamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        model_save_path = os.path.join(self.model_dir,
                                       'model_{}_{}'.format(iter_step, stamp))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        """Initialize or load the model; initialize the iteration counter,
        loss and optimizer."""
        # build the model
        self.model = Model(model_file_path)
        # list of all trainable parameters
        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
            list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr  # lr_coverage or lr, depending on coverage mode
        # build the optimizer
        self.optimizer = Adagrad(params, lr=initial_lr,
                                 initial_accumulator_value=config.adagrad_init_acc)
        # initialize iteration count and loss
        start_iter, start_loss = 0, 0
        # if an existing checkpoint path was given, resume training from it
        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']
            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if USE_CUDA:
                    # move restored optimizer tensors to the training device
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.to(DEVICE)
        return start_iter, start_loss

    def train_one_batch(self, batch):
        """Train one batch and return its loss.

        enc_batch: torch.Size([16, 400]) — encoded articles, padded to 400
            words; OOV words encoded as 0
        enc_padding_mask: torch.Size([16, 400]) — 0 at pad positions, 1 elsewhere
        enc_lens: numpy.ndarray — word count of each article
        enc_batch_extend_vocab: torch.Size([16, 400]) — article encodings where
            OOV words get ids beyond the base vocabulary
        extra_zeros: torch.Size([16, number of article OOVs]) zero tensor
        c_t_1: torch.Size([16, 512]) zero tensor
        coverage: Variable(torch.zeros(batch_size, max_enc_seq_len)) if
            is_coverage==True else None; filled in later in coverage mode
        ----------------------------------------
        dec_batch: torch.Size([16, 100]) — summary encodings incl. START and PAD
        dec_padding_mask: torch.Size([16, 100]) — 0 at pad positions, 1 elsewhere
        max_dec_len: scalar — number of summary words, excluding pad
        dec_lens_var: torch.Size([16]) — summary word counts
        target_batch: torch.Size([16, 100]) — target summaries incl. STOP and PAD
        """
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch)  # TODO: meaning of extra_zeros not yet fully understood (original note)
        self.optimizer.zero_grad()
        """
        # 记得修改Batch类添加vocab属性
        print("模型输入文章编码:", "*"*100)
        print("enc_batch:", enc_batch, enc_batch.size())
        print("enc_batch[-1]:", enc_batch[-1])
        # print("batch._id_to_word:", batch.vocab._id_to_word)
        print("enc_batch[-1]原文:", [batch.vocab.id2word(idx) for idx in enc_batch[-1].cpu().numpy()])
        print("-"*50)
        print("enc_padding_mask:", enc_padding_mask, enc_padding_mask.size())
        print("-"*50)
        print("enc_lens:", enc_lens, enc_lens.shape)
        print("-"*50)
        print("enc_batch_extend_vocab", enc_batch_extend_vocab, enc_batch_extend_vocab.size())
        print("enc_batch_extend_vocab[-1]:", enc_batch_extend_vocab[-1])
        print("enc_batch_extend_vocab[-1]的原文:", [batch.vocab.id2word(idx) if idx<50000 else '[UNK]+{}'.format(idx-50000) for idx in enc_batch_extend_vocab[-1].cpu().numpy()])
        print("-"*50)
        print("extra_zeros:", extra_zeros, extra_zeros.size())
        print("-"*50)
        print("c_t_1:", c_t_1, c_t_1.size())
        print("-"*50)
        print("coverage:", coverage)
        print("*"*100)
        print("模型输入摘要编码,包括源和目标:", "*"*100)
        print("dec_batch:", dec_batch, dec_batch.size())
        print("dec_batch[0]:", dec_batch[0])
        # print("batch._id_to_word:", batch.vocab._id_to_word)
        print("dec_batch[0]原文:", [batch.vocab.id2word(idx) for idx in dec_batch[0].cpu().numpy()])
        print("-"*50)
        print("dec_padding_mask:", dec_padding_mask, dec_padding_mask.size())
        print("-"*50)
        print("max_dec_len:", max_dec_len)
        print("-"*50)
        print("dec_lens_var", dec_lens_var, dec_lens_var.size())
        print("-"*50)
        print("target_batch:", target_batch, target_batch.size())
        print("-"*50)
        print("target_batch[0]:", target_batch[0], target_batch[0].size())
        print("target_batch[0]的原文:", [batch.vocab.id2word(idx) if idx<50000 else '[UNK]+{}'.format(idx-50000) for idx in target_batch[0].cpu().numpy()])
        print("*"*100)
        input("任意键继续>>>")
        """
        # [B, max(seq_lens), 2*hid_dim], [B*max(seq_lens), 2*hid_dim], tuple([2, B, hid_dim], [2, B, hid_dim])
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)  # (h,c) = ([3, B, hid_dim], [3, B, hid_dim])
        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # one summary token: the word at this position in every sequence of the batch
            # print("y_t_1:", y_t_1, y_t_1.size())
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(y_t_1, s_t_1,
                                                                                           encoder_outputs,
                                                                                           encoder_feature,
                                                                                           enc_padding_mask, c_t_1,
                                                                                           extra_zeros,
                                                                                           enc_batch_extend_vocab,
                                                                                           coverage, di)
            target = target_batch[:, di]  # encoding of the next summary word (the prediction target)
            # print("target-iter:", target, target.size())
            # print("final_dist:", final_dist, final_dist.size())
            # input("go on>>")
            # final_dist is the probability over the extended vocabulary,
            # whose size can exceed the preset 50_000.
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()  # gather each gold target word's predicted probability
            step_loss = -torch.log(gold_probs + config.eps)  # maximizing gold_probs == minimizing step_loss (negative log)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask  # padded target positions contribute nothing
            step_losses.append(step_loss)
        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var  # normalize by true summary length
        loss = torch.mean(batch_avg_loss)
        loss.backward()
        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)
        self.optimizer.step()
        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        """Training loop: n_iters batches with periodic logging and
        checkpointing every 50000 iterations."""
        # training setup (model/optimizer init or checkpoint restore)
        iter_step, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter_step < n_iters:
            # fetch the next batch
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer,
                                                     iter_step)
            iter_step += 1
            if iter_step % 100 == 0:
                self.summary_writer.flush()
            # print_interval = 1000
            if iter_step % 100 == 0:
                # lr = self.optimizer.state_dict()['param_groups'][0]['lr']
                # NOTE(review): the message hard-codes "10" steps but this
                # branch actually runs every 100 iterations.
                logging.info('steps %d, seconds for %d steps: %.2f, loss: %f' %
                             (iter_step, 10, time.time() - start, loss))
                start = time.time()
            # save a checkpoint every 50000 iterations
            if iter_step % 50000 == 0:
                logging.info("model saved = {}/{}".format(
                    int(iter_step / 50000) + 1,
                    int(config.max_iterations/50000) + 1))
                self.save_model(running_avg_loss, iter_step)
class TrainSeq2Seq(object):
    """Trainer for the pointer-generator seq2seq model.

    Supports plain NLL training (``train_nll``) and policy-gradient training
    (``train_pg``) with sentence-level, word-level, or combined (alpha-blended)
    rewards.
    """

    def __init__(self, is_word_level=False, is_combined=False, alpha=0.3):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        # The Batcher needed by train_nll() is created lazily there; the
        # policy-gradient path iterates self.dataset through a DataLoader.
        self.dataset = DailyMailDataset("train", self.vocab)

        self.is_word_level = is_word_level
        self.is_combined = is_combined
        self.alpha = alpha

        if is_word_level:
            print("Using Word Level Policy Gradient")
        elif is_combined:
            print("Using Combined Policy Gradient w/ alpha = ", alpha)
        else:
            print("Using Sentence Level Policy Gradient")

        train_dir = './train_dumps'
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)
        self.model_dir = os.path.join(
            train_dir, 'dumps_model_{:%m_%d_%H_%M}'.format(datetime.now()))
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

    def save_model(self, running_avg_loss, iter):
        """Checkpoint model/optimizer state and return the checkpoint path."""
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(
            self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)
        return model_save_path

    def setup(self, seqseq_model, model_file_path):
        """Attach the model, build the Adagrad optimizer, optionally restore
        a checkpoint; returns (start_iter, start_loss)."""
        self.model = seqseq_model
        params = list(self.model.encoder.parameters()) + \
            list(self.model.decoder.parameters()) + \
            list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(
            params, lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            print("Loading checkpoint .... ")
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                # Restoring Adagrad accumulators only makes sense when we are
                # not switching over to coverage training.
                self.optimizer.load_state_dict(state['optimizer'])
                if config.use_gpu:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()
        return start_iter, start_loss

    def train_one_batch_nll(self, batch):
        """One teacher-forced NLL step; returns the scalar batch loss."""
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, config.use_gpu)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, config.use_gpu)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros,
                enc_batch_extend_vocab, coverage, di)
            target = target_batch[:, di]
            # Probability assigned to the gold token (extended vocabulary).
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)

        self.optimizer.step()
        return loss.item()

    def train_nll(self, n_iters, iter, running_avg_loss):
        """Run NLL training until n_iters, printing and checkpointing as we go."""
        # FIX: the original referenced self.batcher, which __init__ no longer
        # creates (the assignment is commented out there) -> AttributeError.
        # Build it lazily on first use instead, preserving __init__ behavior.
        if not hasattr(self, 'batcher'):
            self.batcher = Batcher(config.train_data_path, self.vocab,
                                   mode='train', batch_size=config.batch_size,
                                   single_pass=False)
        start = time.time()
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch_nll(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss, iter)
            print("Iteration:", iter, " loss:", loss,
                  " Running avg loss:", running_avg_loss)
            iter += 1

            print_interval = 1000
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()
            if iter % 1000 == 0:
                self.save_model(running_avg_loss, iter)

    def train_pg(self, n_iters, start_iter, start_running_avg_loss,
                 start_pg_losses, start_run_avg_losses, num_epochs=50):
        """
        The generator is trained using policy gradients, using the reward
        from the discriminator. Training is done for num_batches batches.
        """
        dataloader = DataLoader(self.dataset, batch_size=config.batch_size,
                                shuffle=True, num_workers=1,
                                collate_fn=create_batch_collate(
                                    self.vocab, config.batch_size))
        start = time.time()
        running_avg_loss = start_running_avg_loss
        pg_losses = start_pg_losses
        run_avg_losses = start_run_avg_losses
        iteration = start_iter

        for epoch in range(num_epochs):
            print("Epoch :", epoch + 1)
            for batch in dataloader:
                iteration += 1
                loss = self.train_one_batch_pg(batch)

                running_avg_loss = calc_running_avg_loss(
                    loss, running_avg_loss, iteration)
                print("Iteration:", iteration, " PG loss:", loss,
                      " Running avg loss:", running_avg_loss)
                pg_losses.append(loss)
                run_avg_losses.append(running_avg_loss)

                print_interval = 10
                if iteration % print_interval == 0:
                    print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                          (iteration, print_interval, time.time() - start, loss))
                    start = time.time()

                if iteration % 10 == 0:
                    # Dump model and losses
                    model_file_path = self.save_model(running_avg_loss, iteration)
                    pickle.dump(
                        pg_losses,
                        open(os.path.join(
                            self.model_dir,
                            'train_pg_losses_{}.p'.format(iteration)), 'wb'))
                    pickle.dump(
                        run_avg_losses,
                        open(os.path.join(
                            self.model_dir,
                            'train_run_avg_losses_{}.p'.format(iteration)), 'wb'))

                    # Run eval
                    eval_processor = Evaluate_pg(
                        model_file_path,
                        is_word_level=self.is_word_level,
                        is_combined=self.is_combined,
                        alpha=self.alpha)
                    eval_losses = eval_processor.run_eval(self.model_dir, iteration)

                    # Check if we should stop
                    avg_eval_loss = np.mean(eval_losses)
                    if running_avg_loss < avg_eval_loss:
                        print("Stopping at iteration {}".format(iteration))
                        # FIX: the original `break` only left the inner batch
                        # loop, so training silently resumed at the next
                        # epoch; return so that "stopping" really stops.
                        return

    def compute_policy_grads_using_rewards(self, sentence_rewards, word_rewards,
                                           sentence_losses, word_losses,
                                           word_to_sent_ind):
        """Combine rewards and per-token/per-sentence losses into a single
        policy-gradient loss per example, per the configured granularity."""
        if self.is_combined:
            # Blend word-level and enclosing-sentence rewards with weight alpha.
            pg_losses = [
                [(self.alpha * word_reward + (1 - self.alpha) *
                  sentence_rewards[i][word_to_sent_ind[i][j]]) * word_losses[i][j]
                 for j, word_reward in enumerate(abstract_rewards)
                 if j < len(word_to_sent_ind[i])]
                for i, abstract_rewards in enumerate(word_rewards)]
        elif self.is_word_level:
            pg_losses = [
                [word_reward * word_losses[i][j]
                 for j, word_reward in enumerate(abstract_rewards)
                 if j < len(word_to_sent_ind[i])]
                for i, abstract_rewards in enumerate(word_rewards)]
        else:
            # Sentence-level: weight each sentence's loss by its reward.
            pg_losses = [
                [rs * sentence_losses[ri][rsi] for rsi, rs in enumerate(r)]
                for ri, r in enumerate(sentence_rewards)]
        return [sum(pg) for pg in pg_losses]

    def compute_pg_loss(self, orig, pred, sentence_losses, split_predictions,
                        word_losses, word_to_sent_ind):
        """Compute the rewards required by the configured mode, then the
        per-example policy-gradient losses."""
        sentence_rewards = None
        word_rewards = None

        if not self.is_word_level or self.is_combined:
            sentence_rewards = get_sentence_rewards(orig, pred)
        if self.is_word_level or self.is_combined:
            word_rewards = get_word_level_rewards(orig, split_predictions)

        return self.compute_policy_grads_using_rewards(
            sentence_rewards=sentence_rewards,
            word_rewards=word_rewards,
            sentence_losses=sentence_losses,
            word_losses=word_losses,
            word_to_sent_ind=word_to_sent_ind)

    def compute_batched_sentence_loss(self, word_losses, orig, pred):
        """Split each prediction into sentences (at '.'), accumulate
        per-sentence losses, and return per-example PG losses."""
        orig_sum = []
        new_pred = []
        pred_sum = []
        sentence_losses = []

        # Convert the original summary into one single string per article.
        for i in range(len(orig)):
            orig_sum.append(' '.join(map(str, orig[i])))
            new_pred.append([])
            pred_sum.append([])
            sentence_losses.append([])

        batch_sent_indices = []
        for i in range(len(pred)):
            sentence = pred[i]
            losses = word_losses[i]
            sentence_indices = []
            count = 0
            while len(sentence) > 0:
                try:
                    idx = sentence.index(".")
                except ValueError:
                    idx = len(sentence)
                sentence_indices.extend([count for _ in range(idx)])
                # new_pred[i][k] is the cumulative prediction up to and
                # including sentence k.
                if count > 0:
                    new_pred[i].append(new_pred[i][count - 1] + sentence[:idx + 1])
                else:
                    new_pred[i].append(sentence[:idx + 1])
                sentence_losses[i].append(sum(losses[:idx + 1]))
                sentence = sentence[idx + 1:]
                losses = losses[idx + 1:]
                count += 1
            batch_sent_indices.append(sentence_indices)

        for i in range(len(pred)):
            for j in range(len(new_pred[i])):
                pred_sum[i].append(' '.join(map(str, new_pred[i][j])))

        return self.compute_pg_loss(orig_sum, pred_sum, sentence_losses,
                                    split_predictions=pred,
                                    word_losses=word_losses,
                                    word_to_sent_ind=batch_sent_indices)

    def train_one_batch_pg(self, batch):
        """One policy-gradient step with greedy decoding; returns the loss."""
        batch_size = batch.batch_size

        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, config.use_gpu)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, config.use_gpu)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        step_losses = [[] for _ in range(batch_size)]
        output_ids = [[] for _ in range(batch_size)]

        # Begin with START symbol
        y_t_1 = torch.ones(batch_size, dtype=torch.long) * self.vocab.word2id(
            data.START_DECODING)
        if config.use_gpu:
            y_t_1 = y_t_1.cuda()

        # FIX: data.PAD_TOKEN is the pad *string*; the original compared it
        # directly against an int token id, which is never equal, so pad ids
        # leaked into output_ids. Compare ids instead.
        pad_id = self.vocab.word2id(data.PAD_TOKEN)

        for di in range(min(max_dec_len, config.max_dec_steps)):
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature,
                enc_padding_mask, c_t_1, extra_zeros,
                enc_batch_extend_vocab, coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)  # NLL
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask

            # Greedy decoding: feed the argmax back in as the next input.
            _, idx = torch.max(final_dist, 1)
            idx = idx.reshape(batch_size, -1).squeeze()
            y_t_1 = idx

            for i, pred in enumerate(y_t_1):
                if pred.item() != pad_id:
                    output_ids[i].append(pred.item())
            for i in range(batch_size):
                step_losses[i].append(step_loss[i])

        # Obtain the original and predicted summaries
        original_abstracts = batch.original_abstracts_sents
        predicted_abstracts = [
            data.outputids2words(ids, self.vocab, None) for ids in output_ids
        ]

        # Compute the batched loss
        batched_losses = self.compute_batched_sentence_loss(
            step_losses, original_abstracts, predicted_abstracts)
        losses = torch.stack(batched_losses)
        losses = losses / dec_lens_var
        loss = torch.mean(losses)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)

        self.optimizer.step()
        return loss.item()
class Train(object):
    """Trainer mixing teacher-forced NLL with a self-critical RL loss
    (sampled vs. greedy-baseline sequences scored with ROUGE-L)."""

    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        # Give the batcher's background threads time to fill the queue.
        time.sleep(15)

        train_dir = os.path.join(config.log_root, 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.summary_writer = tf.summary.FileWriter(train_dir)

    def save_model(self, running_avg_loss, iter):
        """Checkpoint model/optimizer state under self.model_dir."""
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(
            self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        """Build model/optimizer; optionally restore a checkpoint.
        Returns (start_iter, start_loss)."""
        self.model = Model(model_file_path)

        params = list(self.model.encoder.parameters()) + \
            list(self.model.decoder.parameters()) + \
            list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(params, lr=initial_lr,
                                 initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()
        return start_iter, start_loss

    def train_one_batch(self, batch):
        """One combined NLL + self-critical RL step; returns the scalar loss."""
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)
        s_t_1_origin = s_t_1
        batch_size = batch.batch_size

        step_losses = []
        sample_idx = []
        sample_log_probs = Variable(torch.zeros(batch_size))
        baseline_idx = []

        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing, shape [batch_size]
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature, enc_padding_mask,
                c_t_1, extra_zeros, enc_batch_extend_vocab, coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

            # ----- sampled trajectory -----
            # NOTE(review): both the sample and the baseline decode only run at
            # di == 0, so each "sequence" is a single token; presumably an
            # `else` branch continuing the roll-out was intended — confirm.
            if di == 0:  # use decoder input[0], which is <BOS>
                sample_t_1 = dec_batch[:, di]
                s_t_sample = s_t_1_origin
                c_t_sample = Variable(torch.zeros((batch_size, 2 * config.hidden_dim)))
                final_dist, s_t_sample, c_t_sample, attn_dist, p_gen, next_coverage = self.model.decoder(
                    sample_t_1, s_t_sample, encoder_outputs, encoder_feature,
                    enc_padding_mask, c_t_sample, extra_zeros,
                    enc_batch_extend_vocab, coverage, di)
                # Sample the next token from the output distribution.
                dist = torch.distributions.Categorical(final_dist)
                sample_t_1 = Variable(dist.sample())
                sample_idx.append(sample_t_1)  # tensor list
                # FIX: squeeze the gathered column so we add [B] to [B]; the
                # original added a [B, 1] tensor and broadcast to [B, B].
                sample_log_probs = sample_log_probs + torch.log(
                    final_dist.gather(1, sample_t_1.view(-1, 1)).squeeze(1))

            # ----- greedy baseline trajectory -----
            if di == 0:  # use decoder input[0], which is <BOS>
                baseline_t_1 = dec_batch[:, di]
                # FIX: the original initialized s_t_sample/c_t_sample here but
                # then passed the never-defined s_t_baseline/c_t_baseline to
                # the decoder (NameError). Initialize the baseline state.
                s_t_baseline = s_t_1_origin
                c_t_baseline = Variable(torch.zeros((batch_size, 2 * config.hidden_dim)))
                final_dist, s_t_baseline, c_t_baseline, attn_dist, p_gen, next_coverage = self.model.decoder(
                    baseline_t_1, s_t_baseline, encoder_outputs, encoder_feature,
                    enc_padding_mask, c_t_baseline, extra_zeros,
                    enc_batch_extend_vocab, coverage, di)
                # FIX: max(1) returns (values, indices); the baseline token is
                # the argmax *index*, not the maximum probability value.
                baseline_t_1 = final_dist.max(1)[1]
                baseline_idx.append(baseline_t_1)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        # RL loss: reward = baseline ROUGE - sample ROUGE (self-critical):
        # map sample/baseline ids to strings, score with ROUGE-L, weight the
        # sample log-probs by the advantage.
        sample_idx = torch.stack(sample_idx, dim=1).squeeze()   # (batch_size, seq_len)
        baseline_idx = torch.stack(baseline_idx, dim=1).squeeze()
        rl_loss = torch.zeros(batch_size)
        for i in range(sample_idx.shape[0]):  # each example in a batch
            sample_y = data.outputids2words(
                sample_idx[i], self.vocab,
                (batch.art_oovs[i] if config.pointer_gen else None))
            baseline_y = data.outputids2words(
                baseline_idx[i], self.vocab,
                (batch.art_oovs[i] if config.pointer_gen else None))
            true_y = batch.original_abstracts[i]

            sample_score = rouge_l_f(sample_y, true_y)
            baseline_score = rouge_l_f(baseline_y, true_y)
            # FIX: assumes rouge_l_f returns a plain float — the original
            # wrapped it in Variable(...), which raises a TypeError.
            rl_loss[i] = baseline_score - sample_score
        rl_loss = rl_loss * sample_log_probs

        gamma = 0.9984
        # FIX: reduce the per-example RL loss to a scalar before mixing;
        # calling backward() on the original non-scalar loss would fail.
        loss = (1 - gamma) * loss + gamma * torch.mean(rl_loss)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)

        self.optimizer.step()
        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        """Main loop: fetch batches, train, log, periodically checkpoint."""
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            iter += 1

            if iter % 100 == 0:
                self.summary_writer.flush()
            print_interval = 1000
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()
            if iter % 5000 == 0:
                self.save_model(running_avg_loss, iter)
class Train(object):
    """Supervised trainer that keeps one rolling training checkpoint plus a
    separate best-model copy selected by validation loss."""

    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        time.sleep(5)

        if not os.path.exists(config.log_root):
            os.mkdir(config.log_root)

        self.model_dir = os.path.join(config.log_root, 'train_model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.train_log = os.path.join(config.log_root, 'train_log')
        if not os.path.exists(self.train_log):
            os.mkdir(self.train_log)
        self.summary_writer = tf.summary.FileWriter(self.train_log)

    def save_model(self, running_avg_loss, iter, mode):
        """Write a checkpoint and return its path.

        mode == 'train' refreshes the rolling training checkpoint; any other
        mode refreshes the best-model directory. Either way the target
        directory is emptied first so only the newest checkpoint remains.
        """
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }

        if mode == 'train':
            save_model_dir = self.model_dir
        else:
            best_model_dir = os.path.join(config.log_root, 'best_model')
            if not os.path.exists(best_model_dir):
                os.mkdir(best_model_dir)
            save_model_dir = best_model_dir

        # Keep a single checkpoint per directory: wipe it and recreate.
        if len(os.listdir(save_model_dir)) > 0:
            shutil.rmtree(save_model_dir)
            time.sleep(2)
            os.mkdir(save_model_dir)

        train_model_path = os.path.join(save_model_dir, 'model_best_%d' % (iter))
        torch.save(state, train_model_path)
        return train_model_path

    def setup_train(self, model_file_path=None):
        """Create model + Adagrad optimizer (with L2 weight decay); resume from
        a checkpoint when a path is given. Returns (start_iter, start_loss)."""
        self.model = Model(model_file_path)

        params = (list(self.model.encoder.parameters()) +
                  list(self.model.decoder.parameters()) +
                  list(self.model.reduce_state.parameters()))
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(params,
                                 lr=initial_lr,
                                 initial_accumulator_value=config.adagrad_init_acc,
                                 weight_decay=config.L2_loss)

        start_iter, start_loss = 0, 0
        if model_file_path is None:
            return start_iter, start_loss

        checkpoint = torch.load(model_file_path,
                                map_location=lambda storage, location: storage)
        start_iter = checkpoint['iter']
        start_loss = checkpoint['current_loss']

        if not config.is_coverage:
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            if use_cuda:
                # Move restored accumulator tensors onto the GPU.
                for opt_state in self.optimizer.state.values():
                    for k, v in opt_state.items():
                        if torch.is_tensor(v):
                            opt_state[k] = v.cuda()
        return start_iter, start_loss

    def train_one_batch(self, batch):
        """Single teacher-forced training step; returns the batch loss."""
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()

        # encoder_outputs shape = (batch_size, max_seq_len, 2*hidden_size)
        # encoder_feature shape = (batch_size*max_seq_len, 2*hidden_size)
        # encoder_hidden[0] shape = (batch, 2, hidden_size)
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        # s_t_1[0] shape = (1, batch_size, hidden_size)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        # (Debug helper, kept for reference: decode enc/dec/target ids back to
        # words via self.vocab._id_to_word and print them.)

        per_step_losses = []
        n_steps = min(max_dec_len, config.max_dec_steps)
        for di in range(n_steps):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature, enc_padding_mask,
                c_t_1, extra_zeros, enc_batch_extend_vocab, coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            per_step_losses.append(step_loss * dec_padding_mask[:, di])

        total_loss = torch.sum(torch.stack(per_step_losses, 1), 1)
        loss = torch.mean(total_loss / dec_lens_var)
        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)

        self.optimizer.step()
        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        """Train for n_iters steps, evaluating periodically and tracking the
        best model by validation loss."""
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        min_val_loss = np.inf

        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            iter += 1

            if iter % config.print_interval == 0:
                tf.logging.info(
                    'steps %d, seconds for %d batch: %.2f , loss: %f, min_val_loss: %f' %
                    (iter, config.print_interval, time.time() - start, loss,
                     min_val_loss))
                start = time.time()

            if iter % config.save_model_iter == 0:
                self.summary_writer.flush()
                model_file_path = self.save_model(running_avg_loss, iter,
                                                  mode='train')
                tf.logging.info('Evaluate the model %s at validation set....' %
                                model_file_path)
                evl_model = Evaluate(model_file_path)
                val_avg_loss = evl_model.run_eval()
                if val_avg_loss < min_val_loss:
                    min_val_loss = val_avg_loss
                    best_model_file_path = self.save_model(running_avg_loss,
                                                           iter, mode='eval')
                    tf.logging.info('Save best model at %s' %
                                    best_model_file_path)
class Train(object):
    """Trainer with an auxiliary copy loss and automatic restart from the
    last checkpoint whose running loss was still finite (non-NaN)."""

    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        time.sleep(15)

        train_dir = os.path.join(config.log_root, 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.summary_writer = tf.summary.FileWriter(train_dir)
        # Path of the newest checkpoint whose running loss was not NaN.
        self.last_good_model_save_path = None

    def save_model(self, running_avg_loss, iter):
        """Checkpoint; remember the path when the running loss is not NaN."""
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(
            self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        # save the path to the last model that was not nan
        if not math.isnan(running_avg_loss):
            self.last_good_model_save_path = model_save_path
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        """Build model/optimizer; optionally resume from a checkpoint.
        Returns (start_iter, start_loss)."""
        self.model = Model(model_file_path)
        self.last_good_model_save_path = model_file_path

        params = list(self.model.encoder.parameters()) + \
            list(self.model.decoder.parameters()) + \
            list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(params, lr=initial_lr,
                                 initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            state = torch.load(model_file_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()
        return start_iter, start_loss

    def train_one_batch(self, batch):
        """One training step: NLL (+ coverage) plus a weighted copy loss that
        penalizes copy-attention mass placed on non-UNK vocabulary words."""
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()

        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(
            enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature, enc_padding_mask,
                c_t_1, extra_zeros, enc_batch_extend_vocab, coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage

            # ----- copy loss -----
            # NOTE(review): relies on the decoder exposing its last vocabulary
            # distribution as `vocab_dist_` — confirm that attribute exists.
            vocab_zero = Variable(torch.zeros(self.model.decoder.vocab_dist_.shape,
                                              dtype=torch.float))
            if use_cuda:
                vocab_zero = vocab_zero.cuda()
            if extra_zeros is not None:
                vocab_zero = torch.cat([vocab_zero, extra_zeros], 1)
            # Copy-attention mass scattered into extended-vocab space
            # (scatter_add is out-of-place, so vocab_zero stays all-zero here).
            attn_dist_ = (1 - p_gen) * attn_dist
            attn_expanded = vocab_zero.scatter_add(1, enc_batch_extend_vocab,
                                                   attn_dist_)
            vocab_zero[:, self.vocab.word2id('[UNK]')] = 1.0
            # Not sure whether we want to add loss for the extra vocab indices
            # vocab_zero[:, config.vocab_size:] = 1.0
            y_unk_neg = 1.0 - vocab_zero
            # FIX: bmm yields shape (B, 1, 1); the original added it directly
            # to the (B,) step_loss, broadcasting to (B, 1, B) and mixing
            # losses across batch examples. Flatten to (B,) first.
            copyloss = torch.bmm(y_unk_neg.unsqueeze(1),
                                 attn_expanded.unsqueeze(2)).view(-1)
            # add copy loss with lambda 2 weight
            step_loss = step_loss + config.copy_loss_wt * copyloss

            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)

        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(),
                                    config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)

        self.optimizer.step()
        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        """Main loop; if the running loss turns NaN, restart training from the
        last known-good checkpoint."""
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)

            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss,
                                                     self.summary_writer, iter)
            iter += 1

            if math.isnan(running_avg_loss):
                print('Found a nan loss return. Restarting the training at {}' \
                      .format(self.last_good_model_save_path))
                iter, running_avg_loss = self.setup_train(
                    self.last_good_model_save_path)
                start = time.time()

            if iter % 100 == 0:
                self.summary_writer.flush()
            print_interval = 1000
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                start = time.time()
            if iter % 1000 == 0:
                self.save_model(running_avg_loss, iter)