# `decode` and `models` are the example's local modules; jax.numpy is the
# only third-party import this function needs directly.
import jax.numpy as jnp


def predict_step(inputs,
                 params,
                 cache,
                 eos_id,
                 max_decode_len,
                 config,
                 beam_size=4):
  """Predict translation with fast decoding beam search on a batch."""
  batch_size = inputs.shape[0]
  # Prepare transformer fast-decoder call for beam search: for beam search, we
  # need to set up our decoder model to handle a batch size equal to
  # batch_size * beam_size, where each batch item's data is expanded in-place
  # rather than tiled.
  # i.e. if we denote each batch element subtensor as el[n]:
  # [el0, el1, el2] --> beam_size=2 --> [el0, el0, el1, el1, el2, el2]
  src_padding_mask = decode.flat_batch_beam_expand((inputs > 0)[..., None],
                                                   beam_size)
  tgt_padding_mask = decode.flat_batch_beam_expand(
      jnp.ones((batch_size, 1, 1)), beam_size)
  encoded_inputs = decode.flat_batch_beam_expand(
      models.Transformer(config).apply({'params': params},
                                       inputs,
                                       method=models.Transformer.encode),
      beam_size)

  def tokens_ids_to_logits(flat_ids, flat_cache):
    """Token slice to logits from decoder model."""
    # --> [batch * beam, 1, vocab]
    flat_logits, new_vars = models.Transformer(config).apply(
        {
            'params': params,
            'cache': flat_cache
        },
        encoded_inputs,
        src_padding_mask,
        flat_ids,
        tgt_padding_mask=tgt_padding_mask,
        mutable=['cache'],
        method=models.Transformer.decode)
    new_flat_cache = new_vars['cache']
    # Remove singleton sequence-length dimension:
    # [batch * beam, 1, vocab] --> [batch * beam, vocab]
    flat_logits = flat_logits.squeeze(axis=1)
    return flat_logits, new_flat_cache

  # Using the above-defined single-step decoder function, run
  # beam search over possible sequences given the input encoding.
  beam_seqs, _ = decode.beam_search(inputs,
                                    cache,
                                    tokens_ids_to_logits,
                                    beam_size=beam_size,
                                    alpha=0.6,
                                    eos_id=eos_id,
                                    max_decode_len=max_decode_len)
  # Beam search returns [n_batch, n_beam, n_length + 1] with the beam dimension
  # sorted in increasing order of log-probability.
  # Return the highest-scoring beam sequence; drop the first dummy 0 token.
  return beam_seqs[:, -1, 1:]
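
# A minimal, self-contained illustration of the "expanded in-place" layout
# described in the comment above. It uses jnp.repeat to mimic the layout
# that decode.flat_batch_beam_expand is documented to produce; it does not
# call the real helper, and the toy tensor below is illustrative only.
import jax.numpy as jnp


def beam_expand_demo(x, beam_size):
  # Repeat each batch element beam_size times along the batch axis:
  # [el0, el1, el2] --> beam_size=2 --> [el0, el0, el1, el1, el2, el2]
  return jnp.repeat(x, beam_size, axis=0)


print(beam_expand_demo(jnp.array([[1, 2], [3, 4], [5, 6]]), 2))
# [[1 2]
#  [1 2]
#  [3 4]
#  [3 4]
#  [5 6]
#  [5 6]]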
import os

import numpy as np
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

# `utils`, `decode`, `Encoder`, `Decoder`, and `train_on_batch` are this
# repo's own modules and helpers, imported elsewhere in the file.


def train(article, title, word2idx, target2idx, source_lengths,
          target_lengths, args, val_article=None, val_title=None,
          val_source_lengths=None, val_target_lengths=None):
    # Carve out a validation split and cache everything on the first run.
    if not os.path.exists('./temp/x.pkl'):
        size_of_val = int(len(article) * 0.05)
        val_article, val_title, val_source_lengths, val_target_lengths = \
            utils.sampling(article, title, source_lengths, target_lengths,
                           size_of_val)
        utils.save_everything(article, title, source_lengths, target_lengths,
                              val_article, val_title, val_source_lengths,
                              val_target_lengths, word2idx)
    size_of_val = len(val_article)
    batch_size = args.batch
    train_size = len(article)
    val_size = len(val_article)
    max_a = max(source_lengths)
    max_t = max(target_lengths)
    print("source vocab size:", len(word2idx))
    print("target vocab size:", len(target2idx))
    print("max a:{}, max t:{}".format(max_a, max_t))
    print("train_size:", train_size)
    print("val size:", val_size)
    print("batch_size:", batch_size)
    print("-" * 30)

    use_coverage = False
    encoder = Encoder(len(word2idx))
    decoder = Decoder(len(target2idx), 50)
    # Resume from checkpoints if they exist.
    if os.path.exists('decoder_model'):
        encoder.load_state_dict(torch.load('encoder_model'))
        decoder.load_state_dict(torch.load('decoder_model'))
    optimizer = torch.optim.Adam(
        list(encoder.parameters()) + list(decoder.parameters()), lr=0.001)
    n_epoch = 5

    print("Making word index and extend vocab")
    # article, article_tar, title, ext_vocab_all, ext_count = \
    #     indexing_word(article, title, word2idx, target2idx)
    # article = to_tensor(article)
    # article_extend = to_tensor(article_extend)
    # title = to_tensor(title)
    print("preprocess done")

    if args.use_cuda:
        encoder.cuda()
        decoder.cuda()

    print("start training")
    for epoch in range(n_epoch):
        total_loss = 0
        batch_n = int(train_size / batch_size)
        # Enable the coverage loss after the first epoch.
        if epoch > 0:
            use_coverage = True
        for b in range(batch_n):
            # Slice out the current batch and build index tensors plus the
            # extended (copy-mechanism) vocabulary for it.
            batch_x = article[b * batch_size:(b + 1) * batch_size]
            batch_y = title[b * batch_size:(b + 1) * batch_size]
            # batch_x_ext = article_extend[b * batch_size:(b + 1) * batch_size]
            batch_x, batch_x_ext, batch_y, extend_vocab, extend_lengths = \
                utils.batch_index(batch_x, batch_y, word2idx, target2idx)
            if args.use_cuda:
                batch_x = batch_x.cuda()
                batch_y = batch_y.cuda()
                batch_x_ext = batch_x_ext.cuda()
            x_lengths = source_lengths[b * batch_size:(b + 1) * batch_size]
            y_lengths = target_lengths[b * batch_size:(b + 1) * batch_size]
            # Workaround: pack then immediately unpack to trim batch_x_ext to
            # the true max length in this batch (see the standalone
            # demonstration after this function).
            pack = pack_padded_sequence(batch_x_ext, x_lengths,
                                        batch_first=True)
            batch_x_ext_var, _ = pad_packed_sequence(pack, batch_first=True)
            current_loss = train_on_batch(encoder, decoder, optimizer,
                                          batch_x, batch_y, x_lengths,
                                          y_lengths, word2idx, target2idx,
                                          batch_x_ext_var, extend_lengths,
                                          use_coverage)
            batch_x = batch_x.cpu()
            batch_y = batch_y.cpu()
            batch_x_ext = batch_x_ext.cpu()
            print('epoch:{}/{}, batch:{}/{}, loss:{}'.format(
                epoch + 1, n_epoch, b + 1, batch_n, current_loss))
            if (b + 1) % args.show_decode == 0:
                # Periodically checkpoint and beam-search decode one random
                # validation example.
                torch.save(encoder.state_dict(), 'encoder_model')
                torch.save(decoder.state_dict(), 'decoder_model')
                batch_x_val, batch_x_ext_val, batch_y_val, extend_vocab, extend_lengths = \
                    utils.batch_index(val_article, val_title, word2idx,
                                      target2idx)
                idx = np.random.randint(0, val_size)
                decode.beam_search(encoder, decoder,
                                   batch_x_val[idx].unsqueeze(0),
                                   batch_y_val[idx].unsqueeze(0),
                                   word2idx, target2idx,
                                   batch_x_ext_val[idx],
                                   extend_lengths[idx],
                                   extend_vocab[idx])
                batch_x_val = batch_x_val.cpu()
                batch_y_val = batch_y_val.cpu()
                batch_x_ext_val = batch_x_ext_val.cpu()
            total_loss += current_loss
        print('-' * 30)
        print()
    print("training finished")
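
# A minimal, self-contained demonstration of the pack/unpack workaround used
# in the batch loop above: packing a padded batch with its true lengths and
# immediately unpacking trims trailing padding down to the batch's real
# maximum length. The toy tensors here are illustrative only.
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

padded = torch.tensor([[5, 6, 7, 0, 0],
                       [8, 9, 0, 0, 0]])  # padded out to length 5
lengths = [3, 2]                          # true lengths, sorted descending
packed = pack_padded_sequence(padded, lengths, batch_first=True)
trimmed, _ = pad_packed_sequence(packed, batch_first=True)
print(trimmed.shape)  # torch.Size([2, 3]): columns beyond length 3 are gone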