def test_perplexity(self):
    nll = NLLLoss()
    ppl = Perplexity()
    nll.eval_batch(self.outputs, self.batch)
    ppl.eval_batch(self.outputs, self.batch)
    nll_loss = nll.get_loss()
    ppl_loss = ppl.get_loss()
    self.assertAlmostEqual(ppl_loss, math.exp(nll_loss))

def test_perplexity(self):
    nll = NLLLoss()
    ppl = Perplexity()
    for output, target in zip(self.outputs, self.targets):
        nll.eval_batch(output, target)
        ppl.eval_batch(output, target)
    nll_loss = nll.get_loss()
    ppl_loss = ppl.get_loss()
    self.assertAlmostEqual(ppl_loss, math.exp(nll_loss))

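# A minimal, self-contained sketch (an assumption of this note, not part of the
# original test suite) of the identity the two tests above assert: perplexity is
# exp() of the average per-token NLL, so ppl == exp(nll) whenever both losses see
# the same outputs and targets.
import math
import torch
import torch.nn.functional as F

log_probs = F.log_softmax(torch.randn(4, 10), dim=1)  # batch of 4, 10 classes
targets = torch.randint(0, 10, (4,))
nll = F.nll_loss(log_probs, targets)                  # average NLL over the batch
ppl = torch.exp(nll)                                  # perplexity
assert math.isclose(ppl.item(), math.exp(nll.item()), rel_tol=1e-6)
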
def test_nllloss_WITH_OUT_SIZE_AVERAGE(self):
    loss = NLLLoss(size_average=False)
    pytorch_loss = 0
    pytorch_criterion = torch.nn.NLLLoss(size_average=False)
    for output, target in zip(self.outputs, self.targets):
        loss.eval_batch(output, target)
        pytorch_loss += pytorch_criterion(output, target)
    loss_val = loss.get_loss()
    self.assertAlmostEqual(loss_val, pytorch_loss.data[0])

def test_nllloss_WITH_OUT_SIZE_AVERAGE(self):
    loss = NLLLoss(reduction='sum')
    pytorch_loss = 0
    pytorch_criterion = torch.nn.NLLLoss(reduction='sum')
    for output, target in zip(self.outputs, self.targets):
        loss.eval_batch(output, target)
        pytorch_loss += pytorch_criterion(output, target)
    loss_val = loss.get_loss()
    self.assertAlmostEqual(loss_val, pytorch_loss.item())

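# Hedged side note (not from the original tests): the two variants above differ
# only in the loss-constructor API. In older PyTorch, size_average=False meant
# "sum over the batch"; newer releases spell the same behaviour reduction='sum',
# and .data[0] on a 0-dim loss tensor became .item(). A quick check:
import torch

log_probs = torch.log_softmax(torch.randn(8, 5), dim=1)
targets = torch.randint(0, 5, (8,))
summed = torch.nn.NLLLoss(reduction='sum')(log_probs, targets)
manual = -log_probs[torch.arange(8), targets].sum()   # the same sum, by hand
assert torch.allclose(summed, manual)
print(summed.item())                                  # scalar via .item()
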
def test_nllloss(self):
    loss = NLLLoss()
    pytorch_loss = 0
    pytorch_criterion = torch.nn.NLLLoss()
    for output, target in zip(self.outputs, self.targets):
        loss.eval_batch(output, target)
        pytorch_loss += pytorch_criterion(output, target)
    loss_val = loss.get_loss()
    pytorch_loss /= self.num_batch
    self.assertAlmostEqual(loss_val, pytorch_loss.data[0])

def test_nllloss_WITH_OUT_SIZE_AVERAGE(self):
    num_repeat = 10
    loss = NLLLoss(reduction='sum')
    pytorch_loss = 0
    pytorch_criterion = torch.nn.NLLLoss(reduction='sum')
    for _ in range(num_repeat):
        for step, output in enumerate(self.outputs):
            pytorch_loss += pytorch_criterion(output, self.targets[:, step + 1])
        loss.eval_batch(self.outputs, self.batch)
    loss_val = loss.get_loss()
    self.assertAlmostEqual(loss_val, pytorch_loss.item())

def test_nllloss(self):
    num_batch = 10
    loss = NLLLoss()
    pytorch_loss = 0
    pytorch_criterion = torch.nn.NLLLoss()
    for _ in range(num_batch):
        for step, output in enumerate(self.outputs):
            pytorch_loss += pytorch_criterion(output, self.targets[:, step + 1])
        loss.eval_batch(self.outputs, self.batch)
    loss_val = loss.get_loss()
    pytorch_loss /= (num_batch * len(self.outputs))
    self.assertAlmostEqual(loss_val, pytorch_loss.item())

def test_perplexity(self):
    num_class = 5
    num_batch = 10
    batch_size = 5
    outputs = [F.softmax(Variable(torch.randn(batch_size, num_class)))
               for _ in range(num_batch)]
    targets = [Variable(torch.LongTensor(
                   [random.randint(0, num_class - 1) for _ in range(batch_size)]))
               for _ in range(num_batch)]
    nll = NLLLoss()
    ppl = Perplexity()
    for output, target in zip(outputs, targets):
        nll.eval_batch(output, target)
        ppl.eval_batch(output, target)
    nll_loss = nll.get_loss()
    ppl_loss = ppl.get_loss()
    self.assertAlmostEqual(ppl_loss, math.exp(nll_loss))

def test_nllloss(self):
    num_class = 5
    num_batch = 10
    batch_size = 5
    outputs = [F.softmax(Variable(torch.randn(batch_size, num_class)))
               for _ in range(num_batch)]
    targets = [Variable(torch.LongTensor(
                   [random.randint(0, num_class - 1) for _ in range(batch_size)]))
               for _ in range(num_batch)]
    loss = NLLLoss()
    pytorch_loss = 0
    pytorch_criterion = torch.nn.NLLLoss()
    for output, target in zip(outputs, targets):
        loss.eval_batch(output, target)
        pytorch_loss += pytorch_criterion(output, target)
    loss_val = loss.get_loss()
    pytorch_loss /= num_batch
    self.assertAlmostEqual(loss_val, pytorch_loss.data[0])

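# Hypothetical fixture sketch (assumption: the real setUp is not shown in this
# section). The earlier tests reference self.outputs, self.targets and
# self.num_batch; the standalone variants above build the same data inline, so a
# setUp along these lines would be consistent. self.batch is a dataset/iterator
# object specific to the library and is deliberately not reproduced here; modern
# tensors are used instead of Variable.
import random
import torch
import torch.nn.functional as F

def setUp(self):  # would live inside the unittest.TestCase
    num_class, batch_size = 5, 5
    self.num_batch = 10
    self.outputs = [F.softmax(torch.randn(batch_size, num_class), dim=1)
                    for _ in range(self.num_batch)]
    self.targets = [torch.randint(0, num_class, (batch_size,))
                    for _ in range(self.num_batch)]
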
def main():
    parser = argparse.ArgumentParser()
    opt = options.train_options(parser)
    opt = parser.parse_args()

    opt.cuda = torch.cuda.is_available()
    opt.device = None if opt.cuda else -1

    # Quick config overrides
    opt.exp_dir = './experiment/transformer-reinforce/use_billion'
    opt.load_vocab_from = './experiment/transformer/lang8-cor2err/vocab.pt'
    opt.build_vocab_from = './data/billion/billion.30m.model.vocab'
    opt.load_D_from = opt.exp_dir
    # opt.load_D_from = None

    # dataset params
    opt.max_len = 20

    # G params
    # opt.load_G_a_from = './experiment/transformer/lang8-err2cor/'
    # opt.load_G_b_from = './experiment/transformer/lang8-cor2err/'
    opt.d_word_vec = 300
    opt.d_model = 300
    opt.d_inner_hid = 600
    opt.n_head = 6
    opt.n_layers = 3
    opt.embs_share_weight = False
    opt.beam_size = 1
    opt.max_token_seq_len = opt.max_len + 2  # includes <BOS>, <EOS>
    opt.n_warmup_steps = 4000

    # D params
    opt.embed_dim = opt.d_model
    opt.num_kernel = 100
    opt.kernel_sizes = [3, 4, 5, 6, 7]
    opt.dropout_p = 0.25

    # train params
    opt.batch_size = 1
    opt.n_epoch = 10

    if not os.path.exists(opt.exp_dir):
        os.makedirs(opt.exp_dir)
    logging.basicConfig(filename=opt.exp_dir + '/.log',
                        format=LOG_FORMAT,
                        level=logging.DEBUG)
    logging.getLogger().addHandler(logging.StreamHandler())
    logging.info('Use CUDA? ' + str(opt.cuda))
    logging.info(opt)

    # ---------- prepare dataset ----------

    def len_filter(example):
        return len(example.src) <= opt.max_len and len(example.tgt) <= opt.max_len

    EN = SentencePieceField(init_token=Constants.BOS_WORD,
                            eos_token=Constants.EOS_WORD,
                            batch_first=True,
                            include_lengths=True)
    train = datasets.TranslationDataset(path='./data/dualgan/train',
                                        exts=('.billion.sp', '.use.sp'),
                                        fields=[('src', EN), ('tgt', EN)],
                                        filter_pred=len_filter)
    val = datasets.TranslationDataset(path='./data/dualgan/val',
                                      exts=('.billion.sp', '.use.sp'),
                                      fields=[('src', EN), ('tgt', EN)],
                                      filter_pred=len_filter)
    train_lang8, val_lang8 = Lang8.splits(exts=('.err.sp', '.cor.sp'),
                                          fields=[('src', EN), ('tgt', EN)],
                                          train='test',
                                          validation='test',
                                          test=None,
                                          filter_pred=len_filter)

    # Load the vocabulary (kept consistent across experiments)
    try:
        logging.info('Load vocab from %s' % opt.load_vocab_from)
        EN.load_vocab(opt.load_vocab_from)
    except FileNotFoundError:
        EN.build_vocab_from(opt.build_vocab_from)
        EN.save_vocab(opt.load_vocab_from)
    logging.info('Vocab len: %d' % len(EN.vocab))

    # Check that the special-token Constants agree with the vocab
    assert EN.vocab.stoi[Constants.BOS_WORD] == Constants.BOS
    assert EN.vocab.stoi[Constants.EOS_WORD] == Constants.EOS
    assert EN.vocab.stoi[Constants.PAD_WORD] == Constants.PAD
    assert EN.vocab.stoi[Constants.UNK_WORD] == Constants.UNK

    # ---------- init model ----------

    # G = build_G(opt, EN, EN)
    hidden_size = 512
    bidirectional = True
    encoder = EncoderRNN(len(EN.vocab), opt.max_len, hidden_size,
                         n_layers=1, bidirectional=bidirectional)
    decoder = DecoderRNN(len(EN.vocab), opt.max_len,
                         hidden_size * 2 if bidirectional else hidden_size,
                         n_layers=1, dropout_p=0.2, use_attention=True,
                         bidirectional=bidirectional,
                         eos_id=Constants.EOS, sos_id=Constants.BOS)
    G = Seq2seq(encoder, decoder)
    for param in G.parameters():
        param.data.uniform_(-0.08, 0.08)

    # optim_G = ScheduledOptim(optim.Adam(
    #     G.get_trainable_parameters(),
    #     betas=(0.9, 0.98), eps=1e-09),
    #     opt.d_model, opt.n_warmup_steps)
    optim_G = optim.Adam(G.parameters(), lr=1e-4, betas=(0.9, 0.98), eps=1e-09)
    loss_G = NLLLoss(size_average=False)
    if torch.cuda.is_available():
        loss_G.cuda()

    # Pretrain D
    if opt.load_D_from:
        D = load_model(opt.load_D_from)
    else:
        D = build_D(opt, EN)
    optim_D = torch.optim.Adam(D.parameters(), lr=1e-4)

    def get_criterion(vocab_size):
        ''' With PAD token zero weight '''
        weight = torch.ones(vocab_size)
        weight[Constants.PAD] = 0
        return nn.CrossEntropyLoss(weight, size_average=False)

    crit_G = get_criterion(len(EN.vocab))
    crit_D = nn.BCELoss()

    if opt.cuda:
        G.cuda()
        D.cuda()
        crit_G.cuda()
        crit_D.cuda()

    # ---------- train ----------

    trainer_D = trainers.DiscriminatorTrainer()

    if not opt.load_D_from:
        for epoch in range(1):
            logging.info('[Pretrain D Epoch %d]' % epoch)

            pool = helper.DiscriminatorDataPool(opt.max_len, D.min_len, Constants.PAD)

            # Fill the pool with data
            train_iter = data.BucketIterator(dataset=train,
                                             batch_size=opt.batch_size,
                                             device=opt.device,
                                             sort_key=lambda x: len(x.src),
                                             repeat=False)
            pool.fill(train_iter)

            # train D
            trainer_D.train(D,
                            train_iter=pool.batch_gen(),
                            crit=crit_D,
                            optimizer=optim_D)
            pool.reset()

        Checkpoint(model=D,
                   optimizer=optim_D,
                   epoch=0,
                   step=0,
                   input_vocab=EN.vocab,
                   output_vocab=EN.vocab).save(opt.exp_dir)

    def eval_D():
        pool = helper.DiscriminatorDataPool(opt.max_len, D.min_len, Constants.PAD)
        val_iter = data.BucketIterator(dataset=val,
                                       batch_size=opt.batch_size,
                                       device=opt.device,
                                       sort_key=lambda x: len(x.src),
                                       repeat=False)
        pool.fill(val_iter)
        trainer_D.evaluate(D, val_iter=pool.batch_gen(), crit=crit_D)
    # eval_D()

    # Train G
    ALPHA = 0
    for epoch in range(100):
        logging.info('[Epoch %d]' % epoch)

        train_iter = data.BucketIterator(dataset=train,
                                         batch_size=1,
                                         device=opt.device,
                                         sort_within_batch=True,
                                         sort_key=lambda x: len(x.src),
                                         repeat=False)
        for step, batch in enumerate(train_iter):
            src_seq = batch.src[0]
            src_length = batch.src[1]
            tgt_seq = src_seq[0].clone()
            # gold = tgt_seq[:, 1:]

            optim_G.zero_grad()
            loss_G.reset()

            decoder_outputs, decoder_hidden, other = G.rollout(src_seq, None, None, n_rollout=1)
            for i, step_output in enumerate(decoder_outputs):
                batch_size = tgt_seq.size(0)
                # print(step_output)
                # loss_G.eval_batch(step_output.contiguous().view(batch_size, -1), tgt_seq[:, i + 1])

            softmax_output = torch.exp(torch.cat([x for x in decoder_outputs], dim=0)).unsqueeze(0)
            softmax_output = helper.stack(softmax_output, 8)
            print(softmax_output)
            rollout = softmax_output.multinomial(1)
            print(rollout)

            tgt_seq = helper.pad_seq(tgt_seq.data,
                                     max_len=len(decoder_outputs) + 1,
                                     pad_value=Constants.PAD)
            tgt_seq = autograd.Variable(tgt_seq)
            for i, step_output in enumerate(decoder_outputs):
                batch_size = tgt_seq.size(0)
                loss_G.eval_batch(step_output.contiguous().view(batch_size, -1),
                                  tgt_seq[:, i + 1])

            G.zero_grad()
            loss_G.backward()
            optim_G.step()

            if step % 100 == 0:
                pred = torch.cat([x for x in other['sequence']], dim=1)
                print('[step %d] loss_rest %.4f' % (epoch * len(train_iter) + step, loss_G.get_loss()))
                print('%s -> %s' % (EN.reverse(tgt_seq.data)[0], EN.reverse(pred.data)[0]))

        # Reinforce Train G
        for p in D.parameters():
            p.requires_grad = False

        train_iter = data.BucketIterator(dataset=train,
                                         batch_size=16,
                                         device=opt.device,
                                         sort_within_batch=True,
                                         sort_key=lambda x: len(x.src),
                                         repeat=False)
        for step, batch in enumerate(train_iter):
            src_seq = batch.src[0]
            src_length = batch.src[1]
            tgt_seq = src_seq.clone()

            # a -> b' -> a
            decoder_outputs, decoder_hiddens, other = G.forward(
                src_seq, src_length.tolist(), target_variable=None)

            crit_G.reset()
            for i, step_output in enumerate(decoder_outputs):
                batch_size = tgt_seq.size(0)
                crit_G.eval_batch(step_output.contiguous().view(batch_size, -1),
                                  tgt_seq[:, i + 1])

            optim_G.zero_grad()
            crit_G.backward()
            optim_G.step()

            if step % 100 == 0:
                pred = torch.cat([x for x in other['sequence']], dim=1)
                print('[step %d] loss %.4f' % (epoch * len(train_iter) + step, crit_G.get_loss()))
                print('%s -> %s' % (EN.reverse(tgt_seq.data)[0], EN.reverse(pred.data)[0]))

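# Side sketch (an addition, not part of the training script above): the script's
# get_criterion() zeroes the PAD class weight in CrossEntropyLoss so padded
# positions contribute nothing to the summed loss; with a 'sum' reduction this is
# equivalent to ignore_index=PAD, as the check below illustrates. PAD/vocab here
# are arbitrary example values.
import torch
import torch.nn as nn

PAD, vocab = 0, 7
logits = torch.randn(6, vocab)
targets = torch.tensor([3, PAD, 5, PAD, 1, 2])

weight = torch.ones(vocab)
weight[PAD] = 0
by_weight = nn.CrossEntropyLoss(weight, reduction='sum')(logits, targets)
by_ignore = nn.CrossEntropyLoss(ignore_index=PAD, reduction='sum')(logits, targets)
assert torch.allclose(by_weight, by_ignore)
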
def forward(self, batch_x, batch_y=None, teacher_forcing_ratio=0,
            output_token=False, pad_token='<PAD>', visualize=False):
    # input_var = batch_x if type(batch_x) == Variable else Variable(batch_x)
    # input_var = input_var.float()
    if batch_y is not None:
        if type(batch_y) == list:
            batch_y = torch.LongTensor(batch_y).cuda() if self.use_cuda else torch.LongTensor(batch_y)
        batch_y = batch_y if type(batch_y) == Variable else Variable(batch_y)

    ''' input will be changed to variable by state_encoder's forward function '''
    decoder_outputs, decoder_hidden, ret_dict = self.seq2seq(
        input_variable=batch_x,
        target_variable=batch_y,
        teacher_forcing_ratio=teacher_forcing_ratio)

    ''' decode prediction vector as result '''
    pred_tokens = []
    preds = []
    attentions = []
    for ind in range(len(batch_x)):
        length = ret_dict['length'][ind]
        tgt_id_seq = [
            ret_dict['sequence'][di][ind].data[0] for di in range(length)
        ]
        tgt_seq = [self.id2token[int(tok)] for tok in tgt_id_seq]
        preds.append(tgt_id_seq)
        pred_tokens.append(tgt_seq)

        ''' Visualization '''
        attentions = ret_dict['attention_score']  # shape = output_len * batch * 1 * input_len
        if visualize and self.use_attention and not self.training:
            # print('Bug detect', attentions.shape, len(pred_tokens))
            for j in range(len(tgt_seq)):
                # print('===', tgt_seq[j], 'BD', len(attentions), attentions[j].shape)
                print('===', tgt_seq[j])
                for k in zip(self.state_v_component, attentions[j][ind][0].tolist()):
                    print(k)

    if self.training:
        ''' computing loss '''
        # loss = Perplexity()
        weight = torch.FloatTensor([1 for i in range(len(self.token2id))])
        if self.use_cuda:
            weight = weight.cuda()
        loss = NLLLoss(weight=weight, mask=self.token2id[pad_token], size_average=True)
        # loss = nn.NLLLoss(weight=weight, size_average=size_average)
        for step, step_output in enumerate(decoder_outputs):
            try:
                batch_size = batch_x.size(0)
            except AttributeError:
                batch_size = len(batch_x)
            ''' Select out current step's token.
                EOS is not included in step_output, so + 1 step for target '''
            pred_token_distribute_batch = step_output.contiguous().view(batch_size, -1)
            target_token_id_v_batch = batch_y[:, step + 1]
            loss.eval_batch(pred_token_distribute_batch, target_token_id_v_batch)
        if output_token:
            return preds, loss.acc_loss, pred_tokens
        else:
            return preds, loss.acc_loss
    else:
        if output_token:
            return preds, pred_tokens
        else:
            return preds

def forward(self, batch_x, batch_y=None, teacher_forcing_ratio=0,
            output_token=False, pad_token='<PAD>'):
    input_var = batch_x if type(batch_x) == Variable else Variable(batch_x)
    input_var = input_var.float()
    if batch_y is not None:
        batch_y = batch_y if type(batch_y) == Variable else Variable(batch_y)

    decoder_outputs, decoder_hidden, other = self.seq2seq(
        input_variable=input_var,
        target_variable=batch_y,
        teacher_forcing_ratio=teacher_forcing_ratio)

    # ''' prepare var for loss computing '''
    # target_var = []
    # for label in batch_y:
    #     tmp_var = []
    #     for id in label:
    #         token_v = [0 for i in range(len(self.token2id))]
    #         token_v[id] = 1
    #         tmp_var.append(token_v)
    #     target_var.append(tmp_var)
    # target_var = torch.LongTensor(target_var)

    ''' decode prediction vector as result '''
    pred_tokens = []
    preds = []
    for ind in range(len(batch_x)):
        length = other['length'][ind]
        tgt_id_seq = [
            other['sequence'][di][ind].data[0] for di in range(length)
        ]
        tgt_seq = [self.id2token[int(tok)] for tok in tgt_id_seq]
        preds.append(tgt_id_seq)
        pred_tokens.append(tgt_seq)

    if self.training:
        ''' computing loss '''
        # loss = Perplexity()
        weight = torch.FloatTensor([1 for i in range(len(self.token2id))])
        if self.use_cuda:
            weight = weight.cuda()
        loss = NLLLoss(weight=weight, mask=self.token2id[pad_token], size_average=True)
        # loss = nn.NLLLoss(weight=weight, size_average=size_average)
        for step, step_output in enumerate(decoder_outputs):
            batch_size = batch_x.size(0)
            ''' Select out current step's token.
                EOS is not included in step_output, so + 1 step for target '''
            pred_token_distribute_batch = step_output.contiguous().view(batch_size, -1)
            target_token_id_v_batch = batch_y[:, step + 1]
            loss.eval_batch(pred_token_distribute_batch, target_token_id_v_batch)
        if output_token:
            return preds, loss.acc_loss, pred_tokens
        else:
            return preds, loss.acc_loss
    else:
        if output_token:
            return preds, pred_tokens
        else:
            return preds

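# Small standalone illustration (names below are made up for the example, not
# taken from the model class above) of the step-shifted target indexing used in
# the loss loops: decoder output at step t is scored against target column t + 1,
# since the target sequence commonly begins with a start token the decoder never
# predicts, and EOS is likewise absent from step_output.
import torch
import torch.nn.functional as F

BOS, EOS, vocab, batch = 1, 2, 10, 3
batch_y = torch.tensor([[BOS, 4, 5, EOS]] * batch)            # (batch, seq_len)
decoder_outputs = [F.log_softmax(torch.randn(batch, vocab), dim=1)
                   for _ in range(batch_y.size(1) - 1)]       # one per predicted token
total = 0.0
for step, step_output in enumerate(decoder_outputs):
    total += F.nll_loss(step_output, batch_y[:, step + 1], reduction='sum')
print(total.item())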