# NOTE: standard imports for the model classes below. They additionally rely on helpers
# defined elsewhere in this repository (Encoder, Decoder, AttnDecoder, Variation,
# GatedTransition, PostNet, to_tensor, toTorch, gaussian_kld, sequence_mask,
# reverse_sequence, shuffle, device, PAD_ID, and the backward-weight constants
# `one` / `minus_one`), which are assumed to be imported from the surrounding modules.
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.optim import Adam


class PoemWAE(nn.Module):
    def __init__(self, config, api, PAD_token=0, pretrain_weight=None):
        super(PoemWAE, self).__init__()
        self.vocab = api.vocab
        self.vocab_size = len(self.vocab)
        self.rev_vocab = api.rev_vocab
        self.go_id = self.rev_vocab["<s>"]
        self.eos_id = self.rev_vocab["</s>"]
        self.maxlen = config.maxlen
        self.clip = config.clip
        self.lambda_gp = config.lambda_gp
        self.lr_gan_g = config.lr_gan_g
        self.lr_gan_d = config.lr_gan_d
        self.n_d_loss = config.n_d_loss
        self.temp = config.temp
        self.init_w = config.init_weight

        self.embedder = nn.Embedding(self.vocab_size, config.emb_size, padding_idx=PAD_token)
        if pretrain_weight is not None:
            self.embedder.weight.data.copy_(torch.from_numpy(pretrain_weight))

        # The same seq_encoder encodes the title and the previous/current lines.
        self.seq_encoder = Encoder(self.embedder, config.emb_size, config.n_hidden,
                                   True, config.n_layers, config.noise_radius)

        # For the poem task the context is the direct concatenation of the bidirectional GRU
        # encodings of the title and the last line, hence 4 * n_hidden.
        # Note: PoemWAE_GMP (a subclass) overrides prior_net with a Gaussian-mixture prior.
        self.prior_net = Variation(config.n_hidden * 4, config.z_size,
                                   dropout_rate=config.dropout, init_weight=self.init_w)  # p(e|c)

        # The original dialogue task used 3 * n_hidden here.
        # On the poem dataset, the title, the previous line and x are each encoded with a
        # bidirectional GRU and concatenated, hence 6 * n_hidden.
        self.post_net = Variation(config.n_hidden * 6, config.z_size,
                                  dropout_rate=config.dropout, init_weight=self.init_w)

        self.post_generator = nn.Sequential(
            nn.Linear(config.z_size, config.z_size),
            nn.BatchNorm1d(config.z_size, eps=1e-05, momentum=0.1),
            nn.ReLU(),
            nn.Linear(config.z_size, config.z_size),
            nn.BatchNorm1d(config.z_size, eps=1e-05, momentum=0.1),
            nn.ReLU(),
            nn.Linear(config.z_size, config.z_size))
        self.post_generator.apply(self.init_weights)

        self.prior_generator = nn.Sequential(
            nn.Linear(config.z_size, config.z_size),
            nn.BatchNorm1d(config.z_size, eps=1e-05, momentum=0.1),
            nn.ReLU(),
            nn.Linear(config.z_size, config.z_size),
            nn.BatchNorm1d(config.z_size, eps=1e-05, momentum=0.1),
            nn.ReLU(),
            nn.Linear(config.z_size, config.z_size))
        self.prior_generator.apply(self.init_weights)

        self.init_decoder_hidden = nn.Sequential(
            nn.Linear(config.n_hidden * 4 + config.z_size, config.n_hidden * 4),
            nn.BatchNorm1d(config.n_hidden * 4, eps=1e-05, momentum=0.1),
            nn.ReLU())

        # Since the poem context is the concatenation of two bidirectional encodings,
        # the decoder input would become z_size + 4 * n_hidden.
        # Change: the decoder keeps its own hidden size, and init_decoder_hidden uses an MLP
        # to map the concatenation down to the decoder's hidden size.
        self.decoder = Decoder(self.embedder, config.emb_size, config.n_hidden * 4,
                               self.vocab_size, n_layers=1)

        self.discriminator = nn.Sequential(
            # The poem context concatenates two bidirectional encodings,
            # so the input size is 4 * n_hidden + z_size.
            nn.Linear(config.n_hidden * 4 + config.z_size, config.n_hidden * 2),
            nn.BatchNorm1d(config.n_hidden * 2, eps=1e-05, momentum=0.1),
            nn.LeakyReLU(0.2),
            nn.Linear(config.n_hidden * 2, config.n_hidden * 2),
            nn.BatchNorm1d(config.n_hidden * 2, eps=1e-05, momentum=0.1),
            nn.LeakyReLU(0.2),
            nn.Linear(config.n_hidden * 2, 1),
        )
        self.discriminator.apply(self.init_weights)

        # Optimizers: one per training stage. Note that the three stages use different optimizers.
        self.optimizer_AE = optim.SGD(
            list(self.seq_encoder.parameters())
            + list(self.post_net.parameters())
            + list(self.post_generator.parameters())
            + list(self.init_decoder_hidden.parameters())
            + list(self.decoder.parameters()),
            lr=config.lr_ae)
        self.optimizer_G = optim.RMSprop(
            list(self.post_net.parameters())
            + list(self.post_generator.parameters())
            + list(self.prior_net.parameters())
            + list(self.prior_generator.parameters()),
            lr=self.lr_gan_g)
        self.optimizer_D = optim.RMSprop(self.discriminator.parameters(), lr=self.lr_gan_d)

        self.lr_scheduler_AE = optim.lr_scheduler.StepLR(self.optimizer_AE,
                                                         step_size=10, gamma=0.8)

        self.criterion_ce = nn.CrossEntropyLoss()

    def init_weights(self, m):
        if isinstance(m, nn.Linear):
            m.weight.data.uniform_(-self.init_w, self.init_w)
            # nn.init.kaiming_normal_(m.weight.data)
            # nn.init.kaiming_uniform_(m.weight.data)
            m.bias.data.fill_(0)

    # x: (batch, 2*n_hidden)
    # c: (batch, 2*2*n_hidden)
    def sample_code_post(self, x, c):
        z, _, _ = self.post_net(torch.cat((x, c), 1))  # input: (batch, 3*2*n_hidden)
        z = self.post_generator(z)
        return z

    def sample_code_prior_sentiment(self, c, align):
        choice_statistic = self.prior_net(c, align)  # e: (batch, z_size)
        return choice_statistic

    def sample_code_prior(self, c):
        z, _, _ = self.prior_net(c)  # e: (batch, z_size)
        z = self.prior_generator(z)  # z: (batch, z_size)
        return z

    # Inputs: title, context, target, target_lens.
    # c is the concatenation of the encoded title and the encoded context.
    def train_AE(self, title, context, target, target_lens):
        self.seq_encoder.train()
        self.decoder.train()

        # (batch, 2 * hidden_size)
        title_last_hidden, _ = self.seq_encoder(title)
        context_last_hidden, _ = self.seq_encoder(context)
        # (batch, 2 * hidden_size)
        x, _ = self.seq_encoder(target[:, 1:], target_lens - 1)

        # context embedding
        c = torch.cat((title_last_hidden, context_last_hidden), 1)  # (batch, 2 * hidden_size * 2)
        z = self.sample_code_post(x, c)  # (batch, z_size)

        # Standard autoencoder decoding: the decoder is initialised from the concatenation
        # of z and c, and the target is fed in shifted by one position.
        # output: (batch, len, vocab_size); len is 9, i.e. 7 characters + punctuation + </s>
        output = self.decoder(self.init_decoder_hidden(torch.cat((z, c), 1)), None,
                              target[:, :-1], target_lens - 1)
        flattened_output = output.view(-1, self.vocab_size)

        dec_target = target[:, 1:].contiguous().view(-1)
        mask = dec_target.gt(0)  # True for non-pad tokens (pad id is 0)
        masked_target = dec_target.masked_select(mask)  # keep only non-pad tokens
        output_mask = mask.unsqueeze(1).expand(mask.size(0), self.vocab_size)  # [(batch_sz * seq_len) x n_tokens]
        masked_output = flattened_output.masked_select(output_mask).view(-1, self.vocab_size)

        self.optimizer_AE.zero_grad()
        loss = self.criterion_ce(masked_output / self.temp, masked_target)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(
            list(self.seq_encoder.parameters()) + list(self.decoder.parameters()), self.clip)
        self.optimizer_AE.step()
        return [('train_loss_AE', loss.item())]

    # G shrinks the Wasserstein distance, analogous to minimising the KL term in a VAE.
    def train_G(self, title, context, target, target_lens, sentiment_mask=None, mask_type=None):
        self.seq_encoder.eval()
        self.optimizer_G.zero_grad()

        for p in self.discriminator.parameters():
            p.requires_grad = False

        title_last_hidden, _ = self.seq_encoder(title)
        context_last_hidden, _ = self.seq_encoder(context)
        c = torch.cat((title_last_hidden, context_last_hidden), 1)  # (batch, 2 * hidden_size * 2)

        # ----------------- posterior samples ---------------------------
        x, _ = self.seq_encoder(target[:, 1:], target_lens - 1)
        # detach so no gradient flows back into the encoder; (batch, z_size)
        z_post = self.sample_code_post(x.detach(), c.detach())
        errG_post = torch.mean(
            self.discriminator(torch.cat((z_post, c.detach()), 1))) * self.n_d_loss  # (batch, z_size + 4 * hidden)
        errG_post.backward(minus_one)

        # ----------------- prior samples ---------------------------
        prior_z = self.sample_code_prior(c.detach())
        errG_prior = torch.mean(
            self.discriminator(torch.cat((prior_z, c.detach()), 1))) * self.n_d_loss
        errG_prior.backward(one)

        self.optimizer_G.step()

        for p in self.discriminator.parameters():
            p.requires_grad = True

        costG = errG_prior - errG_post
        return [('train_loss_G', costG.item())]

    # D approximates the Wasserstein distance; a falling loss means a better fit, and
    # increasing the gradient penalty can improve the fit to some extent.
    # The larger n_iters_n is, the more often D is trained and the better the fit.
    def train_D(self, title, context, target, target_lens):
        self.seq_encoder.eval()
        self.discriminator.train()
        self.optimizer_D.zero_grad()

        batch_size = context.size(0)
        title_last_hidden, _ = self.seq_encoder(title)
        context_last_hidden, _ = self.seq_encoder(context)
        c = torch.cat((title_last_hidden, context_last_hidden), 1)  # (batch, 2 * hidden_size * 2)
        x, _ = self.seq_encoder(target[:, 1:], target_lens - 1)

        post_z = self.sample_code_post(x, c)
        errD_post = torch.mean(
            self.discriminator(torch.cat((post_z.detach(), c.detach()), 1))) * self.n_d_loss
        errD_post.backward(one)

        prior_z = self.sample_code_prior(c)
        errD_prior = torch.mean(
            self.discriminator(torch.cat((prior_z.detach(), c.detach()), 1))) * self.n_d_loss
        errD_prior.backward(minus_one)

        # WGAN-GP gradient penalty on interpolates between prior and posterior codes.
        alpha = to_tensor(torch.rand(batch_size, 1))
        alpha = alpha.expand(prior_z.size())
        interpolates = alpha * prior_z.data + ((1 - alpha) * post_z.data)
        interpolates = Variable(interpolates, requires_grad=True)
        d_input = torch.cat((interpolates, c.detach()), 1)
        disc_interpolates = torch.mean(self.discriminator(d_input))
        gradients = torch.autograd.grad(
            outputs=disc_interpolates,
            inputs=interpolates,
            grad_outputs=to_tensor(torch.ones(disc_interpolates.size())),
            create_graph=True,
            retain_graph=True,
            only_inputs=True)[0]
        gradient_penalty = (
            (gradients.contiguous().view(gradients.size(0), -1).norm(2, dim=1) - 1) ** 2
        ).mean() * self.lambda_gp
        gradient_penalty.backward()

        self.optimizer_D.step()
        costD = -(errD_prior - errD_post) + gradient_penalty
        return [('train_loss_D', costD.item())]

    def valid(self, title, context, target, target_lens, sentiment_mask=None):
        self.seq_encoder.eval()
        self.discriminator.eval()
        self.decoder.eval()

        title_last_hidden, _ = self.seq_encoder(title)
        context_last_hidden, _ = self.seq_encoder(context)
        c = torch.cat((title_last_hidden, context_last_hidden), 1)  # (batch, 2 * hidden_size * 2)
        x, _ = self.seq_encoder(target[:, 1:], target_lens - 1)

        post_z = self.sample_code_post(x, c)
        prior_z = self.sample_code_prior(c)
        errD_post = torch.mean(self.discriminator(torch.cat((post_z, c), 1)))
        errD_prior = torch.mean(self.discriminator(torch.cat((prior_z, c), 1)))
        costD = -(errD_prior - errD_post)
        costG = -costD

        dec_target = target[:, 1:].contiguous().view(-1)  # (batch_size * len)
        mask = dec_target.gt(0)  # True for non-pad tokens (pad id is 0)
        masked_target = dec_target.masked_select(mask)  # keep only non-pad tokens
        output_mask = mask.unsqueeze(1).expand(mask.size(0), self.vocab_size)
        output = self.decoder(self.init_decoder_hidden(torch.cat((post_z, c), 1)), None,
                              target[:, :-1], (target_lens - 1))
        flattened_output = output.view(-1, self.vocab_size)
        masked_output = flattened_output.masked_select(output_mask).view(-1, self.vocab_size)
        lossAE = self.criterion_ce(masked_output / self.temp, masked_target)

        return [('valid_loss_AE', lossAE.item()),
                ('valid_loss_G', costG.item()),
                ('valid_loss_D', costD.item())]

    # As described in the paper, at generation time noise is drawn from the prior network and
    # G produces prior_z (sample_code_prior(c) in this code). The decoder then takes the
    # concatenation of prior_z and c as input and decodes the next line (slightly different
    # from the paper, which feeds only prior_z to the decoder).
    # batch_size is 1: one poem is generated per call.
    # title: the poem title; context: the previous line.
    def test(self, title_tensor, title_words, headers):
        self.seq_encoder.eval()
        self.discriminator.eval()
        self.decoder.eval()

        # tem is initialised to [2, 3, 0, 0, 0, 0, 0, 0, 0] (<s> </s> plus padding)
        tem = [[2, 3] + [0] * (self.maxlen - 2)]
        pred_poems = []
        title_tokens = [
            self.vocab[e] for e in title_words[0].tolist()
            if e not in [0, self.eos_id, self.go_id]
        ]
        pred_poems.append(title_tokens)

        for sent_id in range(4):
            tem = to_tensor(np.array(tem))
            context = tem

            title_last_hidden, _ = self.seq_encoder(title_tensor)  # (batch=1, 2*hidden)
            if sent_id == 0:
                context_last_hidden, _ = self.seq_encoder(title_tensor)  # (batch=1, 2*hidden)
            else:
                context_last_hidden, _ = self.seq_encoder(context)  # (batch=1, 2*hidden)
            c = torch.cat((title_last_hidden, context_last_hidden), 1)  # (batch, 4*hidden_size)
            # Only one poem at a time (batch_size = 1), so c does not need to be repeated.
            prior_z = self.sample_code_prior(c)

            # decode_words is one complete line of the poem
            decode_words = self.decoder.testing(
                init_hidden=self.init_decoder_hidden(torch.cat((prior_z, c), 1)),
                maxlen=self.maxlen,
                go_id=self.go_id,
                mode="greedy",
                header=headers[sent_id])
            decode_words = decode_words[0].tolist()

            if len(decode_words) > self.maxlen:
                tem = [decode_words[0:self.maxlen]]
            else:
                tem = [[0] * (self.maxlen - len(decode_words)) + decode_words]

            pred_tokens = [
                self.vocab[e] for e in decode_words[:-1]
                if e != self.eos_id and e != 0
            ]
            pred_poems.append(pred_tokens)

        gen = ''
        for line in pred_poems:
            true_str = " ".join(line)
            gen = gen + true_str + '\n'
        return gen

    def sample(self, title, context, repeat, go_id, end_id):
        self.seq_encoder.eval()
        self.decoder.eval()

        title_last_hidden, _ = self.seq_encoder(title)
        context_last_hidden, _ = self.seq_encoder(context)
        c = torch.cat((title_last_hidden, context_last_hidden), 1)  # (batch, 2 * hidden_size * 2)
        # The input batch size is 1; repeat c `repeat` times for the BLEU computation later.
        c_repeated = c.expand(repeat, -1)
        prior_z = self.sample_code_prior(c_repeated)  # c_repeated: (batch_size=repeat, 4*hidden_size)

        # sample_words: (batch, max_len, 1); sample_lens: (batch_size, 1)
        sample_words, sample_lens = self.decoder.sampling(
            self.init_decoder_hidden(torch.cat((prior_z, c_repeated), 1)),
            self.maxlen, go_id, end_id, "greedy")
        return sample_words, sample_lens
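
# Illustrative only: a minimal outer training loop for PoemWAE, alternating the three
# stages exposed above (autoencoder step, several critic steps, one generator step, in the
# spirit of a WGAN schedule). `get_training_batches` and `n_iters_d` are hypothetical
# placeholders, not part of this repository; the real driver script may differ.
def run_poem_wae_epoch(model, get_training_batches, n_iters_d=5):
    for title, context, target, target_lens in get_training_batches():
        stats = model.train_AE(title, context, target, target_lens)
        for _ in range(n_iters_d):  # fit the critic (W-distance estimate) first
            stats += model.train_D(title, context, target, target_lens)
        stats += model.train_G(title, context, target, target_lens)
        print(stats)
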

class DeepAPI(nn.Module):
    '''Encode a natural-language description and decode an API call sequence.'''

    def __init__(self, config, vocab_size):
        super(DeepAPI, self).__init__()
        self.vocab_size = vocab_size
        self.maxlen = config['maxlen']
        self.clip = config['clip']
        self.temp = config['temp']

        self.desc_embedder = nn.Embedding(vocab_size, config['emb_size'], padding_idx=PAD_ID)
        self.api_embedder = nn.Embedding(vocab_size, config['emb_size'], padding_idx=PAD_ID)
        # utterance encoder: encode the description into a vector
        self.encoder = Encoder(self.desc_embedder, config['emb_size'], config['n_hidden'],
                               True, config['n_layers'], config['noise_radius'])
        # utterance decoder: P(x|c,z)
        self.decoder = Decoder(self.api_embedder, config['emb_size'], config['n_hidden'] * 2,
                               vocab_size, config['use_attention'], 1, config['dropout'])
        self.optimizer = optim.Adadelta(
            list(self.encoder.parameters()) + list(self.decoder.parameters()),
            lr=config['lr_ae'], rho=0.95)
        self.criterion_ce = nn.CrossEntropyLoss()

    def forward(self, descs, desc_lens, apiseqs, api_lens):
        c, hids = self.encoder(descs, desc_lens)
        # decode from z, c; output: [batch x seq_len x n_tokens]
        output, _ = self.decoder(c, hids, None, apiseqs[:, :-1], (api_lens - 1))
        output = output.view(-1, self.vocab_size)  # [batch*seq_len x n_tokens]

        dec_target = apiseqs[:, 1:].contiguous().view(-1)
        mask = dec_target.gt(0)  # [(batch_sz*seq_len)]
        masked_target = dec_target.masked_select(mask)
        output_mask = mask.unsqueeze(1).expand(mask.size(0), self.vocab_size)  # [(batch_sz*seq_len) x n_tokens]
        masked_output = output.masked_select(output_mask).view(-1, self.vocab_size)
        loss = self.criterion_ce(masked_output / self.temp, masked_target)
        return loss

    def train_AE(self, descs, desc_lens, apiseqs, api_lens):
        self.encoder.train()
        self.decoder.train()
        loss = self.forward(descs, desc_lens, apiseqs, api_lens)
        self.optimizer.zero_grad()
        loss.backward()
        # `clip_grad_norm_` prevents exploding gradients in RNNs / LSTMs
        torch.nn.utils.clip_grad_norm_(
            list(self.encoder.parameters()) + list(self.decoder.parameters()), self.clip)
        self.optimizer.step()
        return {'train_loss': loss.item()}

    def valid(self, descs, desc_lens, apiseqs, api_lens):
        self.encoder.eval()
        self.decoder.eval()
        loss = self.forward(descs, desc_lens, apiseqs, api_lens)
        return {'valid_loss': loss.item()}

    def sample(self, descs, desc_lens, n_samples, mode='beamsearch'):
        self.encoder.eval()
        self.decoder.eval()
        c, hids = self.encoder(descs, desc_lens)
        if mode == 'beamsearch':
            # [batch_size x n_samples x seq_len]
            sample_words, sample_lens, _ = self.decoder.beam_decode(
                c, hids, None, 12, self.maxlen, n_samples)
            sample_words, sample_lens = sample_words[0], sample_lens[0]
        else:
            sample_words, sample_lens = self.decoder.sampling(
                c, hids, None, n_samples, self.maxlen, mode)
        return sample_words, sample_lens

    def adjust_lr(self):
        # self.lr_scheduler_AE.step()
        return None
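
# Illustrative only: constructing and stepping DeepAPI. The config keys below are the ones
# read in __init__/sample above, but the values, the `load_batches` helper and the vocabulary
# size are hypothetical placeholders, and the project's Encoder/Decoder modules must be
# importable for this to run.
def deepapi_smoke_test(load_batches, vocab_size=10000):
    config = {'maxlen': 30, 'clip': 5.0, 'temp': 1.0, 'emb_size': 120,
              'n_hidden': 512, 'n_layers': 1, 'noise_radius': 0.2,
              'use_attention': True, 'dropout': 0.1, 'lr_ae': 1.0}
    model = DeepAPI(config, vocab_size)
    for descs, desc_lens, apiseqs, api_lens in load_batches():
        print(model.train_AE(descs, desc_lens, apiseqs, api_lens))
        # Beam-search decoding of API sequences for the same batch of descriptions:
        # sample_words, sample_lens = model.sample(descs, desc_lens, n_samples=5)
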

class CVAE(nn.Module):
    def __init__(self, config, api, PAD_token=0):
        super(CVAE, self).__init__()
        self.vocab = api.vocab
        self.vocab_size = len(self.vocab)
        self.rev_vocab = api.rev_vocab
        self.go_id = self.rev_vocab["<s>"]
        self.eos_id = self.rev_vocab["</s>"]
        self.maxlen = config.maxlen
        self.clip = config.clip
        self.temp = config.temp
        self.full_kl_step = config.full_kl_step
        self.z_size = config.z_size
        self.init_w = config.init_weight
        self.softmax = nn.Softmax(dim=1)

        self.embedder = nn.Embedding(self.vocab_size, config.emb_size, padding_idx=PAD_token)
        # Encodes the title and each poem line.
        self.seq_encoder = Encoder(embedder=self.embedder,
                                   input_size=config.emb_size,
                                   hidden_size=config.n_hidden,
                                   bidirectional=True,
                                   n_layers=config.n_layers,
                                   noise_radius=config.noise_radius)
        # The prior network's input is the encoded title plus the encoded previous line
        # (plus, in the sentiment variant, the encoded sentiment of the previous line).
        self.prior_net = Variation(config.n_hidden * 4, config.z_size,
                                   dropout_rate=config.dropout, init_weight=self.init_w)
        # The posterior network additionally takes x (another 2 * n_hidden).
        # self.post_net = Variation(config.n_hidden * 6, config.z_size*2)
        self.post_net = Variation(config.n_hidden * 6, config.z_size,
                                  dropout_rate=config.dropout, init_weight=self.init_w)
        # MLP for the bag-of-words loss
        self.bow_project = nn.Sequential(
            nn.Linear(config.n_hidden * 4 + config.z_size, 400),
            nn.LeakyReLU(),
            nn.Dropout(config.dropout),
            nn.Linear(400, self.vocab_size))
        self.init_decoder_hidden = nn.Sequential(
            nn.Linear(config.n_hidden * 4 + config.z_size, config.n_hidden),
            nn.BatchNorm1d(config.n_hidden, eps=1e-05, momentum=0.1),
            nn.LeakyReLU())

        # self.post_generator = nn.Sequential(
        #     nn.Linear(config.z_size, config.z_size),
        #     nn.BatchNorm1d(config.z_size, eps=1e-05, momentum=0.1),
        #     nn.LeakyReLU(),
        #     nn.Linear(config.z_size, config.z_size),
        #     nn.BatchNorm1d(config.z_size, eps=1e-05, momentum=0.1),
        #     nn.LeakyReLU(),
        #     nn.Linear(config.z_size, config.z_size))
        # self.post_generator.apply(self.init_weights)
        # self.prior_generator = nn.Sequential(
        #     nn.Linear(config.z_size, config.z_size),
        #     nn.BatchNorm1d(config.z_size, eps=1e-05, momentum=0.1),
        #     nn.ReLU(),
        #     nn.Dropout(config.dropout),
        #     nn.Linear(config.z_size, config.z_size),
        #     nn.BatchNorm1d(config.z_size, eps=1e-05, momentum=0.1),
        #     nn.ReLU(),
        #     nn.Dropout(config.dropout),
        #     nn.Linear(config.z_size, config.z_size))
        # self.prior_generator.apply(self.init_weights)

        self.init_decoder_hidden.apply(self.init_weights)
        self.bow_project.apply(self.init_weights)
        self.post_net.apply(self.init_weights)

        self.decoder = Decoder(embedder=self.embedder,
                               input_size=config.emb_size,
                               hidden_size=config.n_hidden,
                               vocab_size=self.vocab_size,
                               n_layers=1)

        # self.optimizer_lead = optim.Adam(list(self.seq_encoder.parameters())
        #                                  + list(self.prior_net.parameters()), lr=config.lr_lead)
        self.optimizer_AE = optim.Adam(
            list(self.seq_encoder.parameters())
            + list(self.prior_net.parameters())
            # + list(self.prior_generator.parameters())
            + list(self.post_net.parameters())
            # + list(self.post_generator.parameters())
            + list(self.bow_project.parameters())
            + list(self.init_decoder_hidden.parameters())
            + list(self.decoder.parameters()),
            lr=config.lr_ae)
        # self.lr_scheduler_AE = optim.lr_scheduler.StepLR(self.optimizer_AE, step_size=10, gamma=0.6)

        self.criterion_ce = nn.CrossEntropyLoss()
        self.criterion_sent_lead = nn.CrossEntropyLoss()

    def set_full_kl_step(self, kl_full_step):
        self.full_kl_step = kl_full_step

    def force_change_lr(self, new_init_lr_ae):
        self.optimizer_AE = optim.Adam(
            list(self.seq_encoder.parameters())
            + list(self.prior_net.parameters())
            # + list(self.prior_generator.parameters())
            + list(self.post_net.parameters())
            # + list(self.post_generator.parameters())
            + list(self.bow_project.parameters())
            + list(self.init_decoder_hidden.parameters())
            + list(self.decoder.parameters()),
            lr=new_init_lr_ae)

    def init_weights(self, m):
        if isinstance(m, nn.Linear):
            m.weight.data.uniform_(-self.init_w, self.init_w)
            m.bias.data.fill_(0)

    def sample_code_post(self, x, c):
        # mulogsigma = self.post_net(torch.cat((x, c), dim=1))
        # mu, logsigma = torch.chunk(mulogsigma, chunks=2, dim=1)
        # batch_size = c.size(0)
        # std = torch.exp(0.5 * logsigma)
        # epsilon = to_tensor(torch.randn([batch_size, self.z_size]))
        # z = epsilon * std + mu
        z, mu, logsigma = self.post_net(torch.cat((x, c), 1))  # input: (batch, 3*2*n_hidden)
        # z = self.post_generator(z)
        return z, mu, logsigma

    def sample_code_prior(self, c, sentiment_mask=None, mask_type=None):
        return self.prior_net(c, sentiment_mask=sentiment_mask, mask_type=mask_type)

    # # input: (batch, 3)
    # # target: (batch, 3)
    # def criterion_sent_lead(self, input, target):
    #     softmax_res = self.softmax(input)
    #     negative_log_softmax_res = -torch.log(softmax_res + 1e-10)  # (batch, 3)
    #     cross_entropy_loss = torch.sum(negative_log_softmax_res * target, dim=1)
    #     avg_cross_entropy = torch.mean(cross_entropy_loss)
    #     return avg_cross_entropy

    # sentiment_lead: (batch, 3)
    def train_AE(self, global_t, title, context, target, target_lens,
                 sentiment_mask=None, sentiment_lead=None):
        self.seq_encoder.train()
        self.decoder.train()
        # batch_size = title.size(0)
        # The sentiment of each line is predicted by a second classifier that takes the
        # current m_hidden as input and outputs a class distribution.
        title_last_hidden, _ = self.seq_encoder(title)
        context_last_hidden, _ = self.seq_encoder(context)
        x, _ = self.seq_encoder(target[:, 1:], target_lens - 1)

        condition_prior = torch.cat((title_last_hidden, context_last_hidden), dim=1)
        z_prior, prior_mu, prior_logvar, pi, pi_final = self.sample_code_prior(
            condition_prior, sentiment_mask=sentiment_mask)
        z_post, post_mu, post_logvar = self.sample_code_post(x, condition_prior)

        if sentiment_lead is not None:
            self.sent_lead_loss = self.criterion_sent_lead(input=pi, target=sentiment_lead)
        else:
            self.sent_lead_loss = 0
        # if sentiment_lead is not None:
        #     self.optimizer_lead.zero_grad()
        #     self.sent_lead_loss.backward()
        #     self.optimizer_lead.step()
        #     return [('lead_loss', self.sent_lead_loss.item())], global_t

        final_info = torch.cat((z_post, condition_prior), dim=1)

        # reconstruction loss
        output = self.decoder(init_hidden=self.init_decoder_hidden(final_info),
                              context=None, inputs=target[:, :-1])
        flattened_output = output.view(-1, self.vocab_size)
        # flattened_output = self.softmax(flattened_output) + 1e-10
        # flattened_output = torch.log(flattened_output)
        dec_target = target[:, 1:].contiguous().view(-1)
        mask = dec_target.gt(0)  # True for non-pad tokens (pad id is 0)
        masked_target = dec_target.masked_select(mask)  # keep only non-pad tokens
        output_mask = mask.unsqueeze(1).expand(mask.size(0), self.vocab_size)  # [(batch_sz * seq_len) x n_tokens]
        masked_output = flattened_output.masked_select(output_mask).view(-1, self.vocab_size)
        self.rc_loss = self.criterion_ce(masked_output / self.temp, masked_target)

        # KL divergence
        kld = gaussian_kld(post_mu, post_logvar, prior_mu, prior_logvar)
        self.avg_kld = torch.mean(kld)
        self.kl_weights = min(global_t / self.full_kl_step, 1.0)  # KL annealing
        self.kl_loss = self.kl_weights * self.avg_kld

        # average bag-of-words loss: in plain terms, sum the prediction loss over all target tokens
        self.bow_logits = self.bow_project(final_info)
        labels = target[:, 1:]
        label_mask = torch.sign(labels).detach().float()  # 1 for real tokens, 0 for padding
        # soft_result = self.softmax(self.bow_logits) + 1e-10
        # bow_loss = -torch.log(soft_result).gather(1, labels) * label_mask
        bow_loss = -F.log_softmax(self.bow_logits, dim=1).gather(1, labels) * label_mask
        sum_bow_loss = torch.sum(bow_loss, 1)
        self.avg_bow_loss = torch.mean(sum_bow_loss)

        self.aug_elbo_loss = self.avg_bow_loss + self.kl_loss + self.rc_loss
        self.total_loss = self.aug_elbo_loss + self.sent_lead_loss

        # Effectively increases the learning rate on the labelled subset.
        if sentiment_mask is not None:
            self.total_loss = self.total_loss * 13.33

        self.optimizer_AE.zero_grad()
        self.total_loss.backward()
        self.optimizer_AE.step()

        avg_total_loss = self.total_loss.item()
        avg_lead_loss = 0 if sentiment_lead is None else self.sent_lead_loss.item()
        avg_aug_elbo_loss = self.aug_elbo_loss.item()
        avg_kl_loss = self.kl_loss.item()
        avg_rc_loss = self.rc_loss.data.item()
        avg_bow_loss = self.avg_bow_loss.item()
        global_t += 1

        return [('avg_total_loss', avg_total_loss),
                ('avg_lead_loss', avg_lead_loss),
                ('avg_aug_elbo_loss', avg_aug_elbo_loss),
                ('avg_kl_loss', avg_kl_loss),
                ('avg_rc_loss', avg_rc_loss),
                ('avg_bow_loss', avg_bow_loss),
                ('kl_weight', self.kl_weights)], global_t

    def valid_AE(self, global_t, title, context, target, target_lens,
                 sentiment_mask=None, sentiment_lead=None):
        self.seq_encoder.eval()
        self.decoder.eval()

        title_last_hidden, _ = self.seq_encoder(title)
        context_last_hidden, _ = self.seq_encoder(context)
        x, _ = self.seq_encoder(target[:, 1:], target_lens - 1)

        condition_prior = torch.cat((title_last_hidden, context_last_hidden), dim=1)
        z_prior, prior_mu, prior_logvar, pi, pi_final = self.sample_code_prior(
            condition_prior, sentiment_mask=sentiment_mask)
        z_post, post_mu, post_logvar = self.sample_code_post(x, condition_prior)

        if sentiment_lead is not None:
            self.sent_lead_loss = self.criterion_sent_lead(input=pi, target=sentiment_lead)
        else:
            self.sent_lead_loss = 0
        # if sentiment_lead is not None:
        #     return [('valid_lead_loss', self.sent_lead_loss.item())], global_t

        final_info = torch.cat((z_post, condition_prior), dim=1)

        output = self.decoder(init_hidden=self.init_decoder_hidden(final_info),
                              context=None, inputs=target[:, :-1])
        flattened_output = output.view(-1, self.vocab_size)
        # flattened_output = self.softmax(flattened_output) + 1e-10
        # flattened_output = torch.log(flattened_output)
        dec_target = target[:, 1:].contiguous().view(-1)
        mask = dec_target.gt(0)  # True for non-pad tokens (pad id is 0)
        masked_target = dec_target.masked_select(mask)  # keep only non-pad tokens
        output_mask = mask.unsqueeze(1).expand(mask.size(0), self.vocab_size)  # [(batch_sz * seq_len) x n_tokens]
        masked_output = flattened_output.masked_select(output_mask).view(-1, self.vocab_size)
        self.rc_loss = self.criterion_ce(masked_output / self.temp, masked_target)

        # KL divergence
        kld = gaussian_kld(post_mu, post_logvar, prior_mu, prior_logvar)
        self.avg_kld = torch.mean(kld)
        self.kl_weights = min(global_t / self.full_kl_step, 1.0)  # KL annealing
        self.kl_loss = self.kl_weights * self.avg_kld

        # average bag-of-words loss
        self.bow_logits = self.bow_project(final_info)
        labels = target[:, 1:]
        label_mask = torch.sign(labels).detach().float()
        bow_loss = -F.log_softmax(self.bow_logits, dim=1).gather(1, labels) * label_mask
        sum_bow_loss = torch.sum(bow_loss, 1)
        self.avg_bow_loss = torch.mean(sum_bow_loss)
        self.aug_elbo_loss = self.avg_bow_loss + self.kl_loss + self.rc_loss

        avg_aug_elbo_loss = self.aug_elbo_loss.item()
        avg_kl_loss = self.kl_loss.item()
        avg_rc_loss = self.rc_loss.data.item()
        avg_bow_loss = self.avg_bow_loss.item()
        avg_lead_loss = 0 if sentiment_lead is None else self.sent_lead_loss.item()

        return [('valid_lead_loss', avg_lead_loss),
                ('valid_aug_elbo_loss', avg_aug_elbo_loss),
                ('valid_kl_loss', avg_kl_loss),
                ('valid_rc_loss', avg_rc_loss),
                ('valid_bow_loss', avg_bow_loss)], global_t

    # batch_size = 1: only a single title is given.
    # At test time there is only the prior, no posterior, and hence no KL divergence.
    def test(self, title_tensor, title_words, mask_type=None):
        self.seq_encoder.eval()
        self.decoder.eval()
        assert title_tensor.size(0) == 1

        tem = [[2, 3] + [0] * (self.maxlen - 2)]
        pred_poems = []
        # Filter <s>, </s> and padding out of the title; only used for printing.
        title_tokens = [
            self.vocab[e] for e in title_words[0].tolist()
            if e not in [0, self.eos_id, self.go_id]
        ]
        pred_poems.append(title_tokens)

        for i in range(4):
            tem = to_tensor(np.array(tem))
            context = tem
            if i == 0:
                context_last_hidden, _ = self.seq_encoder(title_tensor)
            else:
                context_last_hidden, _ = self.seq_encoder(context)
            title_last_hidden, _ = self.seq_encoder(title_tensor)

            condition_prior = torch.cat((title_last_hidden, context_last_hidden), dim=1)
            z_prior, prior_mu, prior_logvar, _, _ = self.sample_code_prior(
                condition_prior, mask_type=mask_type)
            final_info = torch.cat((z_prior, condition_prior), 1)

            decode_words = self.decoder.testing(
                init_hidden=self.init_decoder_hidden(final_info),
                maxlen=self.maxlen,
                go_id=self.go_id,
                mode="greedy")
            decode_words = decode_words[0].tolist()

            if len(decode_words) >= self.maxlen:
                tem = [decode_words[0:self.maxlen]]
            else:
                tem = [[0] * (self.maxlen - len(decode_words)) + decode_words]

            pred_tokens = [
                self.vocab[e] for e in decode_words[:-1]
                if e != self.eos_id and e != 0 and e != self.go_id
            ]
            pred_poems.append(pred_tokens)

        gen = ""
        for line in pred_poems:
            cur_line = " ".join(line)
            gen = gen + cur_line + '\n'
        return gen

    def sample(self, title, context, repeat, go_id, end_id):
        self.seq_encoder.eval()
        self.decoder.eval()
        assert title.size(0) == 1

        title_last_hidden, _ = self.seq_encoder(title)
        context_last_hidden, _ = self.seq_encoder(context)
        condition_prior = torch.cat((title_last_hidden, context_last_hidden), 1)
        condition_prior_repeat = condition_prior.expand(repeat, -1)

        z_prior_repeat, _, _, _, _ = self.sample_code_prior(condition_prior_repeat)

        final_info = torch.cat((z_prior_repeat, condition_prior_repeat), dim=1)
        sample_words, sample_lens = self.decoder.sampling(
            init_hidden=self.init_decoder_hidden(final_info),
            maxlen=self.maxlen,
            go_id=self.go_id,
            eos_id=self.eos_id,
            mode="greedy")
        return sample_words, sample_lens
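
# The KL term in CVAE relies on a project helper `gaussian_kld(post_mu, post_logvar,
# prior_mu, prior_logvar)` defined elsewhere in the repository. For reference, a standard
# closed form for KL(q || p) between two diagonal Gaussians, consistent with how it is
# called above (and with DHMM.kl_div below), is sketched here; the project's actual
# implementation may differ in reduction or argument order.
def gaussian_kld_reference(post_mu, post_logvar, prior_mu, prior_logvar):
    # KL( N(post_mu, exp(post_logvar)) || N(prior_mu, exp(prior_logvar)) ), summed over z dims
    return 0.5 * torch.sum(
        prior_logvar - post_logvar
        + (torch.exp(post_logvar) + (post_mu - prior_mu).pow(2)) / torch.exp(prior_logvar)
        - 1.0, dim=1)
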

class DHMM(nn.Module):
    """The Deep Markov Model."""

    def __init__(self, config):
        super(DHMM, self).__init__()
        self.input_dim = config['input_dim']
        self.z_dim = config['z_dim']
        self.emission_dim = config['emission_dim']
        self.trans_dim = config['trans_dim']
        self.rnn_dim = config['rnn_dim']
        self.clip_norm = config['clip_norm']

        # Parameterizes the Bernoulli observation likelihood p(x_t | z_t).
        # The emitter outputs logits: the original trailing Sigmoid is omitted so that it is
        # consistent with BCEWithLogitsLoss in infer() and the explicit sigmoid in generate().
        self.emitter = nn.Sequential(
            nn.Linear(self.z_dim, self.emission_dim),
            nn.ReLU(),
            nn.Linear(self.emission_dim, self.emission_dim),
            nn.ReLU(),
            nn.Linear(self.emission_dim, self.input_dim)
        )
        self.trans = GatedTransition(self.z_dim, self.trans_dim)
        self.postnet = PostNet(self.z_dim, self.rnn_dim)
        self.rnn = Encoder(None, self.input_dim, self.rnn_dim, False, 1)
        # nn.RNN(input_size=self.input_dim, hidden_size=self.rnn_dim, nonlinearity='relu',
        #        batch_first=True, bidirectional=False, num_layers=1)

        # Trainable parameters z_0 and z_q_0 that help define p(z_1) and q(z_1)
        # (for t = 1 there are no previous latents to condition on).
        self.z_0 = nn.Parameter(torch.zeros(self.z_dim))
        self.z_q_0 = nn.Parameter(torch.zeros(self.z_dim))
        self.h_0 = nn.Parameter(torch.zeros(1, 1, self.rnn_dim))

        self.optimizer = Adam(self.parameters(), lr=config['lr'],
                              betas=(config['beta1'], config['beta2']))

    def kl_div(self, mu1, logvar1, mu2=None, logvar2=None):
        one = torch.ones(1, device=mu1.device)
        if mu2 is None:
            mu2 = torch.zeros(1, device=mu1.device)
        if logvar2 is None:
            logvar2 = torch.zeros(1, device=mu1.device)
        return torch.sum(
            0.5 * (logvar2 - logvar1
                   + (torch.exp(logvar1) + (mu1 - mu2).pow(2)) / torch.exp(logvar2)
                   - one), 1)

    def infer(self, x, x_rev, x_lens):
        """Infer q(z_{1:T} | x_{1:T}) (the variational distribution)."""
        batch_size, _, x_dim = x.size()
        T_max = x_lens.max()
        h_0 = self.h_0.expand(1, batch_size, self.rnn.hidden_size).contiguous()
        # Push the observed (time-reversed) x's through the RNN, then restore the
        # time ordering of the unpacked hidden states.
        _, rnn_out = self.rnn(x_rev, x_lens, h_0)
        rnn_out = reverse_sequence(rnn_out, x_lens)

        rec_losses = torch.zeros((batch_size, T_max), device=x.device)
        kl_states = torch.zeros((batch_size, T_max), device=x.device)
        # Set z_prev = z_q_0 to set up the recursive conditioning in q(z_t | ...).
        z_prev = self.z_q_0.expand(batch_size, self.z_q_0.size(0))
        for t in range(T_max):
            z_prior, z_prior_mu, z_prior_logvar = self.trans(z_prev)          # p(z_t | z_{t-1})
            z_t, z_mu, z_logvar = self.postnet(z_prev, rnn_out[:, t, :])      # q(z_t | z_{t-1}, x_{t:T})
            kl_states[:, t] = self.kl_div(z_mu, z_logvar, z_prior_mu, z_prior_logvar)
            logit_x_t = self.emitter(z_t).contiguous()                        # p(x_t | z_t)
            rec_loss = nn.BCEWithLogitsLoss(reduction='none')(
                logit_x_t.view(-1), x[:, t, :].contiguous().view(-1)).view(batch_size, -1)
            rec_losses[:, t] = rec_loss.mean(dim=1)
            z_prev = z_t

        x_mask = sequence_mask(x_lens)
        x_mask = x_mask.gt(0).view(-1)
        rec_loss = rec_losses.view(-1).masked_select(x_mask).mean()
        kl_loss = kl_states.view(-1).masked_select(x_mask).mean()
        return rec_loss, kl_loss

    def train_AE(self, x, x_rev, x_lens, kl_anneal):
        self.rnn.train()  # put the RNN back into training mode (i.e. turn on dropout if applicable)
        rec_loss, kl_loss = self.infer(x, x_rev, x_lens)
        loss = rec_loss + kl_anneal * kl_loss
        self.optimizer.zero_grad()
        loss.backward()
        nn.utils.clip_grad_norm_(self.parameters(), self.clip_norm)
        self.optimizer.step()
        return {'train_loss_AE': loss.item(), 'train_loss_KL': kl_loss.item()}

    def valid(self, x, x_rev, x_lens):
        self.eval()
        rec_loss, kl_loss = self.infer(x, x_rev, x_lens)
        loss = rec_loss + kl_loss
        return loss

    def generate(self, x, x_rev, x_lens):
        """Generative model p(x_{1:T} | z_{1:T}) p(z_{1:T})."""
        batch_size, _, x_dim = x.size()
        # Number of time steps to process in the mini-batch.
        T_max = x_lens.max()
        # Set z_prev = z_0 to set up the recursive conditioning in p(z_t | z_{t-1}).
        z_prev = self.z_0.expand(batch_size, self.z_0.size(0))
        samples = []
        for t in range(1, T_max + 1):
            # Sample z_t ~ p(z_t | z_{t-1}) one time step at a time.
            z_t, z_mu, z_logvar = self.trans(z_prev)
            # Probabilities that parameterize the Bernoulli likelihood p(x_t | z_t).
            p_x_t = torch.sigmoid(self.emitter(z_t))
            x_t = torch.bernoulli(p_x_t)  # sample x_t ~ p(x_t | z_t)
            samples.append(x_t)
            z_prev = z_t
        return torch.stack(samples, dim=1)  # (batch, T_max, input_dim)
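
# Illustrative only: building a DHMM and running a few KL-annealed training steps on a toy
# batch of binary sequences. All config values, shapes and the linear annealing schedule are
# assumptions for this sketch (not settings from this repository), and the project's
# Encoder / GatedTransition / PostNet / sequence helpers must be importable.
def dhmm_smoke_test():
    config = {'input_dim': 88, 'z_dim': 100, 'emission_dim': 100, 'trans_dim': 200,
              'rnn_dim': 400, 'clip_norm': 10.0, 'lr': 3e-4, 'beta1': 0.9, 'beta2': 0.999}
    model = DHMM(config)
    batch, T = 8, 20
    x = torch.bernoulli(torch.full((batch, T, config['input_dim']), 0.3))
    x_rev = torch.flip(x, dims=[1])                 # time-reversed copy for the inference RNN
    x_lens = torch.full((batch,), T, dtype=torch.long)
    for step in range(10):
        kl_anneal = min(1.0, (step + 1) / 10)       # linear KL annealing
        print(model.train_AE(x, x_rev, x_lens, kl_anneal))
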

class DA_RNN:
    def __init__(self, X_dim, Y_dim, encoder_hidden_size=64, decoder_hidden_size=64,
                 linear_dropout=0, T=10, learning_rate=1e-5, batch_size=128, decay_rate=0.95):
        self.T = T
        self.decay_rate = decay_rate
        self.batch_size = batch_size
        self.X_dim = X_dim
        self.Y_dim = Y_dim

        self.encoder = Encoder(X_dim, encoder_hidden_size, T, linear_dropout).to(device)
        self.decoder = Decoder(encoder_hidden_size, decoder_hidden_size, T,
                               linear_dropout, Y_dim).to(device)
        self.encoder_optim = torch.optim.Adam(params=self.encoder.parameters(), lr=learning_rate)
        self.decoder_optim = torch.optim.Adam(params=self.decoder.parameters(), lr=learning_rate)
        self.loss_func = torch.nn.MSELoss()

    def adjust_learning_rate(self):
        for enc_params, dec_params in zip(self.encoder_optim.param_groups,
                                          self.decoder_optim.param_groups):
            enc_params['lr'] = enc_params['lr'] * self.decay_rate
            dec_params['lr'] = dec_params['lr'] * self.decay_rate

    def ToTrainingBatches(self, X, Y, shuffle_slice=True):
        X_batches = []
        Y_batches = []
        N = X.shape[0]
        batch_num = math.ceil((N - self.T) / self.batch_size)
        i = self.T - 1
        for b in range(batch_num):
            # number of outputs = N - T + 1; N is the series length, i is an index
            _batch_size = self.batch_size if N - i >= self.batch_size else N - i
            X_batch = np.empty((_batch_size, self.T, self.X_dim))
            Y_batch = np.empty((_batch_size, self.Y_dim))
            for b_idx in range(_batch_size):
                X_batch[b_idx, :, :] = X[i - self.T + 1:i + 1]
                Y_batch[b_idx, :] = Y[i]
                i += 1
            X_batches.append(X_batch)
            Y_batches.append(Y_batch)
        # TODO: zero padding
        if shuffle_slice:
            return shuffle(X_batches, Y_batches)
        else:
            return X_batches, Y_batches

    def ToTestingBatch(self, X):
        N = X.shape[0]
        X_batch = np.empty((N - self.T + 1, self.T, self.X_dim))
        i = self.T - 1
        b_idx = 0
        while i < N:
            X_batch[b_idx, :, :] = X[i - self.T + 1:i + 1]
            i += 1
            b_idx += 1
        # TODO: zero padding
        return X_batch

    def train(self, X_train, Y_train, X_val, Y_val, epochs):
        if len(Y_train.shape) == 1:
            Y_train = Y_train[:, np.newaxis]
        if len(Y_val.shape) == 1:
            Y_val = Y_val[:, np.newaxis]
        assert len(X_train) == len(Y_train)
        assert len(X_val) == len(Y_val)

        epoch_loss_hist = []
        iter_loss_hist = []
        N = X_train.shape[0]
        for _e in range(epochs):
            X_train_batches, Y_train_batches = self.ToTrainingBatches(X_train, Y_train)
            for X_train_batch, Y_train_batch in zip(X_train_batches, Y_train_batches):
                X_train_loss = self.train_iter(X_train_batch, Y_train_batch)
                iter_loss_hist.append(np.mean(X_train_loss))
            # decay learning rate
            # if _e % 20 == 0:
            #     self.adjust_learning_rate()
            epoch_loss_hist.append(iter_loss_hist[-len(X_train_batches):])
            if _e % 2 == 0:
                print("Epoch: {}\t".format(_e), end="")
                Y_val_pred = self.predict(X_val, on_train=True)
                Y_val_loss = self.loss_func(Y_val_pred, toTorch(Y_val[-(N - self.T + 1):]))
                print("train_loss: {:.4f} val_loss: {:.4f}".format(X_train_loss, Y_val_loss))
        return epoch_loss_hist, iter_loss_hist

    def train_iter(self, X, Y):
        self.encoder.train(), self.decoder.train()
        self.encoder_optim.zero_grad(), self.decoder_optim.zero_grad()
        _, X_encoded = self.encoder(toTorch(X))
        Y_pred = self.decoder(X_encoded)
        loss = self.loss_func(Y_pred, toTorch(Y))
        loss.backward()
        self.encoder_optim.step()
        self.decoder_optim.step()
        return loss.item()

    def predict(self, X, on_train=False):
        self.encoder.eval(), self.decoder.eval()
        X_batch = self.ToTestingBatch(X)
        _, X_encoded = self.encoder(toTorch(X_batch))
        Y_pred = self.decoder(X_encoded)
        if not on_train:
            Y_pred = Y_pred.cpu().detach().numpy()
        return Y_pred
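
# Illustrative only: fitting DA_RNN on a synthetic univariate target driven by a
# 5-dimensional exogenous series. Shapes follow the ToTrainingBatches/predict conventions
# above; all sizes and hyperparameters here are arbitrary, and the project's
# Encoder/Decoder/toTorch helpers must be importable.
def da_rnn_smoke_test():
    N, X_dim, Y_dim = 500, 5, 1
    X = np.random.randn(N, X_dim).astype(np.float32)
    Y = X.sum(axis=1, keepdims=True) + 0.1 * np.random.randn(N, 1).astype(np.float32)
    model = DA_RNN(X_dim, Y_dim, T=10, batch_size=64, learning_rate=1e-3)
    epoch_hist, iter_hist = model.train(X, Y, X, Y, epochs=4)
    preds = model.predict(X)                        # ndarray of shape (N - T + 1, Y_dim)
    print(preds.shape, iter_hist[-1])
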

class Seq2Seq(nn.Module):
    def __init__(self, config, api, pad_token=0):
        super(Seq2Seq, self).__init__()
        self.vocab = api.vocab
        self.vocab_size = len(self.vocab)
        self.rev_vocab = api.rev_vocab
        self.go_id = self.rev_vocab["<s>"]
        self.eos_id = self.rev_vocab["</s>"]
        self.maxlen = config.maxlen

        self.embedder = nn.Embedding(self.vocab_size, config.emb_size, padding_idx=pad_token)
        self.encoder = Encoder(self.embedder, config.emb_size, config.n_hidden,
                               True, config.n_layers, config.noise_radius)
        self.decoder = AttnDecoder(config=config, embedder=self.embedder,
                                   vocab_size=self.vocab_size)
        self.criterion = nn.NLLLoss(reduction='none')
        self.optimizer = optim.Adam(
            list(self.encoder.parameters()) + list(self.decoder.parameters()),
            lr=config.lr_s2s)
        self.lr_scheduler_AE = optim.lr_scheduler.StepLR(self.optimizer, step_size=10, gamma=0.6)

    # Titles and poem lines are limited to 10 characters: shorter ones are padded,
    # longer ones are truncated.
    # Each call takes one batch of context and target.
    def train_model(self, context, target, target_lens):
        self.encoder.train()
        self.decoder.train()
        self.optimizer.zero_grad()

        # (batch, 2 * n_hidden), (batch, len, 2 * n_hidden)
        encoder_last_hidden, encoder_output = self.encoder(context)
        batch_size = encoder_last_hidden.size(0)
        hidden_size = encoder_last_hidden.size(1) // 2
        # (1, batch, n_hidden)
        last_hidden = encoder_last_hidden.view(batch_size, 2, -1)[:, -1, :].squeeze().unsqueeze(0)
        # (batch, len, n_hidden)
        encoder_output = encoder_output.view(batch_size, -1, 2, hidden_size)[:, :, -1]

        decoder_input = target[:, :-1]   # (batch, 9)
        decoder_target = target[:, 1:]   # (batch, 9)
        step_losses = []
        for i in range(self.maxlen - 1):
            decoded_result, last_hidden = self.decoder(
                decoder_input=decoder_input[:, i],
                init_hidden=last_hidden,
                encoder_output=encoder_output)
            step_loss = self.criterion(decoded_result, decoder_target[:, i])
            step_losses.append(step_loss)

        stack_loss = torch.stack(step_losses, 1)  # (batch, maxlen)
        sum_loss = torch.sum(stack_loss, 1)  # sum over each row
        # Average each row first; each decoded row has 9 characters.
        avg_loss_batch = sum_loss / (target_lens.float() - 1)
        loss = torch.mean(avg_loss_batch)  # then average over all rows
        loss.backward()
        self.optimizer.step()
        return [('train_loss', loss.item())]

    def valid(self, context, target, target_lens):
        self.encoder.eval()
        self.decoder.eval()

        encoder_last_hidden, encoder_output = self.encoder(context)
        batch_size = encoder_last_hidden.size(0)
        hidden_size = encoder_last_hidden.size(1) // 2
        # (1, batch, n_hidden)
        last_hidden = encoder_last_hidden.view(batch_size, 2, -1)[:, -1, :].squeeze().unsqueeze(0)
        # (batch, len, n_hidden)
        encoder_output = encoder_output.view(batch_size, -1, 2, hidden_size)[:, :, -1]

        decoder_input = target[:, :-1]   # (batch, 9)
        decoder_target = target[:, 1:]   # (batch, 9)
        step_losses = []
        for i in range(self.maxlen - 1):
            decoded_result, last_hidden = self.decoder(
                decoder_input=decoder_input[:, i],
                init_hidden=last_hidden,
                encoder_output=encoder_output)
            step_loss = self.criterion(decoded_result, decoder_target[:, i])
            step_losses.append(step_loss)

        stack_loss = torch.stack(step_losses, 1)  # (batch, maxlen)
        sum_loss = torch.sum(stack_loss, 1)  # sum over each row
        # Average each row first; each decoded row has 9 characters.
        avg_loss_batch = sum_loss / (target_lens.float() - 1)
        loss = torch.mean(avg_loss_batch)  # then average over all rows
        return [('valid_loss', loss.item())]

    def test(self, title, title_list, batch_size):
        self.encoder.eval()
        self.decoder.eval()
        assert title.size(0) == 1

        tem = title[0][0:self.maxlen].unsqueeze(0)
        pred_poems = []
        title_tokens = [
            self.vocab[e] for e in title_list[0].tolist()
            if e not in [0, self.eos_id, self.go_id]
        ]
        pred_poems.append(title_tokens)

        for sent_id in range(4):
            context = tem
            if type(context) is list:
                vec_context = np.zeros((batch_size, self.maxlen), dtype=np.int64)
                for b_id in range(batch_size):
                    vec_context[b_id, :] = np.array(context[b_id])
                context = to_tensor(vec_context)

            encoder_last_hidden, encoder_output = self.encoder(context)
            batch_size = encoder_last_hidden.size(0)
            hidden_size = encoder_last_hidden.size(1) // 2
            # (1, 1, n_hidden)
            last_hidden = encoder_last_hidden.view(batch_size, 2, -1)[:, -1, :].unsqueeze(0)
            # (batch, len, n_hidden)
            encoder_output = encoder_output.view(batch_size, -1, 2, hidden_size)[:, :, -1]

            # decode_words is one complete line of the poem
            decode_words = self.decoder.testing(
                init_hidden=last_hidden,
                encoder_output=encoder_output,
                maxlen=self.maxlen,
                go_id=self.go_id,
                mode="greedy")
            decode_words = decode_words[0].tolist()

            if len(decode_words) > self.maxlen:
                tem = [decode_words[0:self.maxlen]]
            else:
                tem = [[0] * (self.maxlen - len(decode_words)) + decode_words]

            pred_tokens = [
                self.vocab[e] for e in decode_words[:-1]
                if e != self.eos_id and e != 0
            ]
            pred_poems.append(pred_tokens)

        gen = ''
        for line in pred_poems:
            true_str = " ".join(line)
            gen = gen + true_str + '\n'
        return gen

    def sample(self, title, context, repeat, go_id, end_id):
        self.encoder.eval()
        self.decoder.eval()

        encoder_last_hidden, encoder_output = self.encoder(context)
        batch_size = encoder_last_hidden.size(0)
        hidden_size = encoder_last_hidden.size(1) // 2
        # (1, batch, n_hidden)
        last_hidden = encoder_last_hidden.view(batch_size, 2, -1)[:, -1].unsqueeze(0)
        # (batch, len, n_hidden)
        encoder_output = encoder_output.view(batch_size, -1, 2, hidden_size)[:, :, -1]

        last_hidden = last_hidden.expand(1, repeat, hidden_size)
        encoder_output = encoder_output.expand(repeat, -1, hidden_size)
        sample_words, sample_lens = self.decoder.sampling(
            last_hidden, encoder_output, self.maxlen, go_id, end_id, "greedy")
        return sample_words, sample_lens

    def adjust_lr(self):
        self.lr_scheduler_AE.step()
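
# Illustrative only: the three poem models above (PoemWAE, CVAE, Seq2Seq) expose similar
# greedy test interfaces, so a driver can swap them behind one call site. `title_tensor`
# and `title_words` are hypothetical placeholders for an encoded title and its token ids;
# note that PoemWAE.test additionally expects per-line `headers` (which appear to be fixed
# first characters for each line) and Seq2Seq.test takes (title, title_list, batch_size).
def generate_poem(model, title_tensor, title_words, headers=None):
    if isinstance(model, PoemWAE):
        return model.test(title_tensor, title_words, headers)
    if isinstance(model, Seq2Seq):
        return model.test(title_tensor, title_words, batch_size=1)
    return model.test(title_tensor, title_words)  # CVAE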