def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
    XMB = batch["sequences"][:, :start_idx]
    MMB = batch["attention_mask"][:, :start_idx]

    XMB = model_utils.prepare_position_embeddings(
        self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))

    lm_probs = F.log_softmax(model(
        XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)

    # Greedy decoding: take the most likely token at the last position
    values, indices = lm_probs[:, -1, :].max(dim=-1)
    seqs = indices.clone().unsqueeze(1)

    loss = values
    counts = 1
    next_pos = XMB[:, -1:, 1] + 1
    next_x = torch.cat((indices.view(-1, 1), next_pos), -1).unsqueeze(1)

    XMB = torch.cat((XMB, next_x), 1)
    MMB = torch.cat([MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)

    for _ in range(self.opt.eval.smax):
        lm_probs = F.log_softmax(model(
            XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)

        # Greedily pick the next token and accumulate its log-probability
        values, next_idx = lm_probs[:, -1, :].max(dim=-1)

        loss += values
        counts += 1

        next_idx = next_idx.unsqueeze(1)
        seqs = torch.cat([seqs, next_idx], 1)

        if (next_idx.item() == self.end_token) or (_ == end_len - 1):
            break

        XMB, MMB = self.append_batch(XMB, next_idx, MMB)

    beams = []
    for beam in seqs:
        beams.append(" ".join("".join(
            [data_loader.vocab_decoder[tok.item()].replace(
                '</w>', ' ').replace('\n', '')
             for tok in beam if tok != self.end_token]).split()))

    sampling_result = {
        "sequence": beams[0],
        "beams": beams,
        "beam_losses": [loss.item()],
        "loss": loss.item(),
        "beam_lengths": [counts],
        "length": counts
    }

    return sampling_result
def _comet_embs(self, comet_input, comet_mask):
    batch_size, num_comet_rels = comet_input.size(0), comet_input.size(1)

    # Flatten (batch, relations) so each relation sequence is encoded independently
    comet_input = comet_input.view(batch_size * num_comet_rels, -1)
    comet_mask = comet_mask.view(batch_size * num_comet_rels, -1).float()

    comet_input_with_positions = prepare_position_embeddings(
        None, self.comet_encoder.encoder, comet_input.unsqueeze(-1))

    # Use the final hidden state of each sequence as its COMET embedding
    comet_embs = self.comet_model.transformer(
        comet_input_with_positions.unsqueeze(1),
        sequence_mask=comet_mask)[:, -1, :]

    return comet_embs.view(batch_size, num_comet_rels, -1)
def batch_conceptnet_generate(opt, nums, losses, batch_variables,
                              eval_mode=False, tracking_mode=False):
    data_loader = batch_variables["data"]
    model = batch_variables["model"]
    split = batch_variables["split"]
    category = batch_variables["category"]

    batch, reset = data_loader.sample_batch(
        split, bs=opt.train.dynamic.bs, cat=category)

    input_ = model_utils.prepare_position_embeddings(
        opt, data_loader.vocab_encoder, batch["sequences"].unsqueeze(-1))
    attention_mask = batch["attention_mask"]
    loss_mask = batch["loss_mask"]

    # Targets are the input tokens shifted left by one position
    targets = input_.squeeze(0)[:, 1:, 0].contiguous().view(-1)

    loss, dist = mle_steps(
        opt.net.model, model, input_[:, :-1, :], targets,
        attention_mask[:, :-1], loss_reduction="none")

    # Set loss name
    if not eval_mode or batch_variables["category"] == "positive":
        micro_name = "total_micro"
        macro_name = "total_macro"
    else:
        micro_name = "negative_micro"
        macro_name = "negative_macro"

    length = loss_mask.sum(1)
    bs = input_.size(0)

    final_loss = (loss * loss_mask).sum(1)

    update_generation_losses(losses, nums, micro_name, macro_name, bs,
                             length, (loss * loss_mask).sum(1), split)

    final_loss = final_loss / length

    outputs = {"loss": final_loss.sum(), "nums": nums, "reset": reset}

    if tracking_mode:
        outputs["tracking"] = final_loss.squeeze().tolist()

    return outputs
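# Illustrative sketch (not part of the original code): the masked-loss
# normalization used in batch_conceptnet_generate, on toy numbers.
# loss_mask marks target (non-context) positions, so each sequence's summed
# loss is divided by its own target length.
import torch

toy_loss = torch.tensor([[0.5, 0.3, 0.2, 0.0],
                         [0.4, 0.1, 0.0, 0.0]])      # per-token losses, padded
toy_loss_mask = torch.tensor([[1., 1., 1., 0.],
                              [1., 1., 0., 0.]])     # 1 on target tokens only

toy_length = toy_loss_mask.sum(1)                    # [3., 2.]
toy_final_loss = (toy_loss * toy_loss_mask).sum(1)   # [1.0, 0.5]
print(toy_final_loss / toy_length)                   # [0.3333, 0.25]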
tmp = batch["sequences"][:, :context_size_event + 1] XMB = batch["sequences"][:, :context_size_event + 1] Ref = batch["sequences"][:, context_size_event + 1:] MMB = batch["attention_mask"][:, :context_size_event + 1] init = "".join([ text_encoder.decoder[i].replace('</w>', ' ').replace("<blank>", "___ ") for i in XMB[:, :-1].squeeze().tolist() if i ]) attr = text_encoder.decoder[XMB[:, -1].item()].strip("<>") XMB = model_utils.prepare_position_embeddings(opt, text_encoder.encoder, XMB.unsqueeze(-1)) sequence_all["event"] = init sequence_all["effect_type"] = attr lm_probs = lm_model(XMB.unsqueeze(1), sequence_mask=MMB) dist = lm_probs[:, -1, :].squeeze() values, indices = lm_probs[:, -1, :].topk(args.k) seqs = indices.t().clone() ended = (seqs == end_token).float() XMB = XMB.repeat(args.k, 1, 1) MMB = MMB.repeat(args.k, 1) next_pos = XMB[:, -1:, 1] + 1
def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
    # start_idx = context_size_event + 1
    # start_idx = max_e1 + max_r
    # end_idx = context_size_effect - 1
    # end_idx = max_e2
    XMB = batch["sequences"][:, :start_idx]
    MMB = batch["attention_mask"][:, :start_idx]

    XMB = model_utils.prepare_position_embeddings(
        self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))

    tokens = []
    beam_losses = []

    # Beam Search
    beam_lls, beam_toks, beam_seqs = None, None, None

    lm_probs = F.log_softmax(
        model(XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
    dist = lm_probs[:, -1, :].squeeze()
    beam_lls, beam_toks = dist.topk(self.opt.eval.bs)
    beam_losses.append(beam_lls)

    ended = (beam_toks == self.end_token).float()
    counts = (2 - ended)
    beam_toks = beam_toks.unsqueeze(1)
    beam_seqs = beam_toks.clone()

    XMB = XMB.repeat(self.opt.eval.bs, 1, 1)
    MMB = MMB.repeat(self.opt.eval.bs, 1)
    next_pos = XMB[:, -1:, 1] + 1
    next_x = torch.cat((beam_toks, next_pos), -1).unsqueeze(1)
    XMB = torch.cat((XMB, next_x), 1)
    MMB = torch.cat(
        [MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)

    for _ in range(end_len):
        # Compute distribution for current beam
        lm_probs = F.log_softmax(
            model(XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
        dist = lm_probs[:, -1, :].squeeze()

        # Get hypothesis tokens for distribution
        hyp_beam_lls, hyp_beam_toks = dist.topk(self.opt.eval.bs)

        # Compute masks and expand beam
        expanded_ended = ended.unsqueeze(1).repeat(1, self.opt.eval.bs)
        hypothesis_mask = expanded_ended * self.kill_mask + (1 - expanded_ended)

        current_beam_lls = beam_lls.unsqueeze(1).repeat(
            1, self.opt.eval.bs).view(self.opt.eval.bs ** 2)

        # Compute losses of hypotheses, masking those that have ended
        hyp_beam_lls = (hyp_beam_lls.view(self.opt.eval.bs ** 2) *
                        hypothesis_mask.view(-1)) + current_beam_lls

        # Get normalizer for sequences
        temp_counts = counts.unsqueeze(1).repeat(
            1, self.opt.eval.bs).view(self.opt.eval.bs ** 2)

        # Select the beams with the best length-normalized log-likelihood
        beam_lls, top_beam_idxs = (hyp_beam_lls / temp_counts).topk(
            self.opt.eval.bs)

        # Update placements in beam based on selection
        beam_losses = [
            i.index_select(0, top_beam_idxs // self.opt.eval.bs)
            for i in beam_losses
        ]
        ended = ended.index_select(0, top_beam_idxs // self.opt.eval.bs)
        counts = temp_counts.index_select(0, top_beam_idxs)

        # Save beam losses
        beam_losses.append(beam_lls * counts)

        # Update beam tokens
        ended_mask = (1 - ended).long()
        end_replacement = (self.end_token * ended).long()
        next_toks = hyp_beam_toks.view(-1)[top_beam_idxs]
        beam_toks = next_toks * ended_mask + end_replacement

        # Update ended and counts
        ended = ended + (beam_toks == self.end_token).float() * (1 - ended)
        counts = counts + (1 - ended)

        # Update beam sequences
        beam_seqs = beam_seqs.t().repeat(self.opt.eval.bs, 1).t().contiguous().view(
            self.opt.eval.bs ** 2, -1)[top_beam_idxs]
        beam_seqs = torch.cat((beam_seqs, beam_toks.unsqueeze(1)), dim=1)

        # Expand the model inputs to bs**2 hypotheses and keep only the
        # selected beams (the transposes preserve the per-beam ordering)
        XMB = XMB.transpose(0, 1).transpose(1, 2).repeat(
            self.opt.eval.bs, 1, 1).transpose(2, 1).transpose(
            1, 0).contiguous().view(self.opt.eval.bs ** 2,
                                    XMB.size(1), XMB.size(2))[top_beam_idxs]

        XMB, MMB = self.append_batch(XMB, beam_toks, MMB)

        if (beam_toks == self.end_token).sum().item() == self.opt.eval.bs:
            break

    beams = []
    for beam in beam_seqs:
        beams.append(" ".join("".join([
            data_loader.vocab_decoder[tok.item()].replace(
                '</w>', ' ').replace('\n', '')
            for tok in beam if tok != self.end_token
        ]).split()))

    sampling_result = {
        "sequence": beams[0],
        "beams": beams,
        "beam_losses": beam_lls.tolist(),
        "loss": beam_lls[0].item(),
        "beam_lengths": counts.tolist(),
        "length": counts[0].item()
    }

    return sampling_result
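# Hedged sketch (not part of the original code) of the beam-score bookkeeping
# above, on toy numbers with bs = 2 beams. self.kill_mask is not shown in this
# listing; here it is ASSUMED to be a (bs, bs) matrix of a large positive
# constant with zeros in its first column, so that multiplying the (negative)
# candidate log-probs removes every hypothesis of an ended beam except one
# frozen copy whose score stays at the beam's current log-likelihood.
import torch

bs = 2
beam_lls = torch.tensor([-1.0, -2.5])          # current beam log-likelihoods
hyp_beam_lls = torch.tensor([[-0.1, -0.7],     # candidate log-probs per beam
                             [-0.2, -0.9]])
ended = torch.tensor([0., 1.])                 # second beam already ended
counts = torch.tensor([3., 2.])                # generated lengths so far

kill_mask = torch.full((bs, bs), 9000.)        # assumed structure, see note above
kill_mask[:, 0] = 0

expanded_ended = ended.unsqueeze(1).repeat(1, bs)
hypothesis_mask = expanded_ended * kill_mask + (1 - expanded_ended)

current = beam_lls.unsqueeze(1).repeat(1, bs).view(bs ** 2)
scores = hyp_beam_lls.view(bs ** 2) * hypothesis_mask.view(-1) + current

temp_counts = counts.unsqueeze(1).repeat(1, bs).view(bs ** 2)
new_beam_lls, top_idxs = (scores / temp_counts).topk(bs)
print(new_beam_lls, top_idxs)                  # the bs hypotheses kept next step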
def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
    # start_idx = context_size_event + 1
    # start_idx = max_e1 + max_r
    # end_idx = context_size_effect - 1
    # end_idx = max_e2
    XMB = batch["sequences"][:, :start_idx]
    MMB = batch["attention_mask"][:, :start_idx]

    XMB = model_utils.prepare_position_embeddings(
        self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))

    lm_probs = F.log_softmax(
        model(XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)

    values, indices = lm_probs[:, -1, :].topk(self.opt.eval.k)
    seqs = indices.t().clone()

    losses = -values.view(-1, 1)

    ended = (seqs == self.end_token).float()
    counts = (1 - ended)

    XMB = XMB.repeat(self.opt.eval.k, 1, 1)
    MMB = MMB.repeat(self.opt.eval.k, 1)
    next_pos = XMB[:, -1:, 1] + 1
    next_x = torch.cat(
        (indices.view(self.opt.eval.k, -1), next_pos), -1).unsqueeze(1)
    XMB = torch.cat((XMB, next_x), 1)
    MMB = torch.cat(
        [MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)

    for _ in range(end_len):
        lm_probs = F.log_softmax(
            model(XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)

        # Sample the next token from the top k candidates
        values, indices = lm_probs[:, -1, :].topk(self.opt.eval.k)
        choice = torch.multinomial(values.exp(), 1)
        next_idx = indices.gather(-1, choice)

        ended = ended + (next_idx == self.end_token).float() * (1 - ended)

        next_idx = next_idx * (1 - ended).long() + ended.long() * self.end_token

        counts += (1 - ended)

        seqs = torch.cat([seqs, next_idx], 1)

        if ended.sum().item() == self.opt.eval.k:
            break

        losses -= values.gather(-1, choice) * (1 - ended)

        XMB, MMB = self.append_batch(XMB, next_idx, MMB)

    beams = []
    for beam in seqs:
        beams.append(" ".join("".join([
            data_loader.vocab_decoder[tok.item()].replace(
                '</w>', ' ').replace('\n', '')
            for tok in beam if tok != self.end_token
        ]).split()))

    sampling_result = {
        "sequence": beams[0],
        "beams": beams,
        "beam_losses": losses.squeeze().tolist(),
        "loss": losses[0].item(),
        "beam_lengths": counts.long().squeeze().tolist(),
        "length": counts[0].long().item()
    }

    return sampling_result
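# Minimal, self-contained sketch (not part of the original code) of the
# top-k sampling step used above: keep the k most likely tokens, weight them
# by their probabilities, and draw one. The toy logits stand in for the
# language model's distribution at the last position.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(1, 50)                    # one sequence, toy vocab of 50
k = 5

log_probs = F.log_softmax(logits, dim=-1)
values, indices = log_probs.topk(k, dim=-1)    # top-k log-probs and token ids
choice = torch.multinomial(values.exp(), 1)    # sample within the top-k set
next_token = indices.gather(-1, choice)        # map back to vocabulary ids
print(next_token)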