def __call__(self, batch):
    """Collate a batch of (id, token_ids, is_humor) samples for inference.

    The ``is_humor`` label is deliberately not returned: this collator
    produces only what the forward pass needs (ids, padded token tensors,
    and the attention/pad mask).

    Args:
        batch: iterable of ``(myid, text_input, is_humor)`` triples, where
            ``text_input`` is a 1-D tensor of token indices (it is fed to
            ``pad_sequence``, which requires tensors).

    Returns:
        tuple: ``(myid, padded_inputs, inputs_pad_mask)`` where
            ``padded_inputs`` is a ``(batch, max_len)`` tensor padded with
            ``self.pad_indx`` and moved to ``self.device``.
    """
    # The label column is unused here; bind it to "_" to make that explicit.
    myid, text_input, _ = map(list, zip(*batch))
    input_lengths = torch.tensor(
        [len(s) for s in text_input], device=self.device)
    # attention mask
    max_length = max(input_lengths)
    inputs_pad_mask = pad_mask(
        input_lengths, max_length=max_length, device=self.device)
    # Pad variable-length sequences into a single (batch, max_len) tensor.
    padded_inputs = (
        pad_sequence(text_input, batch_first=True,
                     padding_value=self.pad_indx)
        .to(self.device))
    return myid, padded_inputs, inputs_pad_mask
def __call__(self, batch):
    """Collate (id, token_ids, humor_rating, humor_controversy) samples.

    Sample ids are deliberately not returned: this collator serves
    training batches and yields only model inputs and label tensors.

    Args:
        batch: iterable of ``(myid, text_input, humor_rating, humor_contr)``
            tuples, where ``text_input`` is a 1-D tensor of token indices
            (it is fed to ``pad_sequence``, which requires tensors).

    Returns:
        tuple: ``(padded_inputs, inputs_pad_mask, humor_rating, humor_contr)``
            with ``padded_inputs`` a ``(batch, max_len)`` tensor padded with
            ``self.pad_indx`` on ``self.device``; ``humor_rating`` is a float
            tensor and ``humor_contr`` a long tensor (presumably a regression
            target and a class index — confirm against the training loop).
    """
    # The id column is unused here; bind it to "_" to make that explicit.
    _, text_input, humor_rating, humor_contr = map(list, zip(*batch))
    input_lengths = torch.tensor(
        [len(s) for s in text_input], device=self.device)
    # attention mask
    max_length = max(input_lengths)
    inputs_pad_mask = pad_mask(
        input_lengths, max_length=max_length, device=self.device)
    # Pad variable-length sequences into a single (batch, max_len) tensor.
    padded_inputs = (
        pad_sequence(text_input, batch_first=True,
                     padding_value=self.pad_indx)
        .to(self.device))
    # Label dtypes: float for the rating, long for the controversy label.
    humor_rating = mktensor(humor_rating, dtype=torch.float)
    humor_contr = mktensor(humor_contr, dtype=torch.long)
    return padded_inputs, inputs_pad_mask, humor_rating, humor_contr