Example #1
    def weight_inputs(self, inputs: BatchEncoding) -> List[float]:
        # Run the wrapped Hugging Face model with attention outputs enabled;
        # indexing with [-1] keeps only the per-layer attention tensors.
        device = self.transformer.model.device
        all_attentions = self.transformer.model(**inputs.to(device),
                                                output_attentions=True)[-1]
        # Collapse the selected heads' attention maps into one weight per token.
        weights = self._aggregate_attentions(all_attentions, self.heads,
                                             self.agg_strategy).detach().cpu()
        if self.normalize_weights:
            # L2-normalize; the 1e-10 floor guards against division by zero.
            norm = torch.linalg.norm(weights, ord=2)
            weights = torch.tensor(weights) / torch.max(
                norm, 1e-10 * torch.ones_like(norm))

        return weights.detach().cpu().numpy().tolist()
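
How the inputs argument might be prepared is not shown above; a minimal sketch, assuming a standard Hugging Face tokenizer and a hypothetical instance named weighter of the surrounding class (tokenizer choice and instance name are illustrative, not from the source):

from transformers import AutoTokenizer

# Any tokenizer matching the wrapped model works; "bert-base-uncased" is only
# an illustrative choice.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# return_tensors="pt" yields a BatchEncoding holding PyTorch tensors.
inputs = tokenizer("A short example sentence.", return_tensors="pt")
# token_weights = weighter.weight_inputs(inputs)  # hypothetical call, one weight per token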
Example #2
    def embed_inputs(self, inputs: BatchEncoding) -> List[List[List[float]]]:
        # Run the wrapped Hugging Face model with hidden states enabled;
        # indexing with [-1] keeps only the tuple of per-layer hidden states.
        device = self.transformer.model.device
        outputs = self.transformer.model(**inputs.to(device),
                                         output_hidden_states=True)[-1]
        # Reduce the per-layer hidden states to the embeddings this class exposes.
        embeddings_t = self._embedings_from_outputs(outputs)
        return embeddings_t.detach().cpu().tolist()
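
For reference, a standalone sketch of what the [-1] indexing yields when hidden states are requested from a plain Hugging Face model (model name and example text are illustrative only):

import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased")

inputs = tokenizer("A short example sentence.", return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs, output_hidden_states=True)[-1]
# hidden_states is a tuple with one tensor per layer (embedding layer plus
# each transformer block), each shaped (batch, sequence length, hidden size).
print(len(hidden_states), hidden_states[0].shape)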