Example 1
    def output_spec(self) -> Spec:
        """Return the model's output field spec.

        Covers tokens (overall and per-segment), the prediction field
        (regression score or multiclass probabilities), the CLS embedding,
        optional token gradients, and one attention field per layer.
        """
        cfg = self.config
        spec = {"tokens": lit_types.Tokens()}
        spec[f"tokens_{cfg.text_a_name}"] = lit_types.Tokens()
        if cfg.text_b_name:
            spec[f"tokens_{cfg.text_b_name}"] = lit_types.Tokens()
        # Regression models expose a single score; classifiers expose probas.
        if self.is_regression:
            spec["score"] = lit_types.RegressionScore(parent=cfg.label_name)
        else:
            spec["probas"] = lit_types.MulticlassPreds(
                parent=cfg.label_name,
                vocab=cfg.labels,
                null_idx=cfg.null_label_idx)
        spec["cls_emb"] = lit_types.Embeddings()

        # Token gradients are emitted only when requested via the config.
        if cfg.compute_grads:
            spec[f"token_grad_{cfg.text_a_name}"] = lit_types.TokenGradients(
                align=f"tokens_{cfg.text_a_name}")
            if cfg.text_b_name:
                spec[f"token_grad_{cfg.text_b_name}"] = lit_types.TokenGradients(
                    align=f"tokens_{cfg.text_b_name}")

        # One attention field per transformer layer.
        for layer in range(self.model.config.num_hidden_layers):
            spec[f"layer_{layer}/attention"] = lit_types.AttentionHeads(
                align=("tokens", "tokens"))

        return spec
Example 2
  def output_spec(self) -> Spec:
    """Return the model's output field spec.

    Covers tokens, the prediction field (score or probas), CLS and per-layer
    average embeddings, the fields used for Integrated Gradients
    (input_embs_*, cls_grad, grad_class), optional token gradients, and one
    attention field per layer.
    """
    cfg = self.config
    spec = {"tokens": lit_types.Tokens()}
    spec[f"tokens_{cfg.text_a_name}"] = lit_types.Tokens(
        parent=cfg.text_a_name)
    if cfg.text_b_name:
      spec[f"tokens_{cfg.text_b_name}"] = lit_types.Tokens(
          parent=cfg.text_b_name)
    # Regression models expose a single score; classifiers expose probas.
    if self.is_regression:
      spec["score"] = lit_types.RegressionScore(parent=cfg.label_name)
    else:
      spec["probas"] = lit_types.MulticlassPreds(
          parent=cfg.label_name,
          vocab=cfg.labels,
          null_idx=cfg.null_label_idx)
    spec["cls_emb"] = lit_types.Embeddings()
    # Average embeddings, one per layer including embeddings.
    for layer in range(1 + self.model.config.num_hidden_layers):
      spec[f"layer_{layer}/avg_emb"] = lit_types.Embeddings()

    spec["cls_grad"] = lit_types.Gradients(
        grad_for="cls_emb", grad_target_field_key="grad_class")

    # The input_embs_ and grad_class fields are used for Integrated Gradients.
    spec[f"input_embs_{cfg.text_a_name}"] = lit_types.TokenEmbeddings(
        align=f"tokens_{cfg.text_a_name}")
    if cfg.text_b_name:
      spec[f"input_embs_{cfg.text_b_name}"] = lit_types.TokenEmbeddings(
          align=f"tokens_{cfg.text_b_name}")

    # Token gradients are emitted only when requested via the config.
    if cfg.compute_grads:
      spec["grad_class"] = lit_types.CategoryLabel(required=False,
                                                   vocab=cfg.labels)
      spec[f"token_grad_{cfg.text_a_name}"] = lit_types.TokenGradients(
          align=f"tokens_{cfg.text_a_name}",
          grad_for=f"input_embs_{cfg.text_a_name}",
          grad_target_field_key="grad_class")
      if cfg.text_b_name:
        spec[f"token_grad_{cfg.text_b_name}"] = lit_types.TokenGradients(
            align=f"tokens_{cfg.text_b_name}",
            grad_for=f"input_embs_{cfg.text_b_name}",
            grad_target_field_key="grad_class")

    # One attention field per layer; layer 0 is the embedding layer,
    # so attention starts at index 1.
    for layer in range(self.model.config.num_hidden_layers):
      spec[f"layer_{layer+1}/attention"] = lit_types.AttentionHeads(
          align_in="tokens", align_out="tokens")
    return spec
Example 3
 def output_spec(self) -> lit_types.Spec:
     """Return the regression model's output fields."""
     spec = {"tokens": lit_types.Tokens()}
     spec["logits"] = lit_types.RegressionScore()
     spec["cls_emb"] = lit_types.Embeddings()
     spec["token_grad_sentence"] = lit_types.TokenGradients(align="tokens")
     return spec
Example 4
 def output_spec(self) -> lit_types.Spec:
     """Return the classifier's output fields."""
     spec = {"tokens": lit_types.Tokens()}
     spec["probas"] = lit_types.MulticlassPreds(parent="label", vocab=self._labels)
     spec["cls_emb"] = lit_types.Embeddings()
     spec["token_grad_sentence"] = lit_types.TokenGradients(align="tokens")
     return spec
Example 5
 def output_spec(self):
   """Return the output fields, including those used for gradient attribution."""
   spec = {}
   spec['probas'] = lit_types.MulticlassPreds(
       parent='label',
       vocab=['0', '1'],
       null_idx=0)
   spec['input_embs'] = lit_types.TokenEmbeddings(align='tokens')
   # Gradients of the input embeddings w.r.t. the selected grad_class.
   spec['input_embs_grad'] = lit_types.TokenGradients(
       align='tokens',
       grad_for='input_embs',
       grad_target='grad_class')
   spec['tokens'] = lit_types.Tokens()
   spec['grad_class'] = lit_types.CategoryLabel(vocab=['0', '1'])
   return spec
 def output_spec(self) -> lit_types.Spec:
     """Give the output specifications."""
     spec = {}
     spec["tokens"] = lit_types.Tokens()
     spec["probas"] = lit_types.MulticlassPreds(parent="label", vocab=self.LABELS)
     spec["cls_emb"] = lit_types.Embeddings()

     # Token gradients only when the model is configured to compute them.
     if self.compute_grads:
         spec["token_grad_sentence"] = lit_types.TokenGradients(align="tokens")

     # One attention field per transformer layer.
     for layer in range(self.model.config.num_hidden_layers):
         spec[f"layer_{layer}/attention"] = lit_types.AttentionHeads(
             align=("tokens", "tokens"))
     return spec
Example 7
 def output_spec(self) -> lit_types.Spec:
     """Return the tagging model's output fields."""
     ret = {
         "tokens": lit_types.Tokens(),
         "bio_tags": lit_types.SequenceTags(align="tokens"),
         "token_ids": lit_types.SequenceTags(align="tokens"),
         "grads": lit_types.TokenGradients(align="tokens"),
         "probas": lit_types.MulticlassPreds(parent="bio_tags",
                                             vocab=self.LABELS),
     }
     # One attention field per transformer layer.
     for layer_idx in range(self.model.config.num_hidden_layers):
         ret[f'layer_{layer_idx}/attention'] = lit_types.AttentionHeads(
             align=("tokens", "tokens"))
     return ret