Example #1
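All of the snippets on this page implement output_spec() for model wrappers in LIT (the Learning Interpretability Tool). Each one assumes the conventional import, from lit_nlp.api import types as lit_types, and returns a spec: a dict mapping each output field name to a LitType that describes the field's shape and how it relates to fields in the input spec.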
def output_spec(self):
    spec = super().output_spec()  # has 'output_text'
    spec.update({
        "input_tokens": lit_types.Tokens(parent="input_text"),
        "encoder_final_embedding": lit_types.Embeddings(),
        # If target text is given, the following will also be populated.
        "target_tokens": lit_types.Tokens(parent="target_text"),
        "pred_tokens": lit_types.TokenTopKPreds(align="target_tokens"),
    })
    if self.config.num_to_generate > 1:
        spec["output_text"] = lit_types.GeneratedTextCandidates(
            parent="target_text")

    if self.config.output_attention:
        # Add attention for each layer.
        for i in range(self.num_layers):
            spec[f"encoder_layer_{i:d}_attention"] = lit_types.AttentionHeads(
                align_in="input_tokens", align_out="input_tokens")
            spec[f"decoder_layer_{i:d}_attention"] = lit_types.AttentionHeads(
                align_in="target_tokens", align_out="target_tokens")
    return spec
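Every parent= reference above names a field that must exist in the model's input_spec(). A minimal sketch of a compatible input spec, assuming both fields are plain TextSegments (the field names come from the example; this method is not from the original code):

def input_spec(self):
    # Hypothetical counterpart to the output_spec above.
    return {
        "input_text": lit_types.TextSegment(),
        # Optional reference text; "target_tokens" and "pred_tokens" above
        # are only populated when this field is provided.
        "target_text": lit_types.TextSegment(required=False),
    }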
Example #2
def output_spec(self):
    spec = {
        # The "parent" keyword tells LIT which field in the input spec we
        # should compare this to when computing metrics.
        "pred_tokens": lit_types.TokenTopKPreds(align="tokens"),
        "tokens": lit_types.Tokens(parent="text"),  # all tokens
    }
    # Add attention and embeddings from each layer.
    for i in range(self.num_layers):
        spec[f"layer_{i:d}_attention"] = lit_types.AttentionHeads(
            align_in="tokens", align_out="tokens")
        spec[f"layer_{i:d}_avg_embedding"] = lit_types.Embeddings()
    return spec
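Predictions returned by the model must be dicts keyed by the names in this spec. A hedged sketch of what a single record could look like for a 3-token input (all values and dimensions are illustrative, not from the original model):

import numpy as np

tokens = ["The", "cat", "sat"]
prediction = {
    "tokens": tokens,
    # TokenTopKPreds: one list of (token, score) pairs per position.
    "pred_tokens": [[("The", 0.9), ("A", 0.1)],
                    [("cat", 0.8), ("dog", 0.2)],
                    [("sat", 0.7), ("ran", 0.3)]],
    # AttentionHeads: <float>[num_heads, num_tokens, num_tokens].
    "layer_0_attention": np.zeros((12, len(tokens), len(tokens))),
    # Embeddings: a single <float>[emb_dim] vector.
    "layer_0_avg_embedding": np.zeros(768),
}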
Example #3
def output_spec(self):
    spec = {
        "input_tokens": lit_types.Tokens(parent="input_text"),
        "generation": lit_types.GeneratedText(parent="target_text"),
        "encoder_final_embedding": lit_types.Embeddings(),
        # If target text is given, the following will also be populated.
        "target_tokens": lit_types.Tokens(parent="target_text"),
        "pred_tokens": lit_types.TokenTopKPreds(align="target_tokens"),
        "rougeL": lit_types.Scalar(),
    }
    if self.config.output_attention:
        # Add attention for each layer.
        for i in range(self.num_layers):
            spec[f"encoder_layer_{i:d}_attention"] = lit_types.AttentionHeads(
                align_in="input_tokens", align_out="input_tokens")
            spec[f"decoder_layer_{i:d}_attention"] = lit_types.AttentionHeads(
                align_in="target_tokens", align_out="target_tokens")
    return spec
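"rougeL" here is a per-example scalar that the model computes itself rather than delegating to LIT's metrics components. One way it could be computed, assuming the rouge_score package (the original model may do this differently):

from rouge_score import rouge_scorer

_SCORER = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)

def _rouge_l(target: str, generation: str) -> float:
    # F-measure of ROUGE-L between the reference and the generated text.
    return _SCORER.score(target, generation)["rougeL"].fmeasure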
Example #4
def output_spec(self) -> lit_types.Spec:
    return {
        "src_tokens": lit_types.Tokens(parent="src_text"),
        "trg_text": lit_types.GeneratedText(parent="ref_text"),
        "trg_tokens": lit_types.Tokens(parent="trg_text"),
        "attention": lit_types.AttentionHeads(align_in="src_tokens",
                                              align_out="trg_tokens"),
        "pred_tokens": lit_types.TokenTopKPreds(align="trg_tokens",
                                                parent="trg_text"),
        "encoder_final_embedding": lit_types.Embeddings(),
        "ter": lit_types.Scalar(),
        "chrf3": lit_types.Scalar(),
    }
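"ter" and "chrf3" are likewise per-example scalars computed by the model. A sketch using sacrebleu (an assumption; the original code may use a different implementation):

from sacrebleu.metrics import CHRF, TER

_CHRF3 = CHRF(beta=3)  # chrF with recall weighted 3x, i.e. chrF3
_TER = TER()

def _mt_metrics(hypothesis: str, reference: str) -> dict:
    # Sentence-level scores for a single (hypothesis, reference) pair.
    return {
        "ter": _TER.sentence_score(hypothesis, [reference]).score,
        "chrf3": _CHRF3.sentence_score(hypothesis, [reference]).score,
    }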
Example #5
def output_spec(self):
    return {
        "tokens": lit_types.Tokens(parent="text"),
        "pred_tokens": lit_types.TokenTopKPreds(align="tokens"),
        "cls_emb": lit_types.Embeddings(),
    }
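To see how such a spec fits into a complete model, here is a hypothetical minimal lit_nlp.api.model.Model subclass consistent with Example #5. Only the output spec comes from the example; the class name, tokenization, and dummy scores are illustrative, and newer LIT releases use predict() rather than predict_minibatch() as the batch entry point.

from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types

class ToyLM(lit_model.Model):
    """Hypothetical wrapper matching the spec in Example #5."""

    def input_spec(self):
        return {"text": lit_types.TextSegment()}

    def output_spec(self):
        return {
            "tokens": lit_types.Tokens(parent="text"),
            "pred_tokens": lit_types.TokenTopKPreds(align="tokens"),
            "cls_emb": lit_types.Embeddings(),
        }

    def predict_minibatch(self, inputs):
        # Yield one output dict per input, keyed exactly like output_spec().
        for ex in inputs:
            tokens = ex["text"].split()
            yield {
                "tokens": tokens,
                # Trivial top-1 "predictions": each token predicts itself.
                "pred_tokens": [[(t, 1.0)] for t in tokens],
                "cls_emb": [0.0] * 8,
            }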