Example 1
 def __call__(self, q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat,
              r_w_bias, r_r_bias, r_s_bias, attn_mask, **kwargs):
   """Packs the positional tensors into one list and delegates to the base call."""
   packed = tf_utils.pack_inputs([
       q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat,
       r_w_bias, r_r_bias, r_s_bias, attn_mask,
   ])
   return super(RelativeAttention, self).__call__(packed, **kwargs)
Example 2
 def __call__(self, h, g, r, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed,
              attn_mask_h, attn_mask_g, mems, target_mapping, **kwargs):
   """Flattens all inputs into a single packed list before the base call."""
   tensors = [h, g, r, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed]
   tensors += [attn_mask_h, attn_mask_g, mems, target_mapping]
   packed = tf_utils.pack_inputs(tensors)
   return super(RelativeMultiheadAttention, self).__call__(packed, **kwargs)
Example 3
 def __call__(self,
              input_word_ids,
              input_mask=None,
              input_type_ids=None,
              **kwargs):
   """Bundles word ids, mask, and type ids into one packed input list."""
   packed = tf_utils.pack_inputs(
       [input_word_ids, input_mask, input_type_ids])
   return super(BertModel, self).__call__(packed, **kwargs)
Example 4
 def __call__(self,
              pooled_output,
              sequence_output=None,
              masked_lm_positions=None,
              **kwargs):
     """Packs the pretrain-layer inputs and delegates to the base call.

     NOTE(review): unlike every sibling wrapper in this file, the original
     silently dropped extra keyword arguments. `**kwargs` is now accepted
     and forwarded for consistency; existing callers are unaffected since
     the added parameter is purely additive.
     """
     inputs = tf_utils.pack_inputs(
         [pooled_output, sequence_output, masked_lm_positions])
     return super(BertPretrainLayer, self).__call__(inputs, **kwargs)
Example 5
 def __call__(self,
              lm_output,
              sentence_output=None,
              lm_label_ids=None,
              lm_label_weights=None,
              sentence_labels=None,
              **kwargs):
     """Packs the loss/metric inputs and delegates to the base call.

     NOTE(review): the sibling wrappers all forward `**kwargs` to the base
     `__call__`; this one did not. `**kwargs` is added and forwarded for
     consistency — a backward-compatible, purely additive change.
     """
     inputs = tf_utils.pack_inputs([
         lm_output, sentence_output, lm_label_ids, lm_label_weights,
         sentence_labels
     ])
     return super(BertPretrainLossAndMetricLayer, self).__call__(
         inputs, **kwargs)
 def __call__(self, input_tensor, attention_mask=None, **kwargs):
     """Packs the tensor and optional mask, then delegates to the base call."""
     packed = tf_utils.pack_inputs([input_tensor, attention_mask])
     return super(TransformerBlock, self).__call__(packed, **kwargs)
 def __call__(self, from_tensor, to_tensor, attention_mask=None, **kwargs):
     """Bundles the attention inputs into one list and invokes the base call."""
     packed = tf_utils.pack_inputs([from_tensor, to_tensor, attention_mask])
     return super(Attention, self).__call__(packed, **kwargs)
 def __call__(self, word_embeddings, token_type_ids=None, **kwargs):
     """Packs embeddings plus optional type ids and calls the base layer."""
     packed = tf_utils.pack_inputs([word_embeddings, token_type_ids])
     return super(EmbeddingPostprocessor, self).__call__(packed, **kwargs)
Example 9
 def __call__(self, hidden, target, lookup_table, target_mask, **kwargs):
   """Packs the LM-loss inputs into a single list and calls the base layer."""
   packed = tf_utils.pack_inputs([hidden, target, lookup_table, target_mask])
   return super(LMLossLayer, self).__call__(packed, **kwargs)
Example 10
 def __call__(self, hidden, labels, **kwargs):
   """Packs hidden states with labels, then delegates to the base call."""
   packed = tf_utils.pack_inputs([hidden, labels])
   return super(ClassificationLossLayer, self).__call__(packed, **kwargs)
Example 11
 def __call__(self, word_embeddings, token_type_ids=None, **kwargs):
     """Packs embeddings with the optional type ids and calls the base layer."""
     packed = tf_utils.pack_inputs([word_embeddings, token_type_ids])
     return super(EmbeddingPostprocessor, self).__call__(packed, **kwargs)  # pytype: disable=attribute-error  # typed-keras