Ejemplo n.º 1
0
 def __call__(self,
              input_word_ids,
              input_mask=None,
              input_type_ids=None,
              **kwargs):
     """Packs the ALBERT input tensors into one structure and forwards it.

     Args:
       input_word_ids: token-id input tensor.
       input_mask: optional input mask; packed as-is (may be None).
       input_type_ids: optional segment-id tensor; packed as-is (may be None).
       **kwargs: passed through to the base class ``__call__``.

     Returns:
       The result of the base class ``__call__`` on the packed inputs.
     """
     packed = tf_utils.pack_inputs([input_word_ids, input_mask, input_type_ids])
     return super(AlbertModel, self).__call__(packed, **kwargs)
Ejemplo n.º 2
0
 def __call__(self,
              pooled_output,
              sequence_output=None,
              masked_lm_positions=None,
              **kwargs):
     """Packs the pretraining inputs into one structure and forwards it.

     Args:
       pooled_output: pooled model output tensor.
       sequence_output: optional per-token output tensor (may be None).
       masked_lm_positions: optional masked-LM position tensor (may be None).
       **kwargs: passed through to the base class ``__call__``.

     Returns:
       The result of the base class ``__call__`` on the packed inputs.
     """
     packed = tf_utils.pack_inputs(
         [pooled_output, sequence_output, masked_lm_positions])
     return super(PretrainLayer, self).__call__(packed, **kwargs)
Ejemplo n.º 3
0
 def __call__(self,
              sequence_output,
              p_mask,
              cls_index,
              start_positions=None,
              **kwargs):
     """Packs the QA-head inputs into one structure and forwards it.

     Args:
       sequence_output: per-token model output tensor.
       p_mask: paragraph/question mask tensor.
       cls_index: index tensor locating the CLS token.
       start_positions: optional start-position labels (may be None).
       **kwargs: passed through to the base class ``__call__``.

     Returns:
       The result of the base class ``__call__`` on the packed inputs.
     """
     packed = tf_utils.pack_inputs(
         [sequence_output, p_mask, cls_index, start_positions])
     return super(ALBertQALayer, self).__call__(packed, **kwargs)
Ejemplo n.º 4
0
 def __call__(self,
              lm_output,
              lm_label_ids=None,
              lm_label_weights=None,
              **kwargs):
     """Packs the loss/metric inputs into one structure and forwards it.

     Args:
       lm_output: language-model output tensor.
       lm_label_ids: optional label-id tensor (may be None).
       lm_label_weights: optional label-weight tensor (may be None).
       **kwargs: passed through to the base class ``__call__``.

     Returns:
       The result of the base class ``__call__`` on the packed inputs.
     """
     packed = tf_utils.pack_inputs(
         [lm_output, lm_label_ids, lm_label_weights])
     return super(PretrainLossAndMetricLayer, self).__call__(packed, **kwargs)
Ejemplo n.º 5
0
 def __call__(self, input_tensor, attention_mask=None, **kwargs):
     """Packs the transformer inputs into one structure and forwards it.

     Args:
       input_tensor: input tensor for the transformer stack.
       attention_mask: optional attention mask; packed as-is (may be None).
       **kwargs: passed through to the base class ``__call__``.

     Returns:
       The result of the base class ``__call__`` on the packed inputs.
     """
     inputs = tf_utils.pack_inputs([input_tensor, attention_mask])
     # Pass the packed tensor positionally, consistent with the other
     # pack-and-forward wrappers in this file (which all call
     # `super(...).__call__(inputs, **kwargs)` rather than `inputs=inputs`).
     return super(Transformer, self).__call__(inputs, **kwargs)
Ejemplo n.º 6
0
 def __call__(self, from_tensor, to_tensor, attention_mask=None, **kwargs):
     """Packs the attention inputs into one structure and forwards it.

     Args:
       from_tensor: query-side input tensor.
       to_tensor: key/value-side input tensor.
       attention_mask: optional attention mask; packed as-is (may be None).
       **kwargs: passed through to the base class ``__call__``.

     Returns:
       The result of the base class ``__call__`` on the packed inputs.
     """
     packed = tf_utils.pack_inputs([from_tensor, to_tensor, attention_mask])
     return super(Attention, self).__call__(packed, **kwargs)
Ejemplo n.º 7
0
 def __call__(self, word_embeddings, token_type_ids=None, **kwargs):
     """Packs the embedding-postprocessing inputs and forwards them.

     Args:
       word_embeddings: word-embedding tensor.
       token_type_ids: optional segment-id tensor; packed as-is (may be None).
       **kwargs: passed through to the base class ``__call__``.

     Returns:
       The result of the base class ``__call__`` on the packed inputs.
     """
     packed = tf_utils.pack_inputs([word_embeddings, token_type_ids])
     return super(EmbeddingPostprocessor, self).__call__(packed, **kwargs)