def __call__(self, input_word_ids, input_mask=None, input_type_ids=None, **kwargs):
    """Packs word ids, mask, and segment ids into one input, then dispatches.

    The base layer expects a single packed input; `tf_utils.pack_inputs`
    performs that packing (presumably substituting placeholders for the
    optional None entries — confirm against tf_utils).
    """
    packed = tf_utils.pack_inputs([input_word_ids, input_mask, input_type_ids])
    return super(AlbertModel, self).__call__(packed, **kwargs)
def __call__(self, pooled_output, sequence_output=None, masked_lm_positions=None, **kwargs):
    """Packs pretraining-head inputs into one tensor list and dispatches.

    Bundles the pooled output, sequence output, and masked-LM positions via
    `tf_utils.pack_inputs` before handing off to the base layer's __call__.
    """
    packed = tf_utils.pack_inputs([pooled_output, sequence_output, masked_lm_positions])
    return super(PretrainLayer, self).__call__(packed, **kwargs)
def __call__(self, sequence_output, p_mask, cls_index, start_positions=None, **kwargs):
    """Packs QA-head inputs into one list and dispatches to the base layer.

    Bundles the encoder sequence output, the paragraph mask, the CLS index,
    and (optionally) the start positions via `tf_utils.pack_inputs`.
    """
    packed = tf_utils.pack_inputs(
        [sequence_output, p_mask, cls_index, start_positions])
    return super(ALBertQALayer, self).__call__(packed, **kwargs)
def __call__(self, lm_output, lm_label_ids=None, lm_label_weights=None, **kwargs):
    """Packs LM outputs, labels, and label weights, then dispatches.

    Combines the three (possibly None) arguments into a single packed input
    with `tf_utils.pack_inputs` before invoking the base layer's __call__.
    """
    packed = tf_utils.pack_inputs(
        [lm_output, lm_label_ids, lm_label_weights])
    return super(PretrainLossAndMetricLayer, self).__call__(packed, **kwargs)
def __call__(self, input_tensor, attention_mask=None, **kwargs):
    """Packs the input tensor with its (optional) attention mask and dispatches.

    The pair is combined via `tf_utils.pack_inputs` so the base layer's
    __call__ receives a single packed input.
    """
    packed = tf_utils.pack_inputs([input_tensor, attention_mask])
    return super(Transformer, self).__call__(inputs=packed, **kwargs)
def __call__(self, from_tensor, to_tensor, attention_mask=None, **kwargs):
    """Packs query-side, key/value-side, and mask tensors, then dispatches.

    Combines `from_tensor`, `to_tensor`, and the optional attention mask
    with `tf_utils.pack_inputs` before invoking the base layer's __call__.
    """
    packed = tf_utils.pack_inputs([from_tensor, to_tensor, attention_mask])
    return super(Attention, self).__call__(packed, **kwargs)
def __call__(self, word_embeddings, token_type_ids=None, **kwargs):
    """Packs word embeddings with optional token-type ids and dispatches.

    The two arguments are combined into one packed input via
    `tf_utils.pack_inputs` before calling the base layer's __call__.
    """
    packed = tf_utils.pack_inputs([word_embeddings, token_type_ids])
    return super(EmbeddingPostprocessor, self).__call__(packed, **kwargs)