def _forward(self, is_training, split_placeholders, **kwargs):
    # Pretraining forward pass: MLM head plus sentence-order-prediction head.
    encoder = ALBERTEncoder(
        albert_config=self.albert_config,
        is_training=is_training,
        input_ids=split_placeholders['input_ids'],
        input_mask=split_placeholders['input_mask'],
        segment_ids=split_placeholders['segment_ids'],
        scope='bert',
        drop_pooler=self._drop_pooler,
        **kwargs)
    decoder = ALBERTDecoder(
        albert_config=self.albert_config,
        is_training=is_training,
        encoder=encoder,
        masked_lm_positions=split_placeholders['masked_lm_positions'],
        masked_lm_ids=split_placeholders['masked_lm_ids'],
        masked_lm_weights=split_placeholders['masked_lm_weights'],
        sentence_order_labels=split_placeholders.get('sentence_order_labels'),
        sample_weight=split_placeholders.get('sample_weight'),
        scope_lm='cls/predictions',
        scope_cls='cls/seq_relationship',
        name='SOP',
        **kwargs)
    total_loss, losses, probs, preds = decoder.get_forward_outputs()
    return total_loss, losses, probs, preds
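
# NOTE: a minimal NumPy sketch of how `masked_lm_positions` typically
# selects token representations from the encoder's sequence output before
# the LM head is applied (the standard BERT-style gather-by-flat-index
# trick). `gather_masked_positions` is a hypothetical helper for
# illustration, not part of this codebase; the real decoder may differ.
import numpy as np

def gather_masked_positions(sequence_output, masked_lm_positions):
    # sequence_output: [batch_size, seq_length, hidden_size]
    # masked_lm_positions: [batch_size, max_predictions_per_seq]
    batch_size, seq_length, hidden_size = sequence_output.shape
    # Flatten batch and sequence dims, then offset positions per example.
    flat_offsets = (np.arange(batch_size) * seq_length)[:, None]
    flat_positions = (masked_lm_positions + flat_offsets).reshape(-1)
    flat_sequence = sequence_output.reshape(-1, hidden_size)
    return flat_sequence[flat_positions]  # [batch * max_preds, hidden]

# Example: 2 sequences, 8 tokens, 4-dim hidden states, 3 masks each.
seq_out = np.random.randn(2, 8, 4)
positions = np.array([[1, 4, 6], [2, 3, 7]])
assert gather_masked_positions(seq_out, positions).shape == (6, 4)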
def _get_encoder(model_name):
    # Closure: relies on `self`, `is_training`, `split_placeholders` and
    # `kwargs` from the enclosing `_forward` scope. RoBERTa shares the
    # BERT encoder; ELECTRA uses it under a different variable scope.
    if model_name in ('bert', 'roberta'):
        sketchy_encoder = BERTEncoder(
            bert_config=self.bert_config,
            is_training=is_training,
            input_ids=split_placeholders['input_ids'],
            input_mask=split_placeholders['input_mask'],
            segment_ids=split_placeholders['segment_ids'],
            scope='bert',
            **kwargs)
    elif model_name == 'albert':
        sketchy_encoder = ALBERTEncoder(
            albert_config=self.bert_config,
            is_training=is_training,
            input_ids=split_placeholders['input_ids'],
            input_mask=split_placeholders['input_mask'],
            segment_ids=split_placeholders['segment_ids'],
            scope='bert',
            **kwargs)
    elif model_name == 'electra':
        sketchy_encoder = BERTEncoder(
            bert_config=self.bert_config,
            is_training=is_training,
            input_ids=split_placeholders['input_ids'],
            input_mask=split_placeholders['input_mask'],
            segment_ids=split_placeholders['segment_ids'],
            scope='electra',
            **kwargs)
    else:
        raise ValueError('Unsupported model name: %s' % model_name)
    return sketchy_encoder
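
# NOTE: a table-driven alternative to the if/elif dispatch above, which
# keeps the per-model differences (encoder class, config keyword, scope)
# in one place. A sketch only: `ENCODER_SPECS` and `build_encoder` are
# illustrative names, not part of this codebase; BERTEncoder and
# ALBERTEncoder are the classes used above.
ENCODER_SPECS = {
    'bert':    (BERTEncoder,   'bert_config',   'bert'),
    'roberta': (BERTEncoder,   'bert_config',   'bert'),
    'albert':  (ALBERTEncoder, 'albert_config', 'bert'),
    'electra': (BERTEncoder,   'bert_config',   'electra'),
}

def build_encoder(model_name, config, is_training, placeholders, **kwargs):
    if model_name not in ENCODER_SPECS:
        raise ValueError('Unsupported model name: %s' % model_name)
    encoder_cls, config_arg, scope = ENCODER_SPECS[model_name]
    return encoder_cls(
        is_training=is_training,
        input_ids=placeholders['input_ids'],
        input_mask=placeholders['input_mask'],
        segment_ids=placeholders['segment_ids'],
        scope=scope,
        **{config_arg: config},
        **kwargs)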
def _forward(self, is_training, split_placeholders, **kwargs):
    encoder = ALBERTEncoder(
        albert_config=self.albert_config,
        is_training=is_training,
        input_ids=split_placeholders['input_ids'],
        input_mask=split_placeholders['input_mask'],
        segment_ids=split_placeholders['segment_ids'],
        scope='bert',
        **kwargs)
    encoder_output = encoder.get_sequence_output()
    decoder = MRCDecoder(
        is_training=is_training,
        input_tensor=encoder_output,
        label_ids=split_placeholders['label_ids'],
        sample_weight=split_placeholders.get('sample_weight'),
        scope='mrc',
        **kwargs)
    total_loss, losses, probs, preds = decoder.get_forward_outputs()
    return total_loss, losses, probs, preds
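
# NOTE: the MRC head above consumes the token-level sequence output,
# while the classifier below consumes the pooled output (derived from
# the [CLS] position, usually through a dense tanh layer, omitted here).
# A shape-only NumPy sketch, with illustrative sizes (batch=4,
# max_seq_length=128, hidden_size=768), not values from this codebase:
import numpy as np

batch_size, max_seq_length, hidden_size = 4, 128, 768
sequence_output = np.zeros((batch_size, max_seq_length, hidden_size))
pooled_output = sequence_output[:, 0, :]        # take the [CLS] position
assert sequence_output.shape == (4, 128, 768)   # per-token: span decoding
assert pooled_output.shape == (4, 768)          # per-example: classification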
def _forward(self, is_training, split_placeholders, **kwargs):
    encoder = ALBERTEncoder(
        albert_config=self.albert_config,
        is_training=is_training,
        input_ids=split_placeholders['input_ids'],
        input_mask=split_placeholders['input_mask'],
        segment_ids=split_placeholders['segment_ids'],
        scope='bert',
        drop_pooler=self._drop_pooler,
        **kwargs)
    encoder_output = encoder.get_pooled_output()
    decoder = CLSDecoder(
        is_training=is_training,
        input_tensor=encoder_output,
        label_ids=split_placeholders['label_ids'],
        label_size=self.label_size,
        sample_weight=split_placeholders.get('sample_weight'),
        scope='cls/seq_relationship',
        **kwargs)
    total_loss, losses, probs, preds = decoder.get_forward_outputs()
    return total_loss, losses, probs, preds
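
# NOTE: every `_forward` above follows the same encoder/decoder contract:
# the decoder exposes `get_forward_outputs()` returning the four-tuple
# (total_loss, losses, probs, preds). A minimal mock of that contract,
# assuming this shape; `MockDecoder` is a hypothetical name, and the real
# decoders return graph tensors rather than plain Python values.
class MockDecoder:
    def __init__(self):
        self.total_loss = 0.0   # scalar training objective
        self.losses = {}        # per-task / per-example loss tensors
        self.probs = {}         # predicted distributions
        self.preds = {}         # hard predictions (e.g. argmax)

    def get_forward_outputs(self):
        return (self.total_loss, self.losses, self.probs, self.preds)

total_loss, losses, probs, preds = MockDecoder().get_forward_outputs()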