def get_diff_loss(bert_config, input_tensor, masked_lm_positions,
                  masked_lm_weights, loss_base, loss_target):
    base_prob = tf.exp(-loss_base)
    target_prob = tf.exp(-loss_target)
    prob_diff = base_prob - target_prob
    input_tensor = bc.gather_indexes(input_tensor, masked_lm_positions)
    with tf.compat.v1.variable_scope("diff_loss"):
        hidden = bc.dense(bert_config.hidden_size,
                          bc.create_initializer(bert_config.initializer_range),
                          bc.get_activation(bert_config.hidden_act))(input_tensor)
        logits = bc.dense(1, bc.create_initializer(
            bert_config.initializer_range))(hidden)
    logits = tf.reshape(logits, prob_diff.shape)
    per_example_loss = tf.abs(prob_diff - logits)
    per_example_loss = tf.cast(masked_lm_weights, tf.float32) * per_example_loss
    losses = tf.reduce_sum(per_example_loss, axis=1)
    loss = tf.reduce_mean(losses)
    return loss, per_example_loss, logits

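# Illustrative sketch (not from the original source): `get_diff_loss` regresses
# onto the probability gap exp(-loss_base) - exp(-loss_target). For a
# cross-entropy loss, exp(-loss) recovers the probability the model assigned to
# the gold token. A minimal numeric check of that mapping, using plain TF ops:
def _demo_loss_to_prob():
    import tensorflow as tf
    loss = tf.constant([0.0, 0.693, 2.303])  # per-token cross-entropy losses
    prob = tf.exp(-loss)                     # ~[1.0, 0.5, 0.1] gold-token probs
    return prob
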
def get_next_sentence_output(bert_config, input_tensor, labels):
    """Get loss and log probs for the next sentence prediction."""
    # Simple binary classification. Note that 0 is "next sentence" and 1 is
    # "random sentence". This weight matrix is not used after pre-training.
    with tf.compat.v1.variable_scope("cls/seq_relationship"):
        output_weights = tf.compat.v1.get_variable(
            "output_weights",
            shape=[2, bert_config.hidden_size],
            initializer=bert_common.create_initializer(
                bert_config.initializer_range))
        output_bias = tf.compat.v1.get_variable(
            "output_bias", shape=[2],
            initializer=tf.compat.v1.zeros_initializer())

        logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        log_probs = tf.nn.log_softmax(logits, axis=-1)
        labels = tf.reshape(labels, [-1])
        one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
        per_example_loss = -tf.reduce_sum(
            input_tensor=one_hot_labels * log_probs, axis=-1)
        loss = tf.reduce_mean(input_tensor=per_example_loss)
        return (loss, per_example_loss, log_probs)

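# Usage sketch (hypothetical variable names, not from the original source): the
# NSP head consumes the pooled [CLS] representation of shape
# [batch, hidden_size] and integer labels of shape [batch], where 0 means
# "next sentence" and 1 means "random sentence":
#
#   pooled = model.get_pooled_output()                    # [batch, hidden]
#   nsp_loss, nsp_example_loss, nsp_log_probs = \
#       get_next_sentence_output(bert_config, pooled, next_sentence_labels)
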
def __init__(self, config):
    hidden_size = config.hidden_size
    initializer = bc.create_initializer(config.initializer_range)
    attention_head_size = int(hidden_size / config.num_attention_heads)
    self.attention_head_size = attention_head_size
    num_attention_heads = config.num_attention_heads
    self.num_attention_heads = num_attention_heads
    self.attention_probs_dropout_prob = config.attention_probs_dropout_prob
    self.hidden_dropout_prob = config.hidden_dropout_prob
    with tf.compat.v1.variable_scope("attention"):
        with tf.compat.v1.variable_scope("self"):
            self.query_layer = tf.keras.layers.Dense(
                num_attention_heads * attention_head_size,
                activation=None,
                name="query",
                kernel_initializer=initializer)
            self.key_layer = tf.keras.layers.Dense(
                num_attention_heads * attention_head_size,
                activation=None,
                name="key",
                kernel_initializer=initializer)
            self.value_layer = tf.keras.layers.Dense(
                num_attention_heads * attention_head_size,
                activation=None,
                name="value",
                kernel_initializer=initializer)
        with tf.compat.v1.variable_scope("output"):
            self.output_layer = tf.keras.layers.Dense(
                config.hidden_size,
                kernel_initializer=initializer)

def sequence_index_prediction(bert_config, lookup_idx, input_tensor):
    logits = bert_common.dense(
        2, bert_common.create_initializer(
            bert_config.initializer_range))(input_tensor)
    # Note: despite the name, these are softmax probabilities, not log-probs.
    log_probs = tf.nn.softmax(logits, axis=2)
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=lookup_idx)
    per_example_loss = tf.reduce_sum(losses, axis=1)
    loss = tf.reduce_mean(per_example_loss)
    return loss, per_example_loss, log_probs

def get_pooler(sequence_output, config):
    with tf.compat.v1.variable_scope("pooler"):
        first_token_tensor = tf.squeeze(sequence_output[:, 0:1, :], axis=1)
        pooled_output = tf.keras.layers.Dense(
            config.hidden_size,
            activation=tf.keras.activations.tanh,
            kernel_initializer=bc.create_initializer(
                config.initializer_range))(first_token_tensor)
    return pooled_output

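# Shape sketch (assumed example sizes, not from the source): get_pooler maps
# the encoder output [batch, seq_len, hidden] to [batch, hidden] by taking the
# first ([CLS]) token and passing it through a tanh dense layer:
#
#   sequence_output = model.get_sequence_output()   # e.g. [8, 128, 768]
#   pooled = get_pooler(sequence_output, config)    # [8, 768]
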
def __init__(self, bert_config):
    initializer = bc.create_initializer(bert_config.initializer_range)
    self.layer1 = bc.dense(bert_config.hidden_size, initializer,
                           bc.get_activation(bert_config.hidden_act))
    self.logit_dense1 = bc.dense(2, initializer)
    self.logit_dense2 = bc.dense(2, initializer)
    self.graph_built = False

def get_lexical_lookup(self):
    input_tensor_var = tf.compat.v1.get_variable(
        name="base_second",
        shape=[self.config.hidden_size],
        initializer=bc.create_initializer(self.config.initializer_range))
    batch_size, seq_length = bc.get_shape_list(self.input_ids)
    input_tensor = tf.reshape(input_tensor_var, [1, 1, -1])
    return input_tensor

def mlp_max(seq_output):
    # first_tokens: [batch_size, num_window, hidden_size]
    first_tokens = seq_output[:, :, 0, :]
    dense_layer1 = tf.keras.layers.Dense(
        config.hidden_size * 4,
        activation=tf.keras.activations.tanh,
        kernel_initializer=create_initializer(config.initializer_range))
    dense_layer2 = tf.keras.layers.Dense(
        config.hidden_size,
        activation=tf.keras.activations.tanh,
        kernel_initializer=create_initializer(config.initializer_range))
    first_tokens = window_wise_dropout(first_tokens)
    # hidden1: [batch_size, num_window, hidden_size * 4]
    hidden1 = dense_layer1(first_tokens)
    # hidden2: [batch_size, num_window, hidden_size]
    hidden2 = dense_layer2(hidden1)
    # Max-pool over the window dimension.
    return tf.reduce_max(hidden2, axis=1)

def __init__(self, config, is_training, input_ids, input_mask=None,
             token_type_ids=None, use_one_hot_embeddings=True,
             features=None, scope=None):
    super(DualBertTwoInputWithDoubleInputLength, self).__init__()
    input_ids1 = features["input_ids1"]
    input_mask1 = features["input_mask1"]
    segment_ids1 = features["segment_ids1"]
    input_ids2 = features["input_ids2"]
    input_mask2 = features["input_mask2"]
    segment_ids2 = features["segment_ids2"]
    with tf.compat.v1.variable_scope(dual_model_prefix1):
        model_1 = BertModel(
            config=config,
            is_training=is_training,
            input_ids=input_ids,
            input_mask=input_mask,
            token_type_ids=token_type_ids,
            use_one_hot_embeddings=use_one_hot_embeddings,
        )
    with tf.compat.v1.variable_scope(dual_model_prefix2):
        model_2 = DoubleLengthInputModel(
            config, is_training,
            input_ids1, input_mask1, segment_ids1,
            input_ids2, input_mask2, segment_ids2,
            use_one_hot_embeddings=use_one_hot_embeddings,
        )
    model_1_first_token = model_1.get_sequence_output()[:, 0, :]
    model_2_first_token = model_2.get_sequence_output()[:, 0, :]
    rep = tf.concat([model_1_first_token, model_2_first_token], axis=1)
    self.sequence_output = model_1.get_sequence_output()
    dense_layer = tf.keras.layers.Dense(
        config.hidden_size,
        activation=tf.keras.activations.tanh,
        kernel_initializer=create_initializer(config.initializer_range))
    pooled_output = dense_layer(rep)
    self.pooled_output = pooled_output

def get_masked_lm_output_albert(model_config, input_tensor, output_weights,
                                positions, label_ids, label_weights):
    """Get loss and log probs for the masked LM."""
    input_tensor = bert_common.gather_indexes(input_tensor, positions)
    with tf.compat.v1.variable_scope("cls/predictions"):
        # We apply one more non-linear transformation before the output layer.
        # This matrix is not used after pre-training.
        with tf.compat.v1.variable_scope("transform"):
            input_tensor = tf.keras.layers.Dense(
                model_config.embedding_size,
                activation=bert_common.get_activation(model_config.hidden_act),
                kernel_initializer=bert_common.create_initializer(
                    model_config.initializer_range))(input_tensor)
            input_tensor = bert_common.layer_norm(input_tensor)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        output_bias = tf.compat.v1.get_variable(
            "output_bias",
            shape=[model_config.vocab_size],
            initializer=tf.compat.v1.zeros_initializer())
        print("output_weights", output_weights.shape)
        logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        log_probs = tf.nn.log_softmax(logits, axis=-1)

        label_ids = tf.reshape(label_ids, [-1])
        label_weights = tf.reshape(label_weights, [-1])
        one_hot_labels = tf.one_hot(label_ids,
                                    depth=model_config.vocab_size,
                                    dtype=tf.float32)

        # The `positions` tensor might be zero-padded (if the sequence is too
        # short to have the maximum number of predictions). The `label_weights`
        # tensor has a value of 1.0 for every real prediction and 0.0 for the
        # padding predictions.
        per_example_loss = -tf.reduce_sum(
            input_tensor=log_probs * one_hot_labels, axis=[-1])
        numerator = tf.reduce_sum(input_tensor=label_weights * per_example_loss)
        denominator = tf.reduce_sum(input_tensor=label_weights) + 1e-5
        loss = numerator / denominator

    return (loss, per_example_loss, log_probs)

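# A runnable sketch of the weighted-mean reduction above (toy values, not from
# the source): label_weights zeroes out padded prediction slots, so the loss
# averages only over real predictions.
def _demo_weighted_mean_loss():
    import tensorflow as tf
    per_example_loss = tf.constant([2.0, 4.0, 9.0])  # last slot is padding
    label_weights = tf.constant([1.0, 1.0, 0.0])
    numerator = tf.reduce_sum(label_weights * per_example_loss)
    denominator = tf.reduce_sum(label_weights) + 1e-5
    return numerator / denominator  # ~3.0; the padded slot is ignored
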
def get_regression_and_loss(hidden_vector, loss_label):
    # Note: `bert_config`, `masked_lm_weights`, and `loss_to_prob_pair` are
    # free variables here; this function is intended to run as a closure
    # inside a scope that defines them (see get_loss_independently below).
    logits = bc.dense(2, bc.create_initializer(
        bert_config.initializer_range))(hidden_vector)
    gold_prob = loss_to_prob_pair(loss_label)
    logits = tf.reshape(logits, gold_prob.shape)
    per_example_loss = tf.nn.softmax_cross_entropy_with_logits(
        gold_prob, logits, axis=-1, name=None)
    per_example_loss = tf.cast(masked_lm_weights, tf.float32) * per_example_loss
    losses = tf.reduce_sum(per_example_loss, axis=1)
    loss = tf.reduce_mean(losses)
    return loss, per_example_loss, logits

def __init__(self, config):
    super(ForwardColumn, self).__init__()
    hidden_size = config.hidden_size
    initializer = bc.create_initializer(config.initializer_range)
    attention_head_size = int(hidden_size / config.num_attention_heads)
    self.attention_head_size = attention_head_size
    num_attention_heads = config.num_attention_heads
    self.num_attention_heads = num_attention_heads
    self.attention_probs_dropout_prob = config.attention_probs_dropout_prob
    self.hidden_dropout_prob = config.hidden_dropout_prob
    self.attention_unit = AttentionUnit(num_attention_heads,
                                        attention_head_size,
                                        hidden_size,
                                        config.hidden_dropout_prob,
                                        config.attention_probs_dropout_prob,
                                        initializer)
    self.residual_ff = ResidualFeedforward(hidden_size,
                                           config.intermediate_size,
                                           config.hidden_act,
                                           config.hidden_dropout_prob,
                                           initializer)
    self.attention_mask = None

def apply(self, input_ids, segment_ids, initializer_range, vocab_size,
          hidden_size, type_vocab_size, max_position_embeddings,
          hidden_dropout_prob, use_one_hot_embeddings):
    initializer = bc.create_initializer(initializer_range)
    self.embedding_table = tf.compat.v1.get_variable(
        name="word_embeddings",
        shape=[vocab_size, hidden_size],
        initializer=initializer)
    self.token_type_table = tf.compat.v1.get_variable(
        name="token_type_embeddings",
        shape=[type_vocab_size, hidden_size],
        initializer=initializer)
    self.full_position_embeddings = tf.compat.v1.get_variable(
        name="position_embeddings",
        shape=[max_position_embeddings, hidden_size],
        initializer=initializer)

    # Perform embedding lookup on the word ids.
    (self.embedding_output, self.embedding_table) = bc.embedding_lookup2(
        input_ids=input_ids,
        embedding_table=self.embedding_table,
        vocab_size=vocab_size,
        embedding_size=hidden_size,
        use_one_hot_embeddings=use_one_hot_embeddings)

    # Add positional embeddings and token type embeddings, then layer
    # normalize and perform dropout.
    self.embedding_output = bc.embedding_postprocessor2(
        input_tensor=self.embedding_output,
        token_type_table=self.token_type_table,
        full_position_embeddings=self.full_position_embeddings,
        use_token_type=True,
        token_type_ids=segment_ids,
        token_type_vocab_size=type_vocab_size,
        use_position_embeddings=True,
        max_position_embeddings=max_position_embeddings,
        dropout_prob=hidden_dropout_prob)
    return self.embedding_output

def __init__(self, config, is_training, use_one_hot_embeddings):
    super(HorizontalAlpha, self).__init__()
    if not is_training:
        config.set_attrib("hidden_dropout_prob", 0.0)
        config.set_attrib("attention_probs_dropout_prob", 0.0)
    initializer = bc.create_initializer(config.initializer_range)
    self.embedding_layer = Embedding2()
    self.embedding_projector = bc.dense(config.hidden_size, initializer)
    self.config = config
    num_columns = config.num_columns
    self.column_list = []
    for tower_idx in range(num_columns):
        column = ForwardColumn(config)
        self.column_list.append(column)
    self.num_layers = config.num_hidden_layers
    self.num_columns = config.num_columns
    self.num_column_tokens = config.num_column_tokens
    self.column_embedding_list = []
    self.use_one_hot_embeddings = use_one_hot_embeddings
    column_mask = []
    for column_idx in range(1, self.num_columns):
        column_embedding = tf.Variable(
            lambda: initializer(shape=(self.num_column_tokens,
                                       config.hidden_size),
                                dtype=tf.float32),
            name="column_embedding_{}".format(column_idx))
        self.column_embedding_list.append(column_embedding)
        column_mask += [1] * self.num_column_tokens
    self.column_mask = tf.constant(column_mask)
    self.all_raw_layers = []
    self.all_main_layers = []
    self.sequence_output = None
    self.pooled_output = None

def get_loss_independently(bert_config, input_tensor, masked_lm_positions,
                           masked_lm_weights, loss_base, loss_target):
    input_tensor = bc.gather_indexes(input_tensor, masked_lm_positions)
    hidden = bc.dense(bert_config.hidden_size,
                      bc.create_initializer(bert_config.initializer_range),
                      bc.get_activation(bert_config.hidden_act))(input_tensor)

    def get_regression_and_loss(hidden_vector, loss_label):
        logits = bc.dense(2, bc.create_initializer(
            bert_config.initializer_range))(hidden_vector)
        gold_prob = loss_to_prob_pair(loss_label)
        logits = tf.reshape(logits, gold_prob.shape)
        per_example_loss = tf.nn.softmax_cross_entropy_with_logits(
            gold_prob, logits, axis=-1, name=None)
        per_example_loss = tf.cast(masked_lm_weights, tf.float32) * per_example_loss
        losses = tf.reduce_sum(per_example_loss, axis=1)
        loss = tf.reduce_mean(losses)
        return loss, per_example_loss, logits

    loss1, per_example_loss1, logits1 = get_regression_and_loss(hidden, loss_base)
    loss2, per_example_loss2, logits2 = get_regression_and_loss(hidden, loss_target)

    prob1 = tf.nn.softmax(logits1)[:, :, 0]
    prob2 = tf.nn.softmax(logits2)[:, :, 0]
    total_loss = loss1 + loss2
    return total_loss, loss1, loss2, per_example_loss1, per_example_loss2, prob1, prob2

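# `loss_to_prob_pair` is referenced above but not defined in this section. A
# minimal sketch of one definition consistent with the surrounding code,
# assuming (as in get_diff_loss) that exp(-loss) converts a cross-entropy loss
# into the gold-token probability; the pair [p, 1 - p] then matches the 2-way
# logits used by the regression head. This is an illustrative assumption, not
# the confirmed original helper.
def _loss_to_prob_pair_sketch(loss):
    import tensorflow as tf
    p = tf.exp(-loss)                       # [batch, n_pred] probabilities
    return tf.stack([p, 1.0 - p], axis=-1)  # [batch, n_pred, 2]
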
def transformer_model(input_tensor,
                      attention_mask=None,
                      input_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_attention_heads=12,
                      mr_num_route=10,
                      intermediate_size=3072,
                      intermediate_act_fn=gelu,
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      is_training=True,
                      do_return_all_layers=False):
    """Multi-headed, multi-layer Transformer from "Attention is All You Need".

    This is almost an exact implementation of the original Transformer encoder.

    See the original paper:
    https://arxiv.org/abs/1706.03762

    Also see:
    https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py

    Args:
      input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
      attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
        seq_length], with 1 for positions that can be attended to and 0 in
        positions that should not be.
      input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
      hidden_size: int. Hidden size of the Transformer.
      num_hidden_layers: int. Number of layers (blocks) in the Transformer.
      num_attention_heads: int. Number of attention heads in the Transformer.
      mr_num_route: int. Number of routes available to the MR layers.
      intermediate_size: int. The size of the "intermediate" (a.k.a., feed
        forward) layer.
      intermediate_act_fn: function. The non-linear activation function to apply
        to the output of the intermediate/feed-forward layer.
      hidden_dropout_prob: float. Dropout probability for the hidden layers.
      attention_probs_dropout_prob: float. Dropout probability of the attention
        probabilities.
      initializer_range: float. Range of the initializer (stddev of truncated
        normal).
      is_training: bool. If True the route key is sampled; otherwise argmax.
      do_return_all_layers: Whether to also return all layers or just the final
        layer.

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size], the final
      hidden layer of the Transformer, together with the per-row route key.

    Raises:
      ValueError: A Tensor shape or parameter is invalid.
    """
    if hidden_size % num_attention_heads != 0:
        raise ValueError(
            "The hidden size (%d) is not a multiple of the number of attention "
            "heads (%d)" % (hidden_size, num_attention_heads))

    attention_head_size = int(hidden_size / num_attention_heads)
    input_shape = get_shape_list(input_tensor, expected_rank=3)
    batch_size = input_shape[0]
    seq_length = input_shape[1]
    input_width = input_shape[2]
    initializer = create_initializer(initializer_range)

    ext_tensor = tf.compat.v1.get_variable(
        "ext_tensor",
        shape=[num_hidden_layers, mr_num_route, EXT_SIZE, hidden_size],
        initializer=initializer)
    ext_tensor_inter = tf.compat.v1.get_variable(
        "ext_tensor_inter",
        shape=[num_hidden_layers, mr_num_route, intermediate_size],
        initializer=initializer)

    # The Transformer performs sum residuals on all layers so the input needs
    # to be the same as the hidden size.
    if input_width != hidden_size:
        raise ValueError("The width of the input tensor (%d) != hidden size (%d)"
                         % (input_width, hidden_size))

    # We keep the representation as a 2D tensor to avoid re-shaping it back and
    # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
    # the GPU/CPU but may not be free on the TPU, so we want to minimize them to
    # help the optimizer.
    prev_output = reshape_to_matrix(input_tensor)

    def is_mr_layer(layer_idx):
        return layer_idx > 1

    all_layer_outputs = []
    for layer_idx in range(num_hidden_layers):
        if not is_mr_layer(layer_idx):
            with tf.compat.v1.variable_scope("layer_%d" % layer_idx):
                layer_input = prev_output
                with tf.compat.v1.variable_scope("attention"):
                    attention_heads = []
                    with tf.compat.v1.variable_scope("self"):
                        attention_head = attention_layer(
                            from_tensor=layer_input,
                            to_tensor=layer_input,
                            attention_mask=attention_mask,
                            num_attention_heads=num_attention_heads,
                            size_per_head=attention_head_size,
                            attention_probs_dropout_prob=attention_probs_dropout_prob,
                            initializer_range=initializer_range,
                            do_return_2d_tensor=True,
                            batch_size=batch_size,
                            from_seq_length=seq_length,
                            to_seq_length=seq_length)
                        attention_heads.append(attention_head)

                    if len(attention_heads) == 1:
                        attention_output = attention_heads[0]
                    else:
                        # In the case where we have other sequences, we just
                        # concatenate them to the self-attention head before
                        # the projection.
                        attention_output = tf.concat(attention_heads, axis=-1)

                    # Run a linear projection of `hidden_size` then add a
                    # residual with `layer_input`.
                    with tf.compat.v1.variable_scope("output"):
                        attention_output = dense(hidden_size, initializer)(attention_output)
                        attention_output = dropout(attention_output, hidden_dropout_prob)
                        attention_output = layer_norm(attention_output + layer_input)

                # The activation is only applied to the "intermediate" hidden
                # layer.
                with tf.compat.v1.variable_scope("intermediate"):
                    intermediate_output = dense(
                        intermediate_size, initializer,
                        activation=intermediate_act_fn)(attention_output)

                # Down-project back to `hidden_size` then add the residual.
                with tf.compat.v1.variable_scope("output"):
                    layer_output = dense(hidden_size, initializer)(intermediate_output)
                    layer_output = dropout(layer_output, hidden_dropout_prob)
                    layer_output = layer_norm(layer_output + attention_output)
                    prev_output = layer_output
                    all_layer_outputs.append(layer_output)

                with tf.compat.v1.variable_scope("mr_key"):
                    key_output = tf.keras.layers.Dense(
                        mr_num_route,
                        kernel_initializer=create_initializer(
                            initializer_range))(intermediate_output)
                    key_output = dropout(key_output, hidden_dropout_prob)
                    if is_training:
                        key = tf.random.categorical(key_output, 1)  # [batch_size, 1]
                        key = tf.reshape(key, [-1])
                    else:
                        key = tf.math.argmax(input=key_output, axis=1)
        else:
            # Case MR layer.
            with tf.compat.v1.variable_scope("layer_%d" % layer_idx):
                layer_input = prev_output
                ext_slice = tf.gather(ext_tensor[layer_idx], key)
                ext_interm_slice = tf.gather(ext_tensor_inter[layer_idx], key)
                print("ext_slice (batch*seq, ", ext_slice.shape)
                with tf.compat.v1.variable_scope("attention"):
                    attention_heads = []
                    with tf.compat.v1.variable_scope("self"):
                        attention_head = attention_layer_w_ext(
                            from_tensor=layer_input,
                            to_tensor=layer_input,
                            attention_mask=attention_mask,
                            ext_slice=ext_slice,
                            num_attention_heads=num_attention_heads,
                            size_per_head=attention_head_size,
                            attention_probs_dropout_prob=attention_probs_dropout_prob,
                            initializer_range=initializer_range,
                            do_return_2d_tensor=True,
                            batch_size=batch_size,
                            from_seq_length=seq_length,
                            to_seq_length=seq_length)
                        attention_head = attention_head + ext_slice[:, EXT_ATT_OUT, :]
                        attention_heads.append(attention_head)

                    if len(attention_heads) == 1:
                        attention_output = attention_heads[0]
                    else:
                        # In the case where we have other sequences, we just
                        # concatenate them to the self-attention head before
                        # the projection.
                        attention_output = tf.concat(attention_heads, axis=-1)

                    # Run a linear projection of `hidden_size` then add a
                    # residual with `layer_input`.
                    with tf.compat.v1.variable_scope("output"):
                        attention_output = dense(hidden_size, initializer)(attention_output)
                        attention_output = dropout(attention_output, hidden_dropout_prob)
                        attention_output = attention_output + ext_slice[:, EXT_ATT_PROJ, :]
                        attention_output = layer_norm(attention_output + layer_input)

                # The activation is only applied to the "intermediate" hidden
                # layer.
                with tf.compat.v1.variable_scope("intermediate"):
                    intermediate_output = dense(
                        intermediate_size, initializer,
                        activation=intermediate_act_fn)(attention_output)
                    intermediate_output = ext_interm_slice + intermediate_output

                # Down-project back to `hidden_size` then add the residual.
                with tf.compat.v1.variable_scope("output"):
                    layer_output = dense(hidden_size, initializer)(intermediate_output)
                    layer_output = layer_output + ext_slice[:, EXT_LAYER_OUT, :]
                    layer_output = dropout(layer_output, hidden_dropout_prob)
                    layer_output = layer_norm(layer_output + attention_output)
                    prev_output = layer_output
                    all_layer_outputs.append(layer_output)

    if do_return_all_layers:
        final_outputs = []
        for layer_output in all_layer_outputs:
            final_output = reshape_from_matrix(layer_output, input_shape)
            final_outputs.append(final_output)
        return final_outputs, key
    else:
        final_output = reshape_from_matrix(prev_output, input_shape)
        return final_output, key

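# A self-contained sketch of the per-row routing used above (toy shapes, not
# taken from a real config): each (batch*seq) row picks one of `mr_num_route`
# parameter slices from the ext tensor via tf.gather.
def _demo_route_gather():
    import tensorflow as tf
    mr_num_route, ext_size, hidden = 4, 3, 8
    ext_layer = tf.random.normal([mr_num_route, ext_size, hidden])
    key = tf.constant([2, 0, 3])           # one route id per (batch*seq) row
    ext_slice = tf.gather(ext_layer, key)  # [3, ext_size, hidden]
    return ext_slice
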
def embedding_postprocessor(self,
                            d_input_ids,
                            input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1):
    input_shape = bc.get_shape_list(d_input_ids, expected_rank=2)
    batch_size = input_shape[0]
    seq_length = input_shape[1]
    width = self.config.hidden_size

    output = input_tensor
    if use_token_type:
        if token_type_ids is None:
            raise ValueError("`token_type_ids` must be specified if "
                             "`use_token_type` is True.")
        token_type_table = tf.compat.v1.get_variable(
            name=token_type_embedding_name,
            shape=[token_type_vocab_size, width],
            initializer=bc.create_initializer(initializer_range))
        # This vocab will be small so we always do one-hot here, since it is
        # always faster for a small vocabulary.
        flat_token_type_ids = tf.reshape(token_type_ids, [-1])
        one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
        token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
        token_type_embeddings = tf.reshape(token_type_embeddings,
                                           [batch_size, seq_length, width])
        output += token_type_embeddings

    if use_position_embeddings:
        assert_op = tf.compat.v1.assert_less_equal(seq_length,
                                                   max_position_embeddings)
        with tf.control_dependencies([assert_op]):
            full_position_embeddings = tf.compat.v1.get_variable(
                name=position_embedding_name,
                shape=[max_position_embeddings, width],
                initializer=bc.create_initializer(initializer_range))
            # Since the position embedding table is a learned variable, we
            # create it using a (long) sequence length
            # `max_position_embeddings`. The actual sequence length might be
            # shorter than this, for faster training of tasks that do not have
            # long sequences.
            #
            # So `full_position_embeddings` is effectively an embedding table
            # for position [0, 1, 2, ..., max_position_embeddings-1], and the
            # current sequence has positions [0, 1, 2, ..., seq_length-1], so
            # we can just perform a slice.
            position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                           [seq_length, -1])
            num_dims = len(output.shape.as_list())
            # Only the last two dimensions are relevant (`seq_length` and
            # `width`), so we broadcast among the first dimensions, which is
            # typically just the batch size.
            position_broadcast_shape = []
            for _ in range(num_dims - 2):
                position_broadcast_shape.append(1)
            position_broadcast_shape.extend([seq_length, width])
            position_embeddings = tf.reshape(position_embeddings,
                                             position_broadcast_shape)
            output += position_embeddings

    output = bc.layer_norm_and_dropout(output, dropout_prob)
    return output

def attention_layer_w_ext(from_tensor,
                          to_tensor,
                          attention_mask=None,
                          num_attention_heads=1,
                          size_per_head=512,
                          ext_slice=None,  # [num_tokens, n_items, hidden_dim]
                          query_act=None,
                          key_act=None,
                          value_act=None,
                          attention_probs_dropout_prob=0.0,
                          initializer_range=0.02,
                          do_return_2d_tensor=False,
                          batch_size=None,
                          from_seq_length=None,
                          to_seq_length=None):
    """Performs multi-headed attention from `from_tensor` to `to_tensor`.

    This is an implementation of multi-headed attention based on "Attention is
    all you Need". If `from_tensor` and `to_tensor` are the same, then this is
    self-attention. Each timestep in `from_tensor` attends to the
    corresponding sequence in `to_tensor`, and returns a fixed-width vector.

    This function first projects `from_tensor` into a "query" tensor and
    `to_tensor` into "key" and "value" tensors. These are (effectively) a list
    of tensors of length `num_attention_heads`, where each tensor is of shape
    [batch_size, seq_length, size_per_head].

    Then, the query and key tensors are dot-producted and scaled. These are
    softmaxed to obtain attention probabilities. The value tensors are then
    interpolated by these probabilities, then concatenated back to a single
    tensor and returned.

    In practice, the multi-headed attention is done with transposes and
    reshapes rather than actual separate tensors.

    Args:
      from_tensor: float Tensor of shape [batch_size, from_seq_length,
        from_width].
      to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
      attention_mask: (optional) int32 Tensor of shape [batch_size,
        from_seq_length, to_seq_length]. The values should be 1 or 0. The
        attention scores will effectively be set to -infinity for any positions
        in the mask that are 0, and will be unchanged for positions that are 1.
      num_attention_heads: int. Number of attention heads.
      size_per_head: int. Size of each attention head.
      ext_slice: per-token external offset tensor, one slice per EXT_* slot.
      query_act: (optional) Activation function for the query transform.
      key_act: (optional) Activation function for the key transform.
      value_act: (optional) Activation function for the value transform.
      attention_probs_dropout_prob: (optional) float. Dropout probability of
        the attention probabilities.
      initializer_range: float. Range of the weight initializer.
      do_return_2d_tensor: bool. If True, the output will be of shape
        [batch_size * from_seq_length, num_attention_heads * size_per_head].
        If False, the output will be of shape
        [batch_size, from_seq_length, num_attention_heads * size_per_head].
      batch_size: (Optional) int. If the input is 2D, this might be the batch
        size of the 3D version of the `from_tensor` and `to_tensor`.
      from_seq_length: (Optional) If the input is 2D, this might be the seq
        length of the 3D version of the `from_tensor`.
      to_seq_length: (Optional) If the input is 2D, this might be the seq
        length of the 3D version of the `to_tensor`.

    Returns:
      float Tensor of shape [batch_size, from_seq_length,
        num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
        true, this will be of shape [batch_size * from_seq_length,
        num_attention_heads * size_per_head]).

    Raises:
      ValueError: Any of the arguments or tensor shapes are invalid.
    """

    def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
                             seq_length, width):
        output_tensor = tf.reshape(
            input_tensor, [batch_size, seq_length, num_attention_heads, width])
        output_tensor = tf.transpose(a=output_tensor, perm=[0, 2, 1, 3])
        return output_tensor

    from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
    to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])

    if len(from_shape) != len(to_shape):
        raise ValueError(
            "The rank of `from_tensor` must match the rank of `to_tensor`.")

    if len(from_shape) == 3:
        batch_size = from_shape[0]
        from_seq_length = from_shape[1]
        to_seq_length = to_shape[1]
    elif len(from_shape) == 2:
        if (batch_size is None or from_seq_length is None
                or to_seq_length is None):
            raise ValueError(
                "When passing in rank 2 tensors to attention_layer, the values "
                "for `batch_size`, `from_seq_length`, and `to_seq_length` "
                "must all be specified.")

    # Scalar dimensions referenced here:
    #   B = batch size (number of sequences)
    #   F = `from_tensor` sequence length
    #   T = `to_tensor` sequence length
    #   N = `num_attention_heads`
    #   H = `size_per_head`
    from_tensor_2d = reshape_to_matrix(from_tensor)
    to_tensor_2d = reshape_to_matrix(to_tensor)

    def get_ext_slice(idx):
        return ext_slice[:, idx, :]

    print("from_tensor_2d ", from_tensor_2d.shape)
    # Note: the query input receives no EXT_QUERY_IN offset; only the key and
    # value inputs are offset below.
    query_in = from_tensor_2d
    # `query_layer` = [B*F, N*H]
    query_layer = tf.keras.layers.Dense(
        num_attention_heads * size_per_head,
        activation=query_act,
        name="query",
        kernel_initializer=create_initializer(initializer_range))(query_in)
    query_layer = query_layer + get_ext_slice(EXT_QUERY_OUT)

    key_in = to_tensor_2d + get_ext_slice(EXT_KEY_IN)
    # `key_layer` = [B*T, N*H]
    key_layer = tf.keras.layers.Dense(
        num_attention_heads * size_per_head,
        activation=key_act,
        name="key",
        kernel_initializer=create_initializer(initializer_range))(key_in)
    key_layer = key_layer + get_ext_slice(EXT_KEY_OUT)

    value_in = to_tensor_2d + get_ext_slice(EXT_VALUE_IN)
    # `value_layer` = [B*T, N*H]
    value_layer = tf.keras.layers.Dense(
        num_attention_heads * size_per_head,
        activation=value_act,
        name="value",
        kernel_initializer=create_initializer(initializer_range))(value_in)
    value_layer = value_layer + get_ext_slice(EXT_VALUE_OUT)

    # `query_layer` = [B, N, F, H]
    query_layer = transpose_for_scores(query_layer, batch_size,
                                       num_attention_heads, from_seq_length,
                                       size_per_head)

    # `key_layer` = [B, N, T, H]
    key_layer = transpose_for_scores(key_layer, batch_size,
                                     num_attention_heads, to_seq_length,
                                     size_per_head)

    # Take the dot product between "query" and "key" to get the raw
    # attention scores.
    # `attention_scores` = [B, N, F, T]
    attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
    attention_scores = tf.multiply(attention_scores,
                                   1.0 / math.sqrt(float(size_per_head)))

    if attention_mask is not None:
        # `attention_mask` = [B, 1, F, T]
        attention_mask = tf.expand_dims(attention_mask, axis=[1])

        # Since attention_mask is 1.0 for positions we want to attend and 0.0
        # for masked positions, this operation will create a tensor which is
        # 0.0 for positions we want to attend and -10000.0 for masked
        # positions.
        adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0

        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        attention_scores += adder

    # Normalize the attention scores to probabilities.
    # `attention_probs` = [B, N, F, T]
    attention_probs = tf.nn.softmax(attention_scores)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attention_probs = dropout(attention_probs, attention_probs_dropout_prob)

    # `value_layer` = [B, T, N, H]
    value_layer = tf.reshape(
        value_layer,
        [batch_size, to_seq_length, num_attention_heads, size_per_head])

    # `value_layer` = [B, N, T, H]
    value_layer = tf.transpose(a=value_layer, perm=[0, 2, 1, 3])

    # `context_layer` = [B, N, F, H]
    context_layer = tf.matmul(attention_probs, value_layer)

    # `context_layer` = [B, F, N, H]
    context_layer = tf.transpose(a=context_layer, perm=[0, 2, 1, 3])

    if do_return_2d_tensor:
        # `context_layer` = [B*F, N*H]
        context_layer = tf.reshape(
            context_layer,
            [batch_size * from_seq_length, num_attention_heads * size_per_head])
    else:
        # `context_layer` = [B, F, N*H]
        context_layer = tf.reshape(
            context_layer,
            [batch_size, from_seq_length, num_attention_heads * size_per_head])

    return context_layer

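# A runnable sketch of the additive masking trick above (toy shapes, not from
# the source; the real tensors are [B, N, F, T]): positions with mask 0 get
# -10000.0 added before the softmax, driving their attention weight to ~0.
def _demo_attention_mask_adder():
    import tensorflow as tf
    scores = tf.zeros([1, 1, 2, 3])               # raw attention scores
    mask = tf.constant([[[1, 1, 0], [1, 0, 0]]])  # [B, F, T]
    adder = (1.0 - tf.cast(tf.expand_dims(mask, 1), tf.float32)) * -10000.0
    probs = tf.nn.softmax(scores + adder)         # masked positions ~0
    return probs
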
def transformer_model(input_tensor,
                      attention_mask=None,
                      input_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_attention_heads=12,
                      intermediate_size=3072,
                      intermediate_act_fn=gelu,
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      is_training=True,
                      do_return_all_layers=False):
    if hidden_size % num_attention_heads != 0:
        raise ValueError(
            "The hidden size (%d) is not a multiple of the number of attention "
            "heads (%d)" % (hidden_size, num_attention_heads))

    attention_head_size = int(hidden_size / num_attention_heads)
    input_shape = get_shape_list(input_tensor, expected_rank=3)
    batch_size = input_shape[0]
    seq_length = input_shape[1]
    input_width = input_shape[2]
    initializer = create_initializer(initializer_range)

    # The Transformer performs sum residuals on all layers so the input needs
    # to be the same as the hidden size.
    if input_width != hidden_size:
        raise ValueError("The width of the input tensor (%d) != hidden size (%d)"
                         % (input_width, hidden_size))

    # We keep the representation as a 2D tensor to avoid re-shaping it back and
    # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
    # the GPU/CPU but may not be free on the TPU, so we want to minimize them to
    # help the optimizer.
    prev_output = reshape_to_matrix(input_tensor)

    all_layer_outputs = []
    for layer_idx in range(num_hidden_layers):
        with tf.compat.v1.variable_scope("layer_%d" % layer_idx):
            layer_input = prev_output

            with tf.compat.v1.variable_scope("attention"):
                attention_heads = []
                with tf.compat.v1.variable_scope("self"):
                    attention_head = attention_layer(
                        from_tensor=layer_input,
                        to_tensor=layer_input,
                        attention_mask=attention_mask,
                        num_attention_heads=num_attention_heads,
                        size_per_head=attention_head_size,
                        attention_probs_dropout_prob=attention_probs_dropout_prob,
                        initializer_range=initializer_range,
                        do_return_2d_tensor=True,
                        batch_size=batch_size,
                        from_seq_length=seq_length,
                        to_seq_length=seq_length)
                    attention_heads.append(attention_head)

                if len(attention_heads) == 1:
                    attention_output = attention_heads[0]
                else:
                    # In the case where we have other sequences, we just
                    # concatenate them to the self-attention head before the
                    # projection.
                    attention_output = tf.concat(attention_heads, axis=-1)

                # Run a linear projection of `hidden_size` then add a residual
                # with `layer_input`.
                with tf.compat.v1.variable_scope("output"):
                    attention_output = dense(hidden_size, initializer)(attention_output)
                    attention_output = dropout(attention_output, hidden_dropout_prob)
                    attention_output = layer_norm(attention_output + layer_input)

            # The activation is only applied to the "intermediate" hidden
            # layer.
            with tf.compat.v1.variable_scope("intermediate"):
                intermediate_output = dense(
                    intermediate_size, initializer,
                    activation=intermediate_act_fn)(attention_output)

            # Down-project back to `hidden_size` then add the residual.
            with tf.compat.v1.variable_scope("output"):
                layer_output = dense(hidden_size, initializer)(intermediate_output)
                layer_output = dropout(layer_output, hidden_dropout_prob)
                layer_output = layer_norm(layer_output + attention_output)
                prev_output = layer_output
                all_layer_outputs.append(layer_output)

    if do_return_all_layers:
        final_outputs = []
        for layer_output in all_layer_outputs:
            final_output = reshape_from_matrix(layer_output, input_shape)
            final_outputs.append(final_output)
        return final_outputs
    else:
        final_output = reshape_from_matrix(prev_output, input_shape)
        return final_output

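# Usage sketch (hypothetical values, not from the source): the encoder takes
# [batch, seq, hidden] embeddings plus a [batch, seq, seq] attention mask, and
# returns either the final layer or all layers:
#
#   attention_mask = create_attention_mask_from_input_mask(input_ids, input_mask)
#   all_layers = transformer_model(
#       input_tensor=embedding_output,
#       attention_mask=attention_mask,
#       hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
#       do_return_all_layers=True)
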
def __init__(self, config, is_training, input_ids, input_mask=None,
             token_type_ids=None, use_one_hot_embeddings=True, scope=None):
    super(ReshapeBertModel, self).__init__()
    config = copy.deepcopy(config)
    self.config = config
    if not is_training:
        config.hidden_dropout_prob = 0.0
        config.attention_probs_dropout_prob = 0.0

    input_shape = get_shape_list(input_ids, expected_rank=2)
    batch_size = input_shape[0]
    seq_length = input_shape[1]

    if input_mask is None:
        input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
    if token_type_ids is None:
        token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)

    with tf.compat.v1.variable_scope(scope, default_name="bert"):
        with tf.compat.v1.variable_scope("embeddings"):
            # Perform embedding lookup on the word ids.
            (self.embedding_output, self.embedding_table) = embedding_lookup(
                input_ids=input_ids,
                vocab_size=config.vocab_size,
                embedding_size=config.hidden_size,
                initializer_range=config.initializer_range,
                word_embedding_name="word_embeddings",
                use_one_hot_embeddings=use_one_hot_embeddings)

            # Add positional embeddings and token type embeddings, then layer
            # normalize and perform dropout.
            self.embedding_output = embedding_postprocessor(
                input_tensor=self.embedding_output,
                use_token_type=True,
                token_type_ids=token_type_ids,
                token_type_vocab_size=config.type_vocab_size,
                token_type_embedding_name="token_type_embeddings",
                use_position_embeddings=True,
                position_embedding_name="position_embeddings",
                initializer_range=config.initializer_range,
                max_position_embeddings=config.max_position_embeddings,
                dropout_prob=config.hidden_dropout_prob)

        with tf.compat.v1.variable_scope("encoder"):
            attention_mask = create_attention_mask_from_input_mask(
                input_ids, input_mask)
            self.all_encoder_layers = transformer_model(
                input_tensor=self.embedding_output,
                attention_mask=attention_mask,
                input_mask=input_mask,
                hidden_size=config.hidden_size,
                num_hidden_layers=config.num_hidden_layers,
                num_attention_heads=config.num_attention_heads,
                is_training=is_training,
                intermediate_size=config.intermediate_size,
                intermediate_act_fn=get_activation(config.hidden_act),
                hidden_dropout_prob=config.hidden_dropout_prob,
                attention_probs_dropout_prob=config.attention_probs_dropout_prob,
                initializer_range=config.initializer_range,
                do_return_all_layers=True)

        self.sequence_output = self.all_encoder_layers[-1]
        with tf.compat.v1.variable_scope("pooler"):
            first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
            self.pooled_output = tf.keras.layers.Dense(
                config.hidden_size,
                activation=tf.keras.activations.tanh,
                kernel_initializer=create_initializer(
                    config.initializer_range))(first_token_tensor)

def __init__(self, config, is_training,
             input_ids1, input_mask1, token_type_ids1,
             input_ids2, input_mask2, token_type_ids2,
             use_one_hot_embeddings=True, features=None, scope=None):
    super(DoubleLengthInputModel, self).__init__()
    input_shape = get_shape_list(input_ids1, expected_rank=2)
    batch_size = input_shape[0]
    seq_length = input_shape[1]

    # Feed the two inputs separately to the network.
    config = copy.deepcopy(config)
    self.config = config
    if not is_training:
        config.hidden_dropout_prob = 0.0
        config.attention_probs_dropout_prob = 0.0

    # [batch_size * 2, seq_length]
    batch_concat_input_ids = tf.concat([input_ids1, input_ids2], 0)
    batch_concat_concat_token_ids = tf.concat(
        [token_type_ids1, token_type_ids2], 0)
    input_mask_seq_concat = tf.concat([input_mask1, input_mask2], 1)

    with tf.compat.v1.variable_scope(scope, default_name="bert"):
        with tf.compat.v1.variable_scope("embeddings"):
            # Perform embedding lookup on the word ids.
            (embedding_output_batch_concat, self.embedding_table) = embedding_lookup(
                input_ids=batch_concat_input_ids,
                vocab_size=config.vocab_size,
                embedding_size=config.hidden_size,
                initializer_range=config.initializer_range,
                word_embedding_name="word_embeddings",
                use_one_hot_embeddings=use_one_hot_embeddings)

            # Add positional embeddings and token type embeddings, then layer
            # normalize and perform dropout.
            embedding_output_batch_concat = embedding_postprocessor(
                input_tensor=embedding_output_batch_concat,
                use_token_type=True,
                token_type_ids=batch_concat_concat_token_ids,
                token_type_vocab_size=config.type_vocab_size,
                token_type_embedding_name="token_type_embeddings",
                use_position_embeddings=True,
                position_embedding_name="position_embeddings",
                initializer_range=config.initializer_range,
                max_position_embeddings=config.max_position_embeddings,
                dropout_prob=config.hidden_dropout_prob)

            # Re-lay-out the batch-concatenated embeddings [2B, L, H] as a
            # sequence concatenation [B, 2L, H].
            embedding_output_stacked = tf.reshape(
                embedding_output_batch_concat, [2, batch_size, seq_length, -1])
            embedding_output_stacked = tf.transpose(embedding_output_stacked,
                                                    [1, 0, 2, 3])
            embedding_output_seq_concat = tf.reshape(
                embedding_output_stacked, [batch_size, seq_length * 2, -1])
            self.embedding_output = embedding_output_seq_concat

        with tf.compat.v1.variable_scope("encoder"):
            attention_mask = create_attention_mask_from_input_mask(
                input_mask_seq_concat, input_mask_seq_concat)
            self.all_encoder_layers = transformer_model(
                input_tensor=self.embedding_output,
                attention_mask=attention_mask,
                input_mask=input_mask_seq_concat,
                hidden_size=config.hidden_size,
                num_hidden_layers=config.num_hidden_layers,
                num_attention_heads=config.num_attention_heads,
                is_training=is_training,
                intermediate_size=config.intermediate_size,
                intermediate_act_fn=get_activation(config.hidden_act),
                hidden_dropout_prob=config.hidden_dropout_prob,
                attention_probs_dropout_prob=config.attention_probs_dropout_prob,
                initializer_range=config.initializer_range,
                do_return_all_layers=True)

        self.sequence_output = self.all_encoder_layers[-1]
        with tf.compat.v1.variable_scope("pooler"):
            first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
            self.pooled_output = tf.keras.layers.Dense(
                config.hidden_size,
                activation=tf.keras.activations.tanh,
                kernel_initializer=create_initializer(
                    config.initializer_range))(first_token_tensor)

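# A self-contained sketch of the reshape/transpose trick above (toy sizes, not
# from the source): two segments encoded as a batch concat [2B, L, H] are
# re-laid-out as a sequence concat [B, 2L, H], so both segments of one example
# share a single attention window.
def _demo_batch_to_seq_concat():
    import tensorflow as tf
    batch, seq, hidden = 2, 3, 4
    x1 = tf.ones([batch, seq, hidden])
    x2 = tf.zeros([batch, seq, hidden])
    batch_concat = tf.concat([x1, x2], 0)                    # [2B, L, H]
    stacked = tf.reshape(batch_concat, [2, batch, seq, -1])  # [2, B, L, H]
    stacked = tf.transpose(stacked, [1, 0, 2, 3])            # [B, 2, L, H]
    seq_concat = tf.reshape(stacked, [batch, seq * 2, -1])   # [B, 2L, H]
    return seq_concat
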
def __init__(self, config, is_training, input_ids, input_mask=None,
             token_type_ids=None, use_one_hot_embeddings=True, scope=None):
    """Constructor for BertModel.

    Args:
      config: `BertConfig` instance.
      is_training: bool. True for training model, false for eval model.
        Controls whether dropout will be applied.
      input_ids: int32 Tensor of shape [batch_size, seq_length].
      input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
      token_type_ids: (optional) int32 Tensor of shape [batch_size,
        seq_length].
      use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
        embeddings or tf.embedding_lookup() for the word embeddings. On the
        TPU, it is much faster if this is True; on the CPU or GPU, it is
        faster if this is False.
      scope: (optional) variable scope. Defaults to "bert".

    Raises:
      ValueError: The config is invalid or one of the input tensor shapes
        is invalid.
    """
    config = copy.deepcopy(config)
    if not is_training:
        config.hidden_dropout_prob = 0.0
        config.attention_probs_dropout_prob = 0.0

    input_shape = get_shape_list(input_ids, expected_rank=2)
    batch_size = input_shape[0]
    seq_length = input_shape[1]

    if input_mask is None:
        input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
    if token_type_ids is None:
        token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)

    with tf.compat.v1.variable_scope(scope, default_name="bert"):
        with tf.compat.v1.variable_scope("embeddings"):
            # Perform embedding lookup on the word ids.
            (self.embedding_output, self.embedding_table) = embedding_lookup(
                input_ids=input_ids,
                vocab_size=config.vocab_size,
                embedding_size=config.hidden_size,
                initializer_range=config.initializer_range,
                word_embedding_name="word_embeddings",
                use_one_hot_embeddings=use_one_hot_embeddings)

            # Add positional embeddings and token type embeddings, then layer
            # normalize and perform dropout.
            self.embedding_output = embedding_postprocessor(
                input_tensor=self.embedding_output,
                use_token_type=True,
                token_type_ids=token_type_ids,
                token_type_vocab_size=config.type_vocab_size,
                token_type_embedding_name="token_type_embeddings",
                use_position_embeddings=True,
                position_embedding_name="position_embeddings",
                initializer_range=config.initializer_range,
                max_position_embeddings=config.max_position_embeddings,
                dropout_prob=config.hidden_dropout_prob)

        with tf.compat.v1.variable_scope("encoder"):
            # This converts a 2D mask of shape [batch_size, seq_length] to a 3D
            # mask of shape [batch_size, seq_length, seq_length] which is used
            # for the attention scores.
            attention_mask = create_attention_mask_from_input_mask(
                input_ids, input_mask)

            # Run the stacked transformer.
            # `sequence_output` shape = [batch_size, seq_length, hidden_size].
            self.all_encoder_layers, key = transformer_model(
                input_tensor=self.embedding_output,
                attention_mask=attention_mask,
                input_mask=input_mask,
                hidden_size=config.hidden_size,
                num_hidden_layers=config.num_hidden_layers,
                num_attention_heads=config.num_attention_heads,
                is_training=is_training,
                #mr_layer=config.mr_layer,
                mr_num_route=config.mr_num_route,
                #mr_key_layer=config.mr_key_layer,
                intermediate_size=config.intermediate_size,
                intermediate_act_fn=get_activation(config.hidden_act),
                hidden_dropout_prob=config.hidden_dropout_prob,
                attention_probs_dropout_prob=config.attention_probs_dropout_prob,
                initializer_range=config.initializer_range,
                do_return_all_layers=True)
        self.key = key
        self.sequence_output = self.all_encoder_layers[-1]

        # The "pooler" converts the encoded sequence tensor of shape
        # [batch_size, seq_length, hidden_size] to a tensor of shape
        # [batch_size, hidden_size]. This is necessary for segment-level
        # (or segment-pair-level) classification tasks where we need a fixed
        # dimensional representation of the segment.
        with tf.compat.v1.variable_scope("pooler"):
            # We "pool" the model by simply taking the hidden state
            # corresponding to the first token. We assume that this has been
            # pre-trained.
            first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
            self.pooled_output = tf.keras.layers.Dense(
                config.hidden_size,
                activation=tf.keras.activations.tanh,
                kernel_initializer=create_initializer(
                    config.initializer_range))(first_token_tensor)

def binary_prediction(bert_config, input_tensor):
    logits = bert_common.dense(
        2, bert_common.create_initializer(
            bert_config.initializer_range))(input_tensor)
    # Note: despite the name, these are softmax probabilities, not log-probs.
    log_probs = tf.nn.softmax(logits, axis=2)
    return logits, log_probs

def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    logging.info("*** Features ***")
    for name in sorted(features.keys()):
        logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    d_input_ids = features["d_input_ids"]
    d_input_mask = features["d_input_mask"]
    d_location_ids = features["d_location_ids"]
    next_sentence_labels = features["next_sentence_labels"]

    if dict_run_config.prediction_op == "loss":
        seed = 0
    else:
        seed = None

    if dict_run_config.prediction_op == "loss_fixed_mask" or train_config.fixed_mask:
        masked_input_ids = input_ids
        masked_lm_positions = features["masked_lm_positions"]
        masked_lm_ids = features["masked_lm_ids"]
        masked_lm_weights = tf.ones_like(masked_lm_positions, dtype=tf.float32)
    else:
        masked_input_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights \
            = random_masking(input_ids, input_mask,
                             train_config.max_predictions_per_seq, MASK_ID, seed)

    if dict_run_config.use_d_segment_ids:
        d_segment_ids = features["d_segment_ids"]
    else:
        d_segment_ids = None

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    model = model_class(
        config=bert_config,
        d_config=dbert_config,
        is_training=is_training,
        input_ids=masked_input_ids,
        input_mask=input_mask,
        d_input_ids=d_input_ids,
        d_input_mask=d_input_mask,
        d_location_ids=d_location_ids,
        use_target_pos_emb=dict_run_config.use_target_pos_emb,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=train_config.use_one_hot_embeddings,
        d_segment_ids=d_segment_ids,
        pool_dict_output=dict_run_config.pool_dict_output,
    )

    (masked_lm_loss, masked_lm_example_loss,
     masked_lm_log_probs) = get_masked_lm_output(
         bert_config, model.get_sequence_output(), model.get_embedding_table(),
         masked_lm_positions, masked_lm_ids, masked_lm_weights)
    (next_sentence_loss, next_sentence_example_loss,
     next_sentence_log_probs) = get_next_sentence_output(
         bert_config, model.get_pooled_output(), next_sentence_labels)

    total_loss = masked_lm_loss

    if dict_run_config.train_op == "entry_prediction":
        score_label = features["useful_entry"]  # [batch, 1]
        score_label = tf.reshape(score_label, [-1])
        entry_logits = bert_common.dense(
            2, bert_common.create_initializer(bert_config.initializer_range))(
                model.get_dict_pooled_output())
        print("entry_logits: ", entry_logits.shape)
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=entry_logits, labels=score_label)
        loss = tf.reduce_mean(losses)
        total_loss = loss

    if dict_run_config.train_op == "lookup":
        lookup_idx = features["lookup_idx"]
        lookup_loss, lookup_example_loss, lookup_score = \
            sequence_index_prediction(bert_config, lookup_idx,
                                      model.get_sequence_output())
        total_loss += lookup_loss

    tvars = tf.compat.v1.trainable_variables()
    init_vars = {}
    scaffold_fn = None
    if train_config.init_checkpoint:
        if dict_run_config.is_bert_checkpoint:
            map1, map2, init_vars = get_bert_assignment_map_for_dict(
                tvars, train_config.init_checkpoint)

            def load_fn():
                tf.compat.v1.train.init_from_checkpoint(
                    train_config.init_checkpoint, map1)
                tf.compat.v1.train.init_from_checkpoint(
                    train_config.init_checkpoint, map2)
        else:
            map1, init_vars = get_assignment_map_as_is(
                tvars, train_config.init_checkpoint)

            def load_fn():
                tf.compat.v1.train.init_from_checkpoint(
                    train_config.init_checkpoint, map1)

        if train_config.use_tpu:
            def tpu_scaffold():
                load_fn()
                return tf.compat.v1.train.Scaffold()

            scaffold_fn = tpu_scaffold
        else:
            load_fn()

    logging.info("**** Trainable Variables ****")
    for var in tvars:
        init_string = ""
        if var.name in init_vars:
            init_string = ", *INIT_FROM_CKPT*"
        logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                     init_string)
    logging.info("Total parameters : %d" % get_param_num())

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
        if train_config.gradient_accumulation == 1:
            train_op = optimization.create_optimizer_from_config(
                total_loss, train_config)
        else:
            logging.info("Using gradient accumulation : %d"
                         % train_config.gradient_accumulation)
            train_op = get_accumulated_optimizer_from_config(
                total_loss, train_config, tvars,
                train_config.gradient_accumulation)
        output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=total_loss,
            train_op=train_op,
            scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:
        eval_metrics = (metric_fn, [
            masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
            masked_lm_weights, next_sentence_example_loss,
            next_sentence_log_probs, next_sentence_labels
        ])
        output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=total_loss,
            eval_metrics=eval_metrics,
            scaffold_fn=scaffold_fn)
    else:
        if dict_run_config.prediction_op == "gradient":
            logging.info("Fetching gradient")
            gradient = get_gradients(model, masked_lm_log_probs,
                                     train_config.max_predictions_per_seq,
                                     bert_config.vocab_size)
            predictions = {
                "masked_input_ids": masked_input_ids,
                #"input_ids": input_ids,
                "d_input_ids": d_input_ids,
                "masked_lm_positions": masked_lm_positions,
                "gradients": gradient,
            }
        elif dict_run_config.prediction_op in ("loss", "loss_fixed_mask"):
            logging.info("Fetching loss")
            predictions = {
                "masked_lm_example_loss": masked_lm_example_loss,
            }
        else:
            raise Exception("prediction target not specified")
        output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=total_loss,
            predictions=predictions,
            scaffold_fn=scaffold_fn)

    return output_spec

def dense(hidden_size, name):
    # Note: `config` is a free variable resolved from the enclosing scope.
    return tf.keras.layers.Dense(
        hidden_size,
        activation=tf.keras.activations.tanh,
        name=name,
        kernel_initializer=create_initializer(config.initializer_range))

def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    tf_logging.info("model_fn_sero_classification")
    log_features(features)

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    batch_size, _ = get_shape_list(input_mask)
    use_context = tf.ones([batch_size, 1], tf.int32)
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    if modeling == "sero":
        model_class = SeroDelta
        print("Using SeroDelta")
    elif modeling == "sero_epsilon":
        model_class = SeroEpsilon
        print("Using SeroEpsilon")
    else:
        assert False

    with tf.compat.v1.variable_scope("sero"):
        model = model_class(config, is_training,
                            train_config.use_one_hot_embeddings)
        input_ids = tf.expand_dims(input_ids, 1)
        input_mask = tf.expand_dims(input_mask, 1)
        segment_ids = tf.expand_dims(segment_ids, 1)
        sequence_output = model.network_stacked(input_ids, input_mask,
                                                segment_ids, use_context)

    first_token_tensor = tf.squeeze(sequence_output[:, 0:1, :], axis=1)
    pooled_output = tf.keras.layers.Dense(
        config.hidden_size,
        activation=tf.keras.activations.tanh,
        kernel_initializer=create_initializer(
            config.initializer_range))(first_token_tensor)

    if "bias_loss" in special_flags:
        loss_weighting = reweight_zero
    else:
        loss_weighting = None

    task = Classification(3, features, pooled_output, is_training,
                          loss_weighting)
    loss = task.loss

    tvars = tf.compat.v1.trainable_variables()
    assignment_fn = assignment_map.assignment_map_v2_to_v2
    initialized_variable_names, init_fn = get_init_fn(
        tvars, train_config.init_checkpoint, assignment_fn)
    scaffold_fn = get_tpu_scaffold_or_init(init_fn, train_config.use_tpu)
    log_var_assignments(tvars, initialized_variable_names)

    TPUEstimatorSpec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec
    if mode == tf.estimator.ModeKeys.TRAIN:
        tf_logging.info("Using single lr")
        train_op = optimization.create_optimizer_from_config(loss, train_config)
        output_spec = TPUEstimatorSpec(mode=mode,
                                       loss=loss,
                                       train_op=train_op,
                                       scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:
        output_spec = TPUEstimatorSpec(mode=mode,
                                       loss=loss,
                                       eval_metrics=task.eval_metrics(),
                                       scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {"input_ids": input_ids, "logits": task.logits}
        output_spec = TPUEstimatorSpec(mode=mode,
                                       loss=loss,
                                       predictions=predictions,
                                       scaffold_fn=scaffold_fn)
    return output_spec
