def attention_bias_local_block(mesh, block_length, memory_length,
                               dtype=tf.int32):
  """Bias for attention for local blocks where attention to right is disallowed.

  Create the bias matrix by using two separate masks: one for the memory part
  which doesn't overlap with the query, and a second which interacts with the
  query and should be disallowed to look to the right of the current query
  position.

  Args:
    mesh: a MeshTensorflow object
    block_length: a mtf.Dimension
    memory_length: a mtf.Dimension
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [block_length, memory_length]
  """
  memory_length = mtf.Dimension(memory_length.name, block_length.size)
  memory_mask = mtf.zeros(mesh, [block_length, memory_length], dtype=dtype)
  mask = mtf.cast(mtf.less(mtf.range(mesh, block_length, dtype=dtype),
                           mtf.range(mesh, memory_length, dtype=dtype)),
                  dtype=dtype)
  mask = mtf.cast(
      mtf.concat([memory_mask, mask], memory_length.name),
      dtype=tf.float32) * -1e9
  return mask
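
# A minimal usage sketch (not part of the library): building the local-block
# bias on a fresh graph/mesh.  The `_example_*` helper name and the toy sizes
# are hypothetical, chosen only to illustrate the shapes involved.
def _example_attention_bias_local_block():
  graph = mtf.Graph()
  mesh = mtf.Mesh(graph, "example_mesh")
  block_length = mtf.Dimension("block_length", 4)
  memory_length = mtf.Dimension("memory_length", 4)
  # The returned bias concatenates the memory block and the query block along
  # the memory axis, so that axis ends up with size 2 * block_length = 8: the
  # first 4 memory positions (the previous block) keep bias 0, while positions
  # strictly to the right of the query position get -1e9.
  return attention_bias_local_block(mesh, block_length, memory_length)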
def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq,
                 finished_scores, finished_in_finished, *unused_states):
  """Checking termination condition.

  We terminate when we have decoded up to decode_length or when the lowest
  scoring item in finished has a greater score than the highest-probability
  item in alive divided by the max length penalty.

  Args:
    i: loop index
    alive_log_probs: probabilities of the beams. [batch_size, beam_size]
    finished_scores: scores for each of these sequences.
      [batch_size, beam_size]
    finished_in_finished: finished bools for each of these sequences.
      [batch_size, beam_size]

  Returns:
    Bool.
  """
  # TODO(noam): support a different decode length...
  # decode_length = mtf.constant(mesh, length_dim.size, dtype=tf.int32)

  # del alive_log_probs, finished_scores, finished_in_finished
  # return mtf.less(i, length_dim.size)
  if not stop_early:
    return mtf.less(i, decode_length)
  max_length_penalty = mtf.pow(
      ((5. + mtf.cast(decode_length, finished_scores.dtype)) / 6.), alpha)
  # The best possible score of the most likely alive sequence.
  lower_bound_alive_scores = mtf.gather(
      alive_log_probs, mtf.constant(mesh, 0, dtype=tf.int32),
      beam_dim) / max_length_penalty

  # Now to compute the lowest score of a finished sequence in finished.
  # If the sequence isn't finished, we multiply its score by 0.  Since
  # scores are all negative, taking the min will give us the score of the
  # lowest finished item.
  lowest_score_of_finished_in_finished = mtf.reduce_min(
      finished_scores * mtf.cast(finished_in_finished, finished_scores.dtype),
      reduced_dim=beam_dim)

  # If none of the sequences have finished, then the min will be 0 and
  # we have to replace it by -INF.  The score of any sequence in alive
  # will be much higher than -INF, so the termination condition will not
  # be met.
  lowest_score_of_finished_in_finished += ((1. - mtf.cast(
      mtf.reduce_any(finished_in_finished, reduced_dim=beam_dim),
      finished_scores.dtype)) * -INF)

  bound_is_met = mtf.reduce_all(
      mtf.greater(lowest_score_of_finished_in_finished,
                  lower_bound_alive_scores))
  return mtf.logical_and(mtf.less(i, decode_length),
                         mtf.logical_not(bound_is_met))
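
# A numeric illustration of the early-stopping bound above (comments only,
# hypothetical values): with alpha=0.6 and decode_length=10, the max length
# penalty is ((5 + 10) / 6) ** 0.6 ~= 1.73.  If the best alive log prob is
# -4.0, the best score it could ever reach is -4.0 / 1.73 ~= -2.31; if the
# worst finished score is -2.0 > -2.31, no alive sequence can overtake the
# finished set and decoding can stop early.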
def attention_bias_local_2d_block(mesh,
                                  h_dim,
                                  w_dim,
                                  memory_h_dim,
                                  memory_w_dim,
                                  dtype=tf.int32):
  """Bias for attention for local blocks where attention to right is disallowed.

  Create the bias matrix by using two separate masks: one for the memory part
  which doesn't overlap with the query, and a second which interacts with the
  query and should be disallowed to look to the right of the current query
  position.

  Args:
    mesh: a MeshTensorflow object
    h_dim: a mtf.Dimension
    w_dim: a mtf.Dimension
    memory_h_dim: a mtf.Dimension
    memory_w_dim: a mtf.Dimension
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [block_length, memory_length]
  """
  memory_height = mtf.Dimension(memory_h_dim.name, h_dim.size)
  memory_width = mtf.Dimension(memory_w_dim.name, w_dim.size)
  mask_top_visible = mtf.zeros(mesh, [h_dim, memory_height], dtype=dtype)
  mask_left_visible = mtf.zeros(mesh, [w_dim, memory_width], dtype=dtype)
  mask_query = mtf.greater(
      mtf.range(mesh, memory_height, dtype=tf.int32),
      mtf.range(mesh, memory_width, dtype=dtype))
  width_mask = mtf.concat([mask_left_visible, mask_query], memory_width.name)
  mask = mtf.cast(
      mtf.concat([mask_top_visible, width_mask], memory_height.name),
      dtype=tf.float32) * -1e9
  return mask
def dense(x, output_dim, reduced_dims=None, expert_dims=None,
          use_bias=True, activation=None,
          master_dtype=tf.float32,
          slice_dtype=tf.float32,
          variable_dtype=None,
          name=None):
  """Dense layer doing (kernel*x + bias) computation.

  Args:
    x: a mtf.Tensor of shape [..., reduced_dims].
    output_dim: a mtf.Dimension
    reduced_dims: an optional list of mtf.Dimensions of x to be reduced. If
      omitted, we reduce the last dimension.
    expert_dims: an optional list of mtf.Dimension which represent different
      experts. Different experts get different weights.
    use_bias: a boolean, whether to add bias.
    activation: an optional function from mtf.Tensor to mtf.Tensor
    master_dtype: a tf.dtype (deprecated - use variable_dtype)
    slice_dtype: a tf.dtype (deprecated - use variable_dtype)
    variable_dtype: a mtf.VariableDType
    name: a string. variable scope.

  Returns:
    a mtf.Tensor of shape [..., output_dim].
  """
  if variable_dtype is None:
    variable_dtype = mtf.VariableDType(master_dtype, slice_dtype, x.dtype)
  if expert_dims is None:
    expert_dims = []
  if reduced_dims is None:
    reduced_dims = x.shape.dims[-1:]
  w_shape = mtf.Shape(expert_dims + reduced_dims + [output_dim])
  output_shape = mtf.Shape(
      [d for d in x.shape.dims if d not in reduced_dims] + [output_dim])
  with tf.variable_scope(name, default_name="dense"):
    stddev = mtf.list_product(d.size for d in reduced_dims) ** -0.5
    w = mtf.get_variable(
        x.mesh,
        "kernel",
        w_shape,
        initializer=tf.random_normal_initializer(stddev=stddev),
        dtype=variable_dtype)
    w = mtf.cast(w, x.dtype)
    y = mtf.einsum([x, w], output_shape)
    if use_bias:
      b = mtf.get_variable(
          x.mesh,
          "bias",
          mtf.Shape(expert_dims + [output_dim]),
          initializer=tf.zeros_initializer(),
          dtype=variable_dtype)
      y += b
    if activation is not None:
      y = activation(y)
    return y
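
# A minimal sketch (not part of the library) of calling dense() above.  The
# `_example_dense_usage` name and the `hidden` size are hypothetical; it
# assumes `x` already lives on a mesh.
def _example_dense_usage(x):
  """Projects x from its last dimension to a new `hidden` dimension.

  Args:
    x: a mtf.Tensor, e.g. with shape [batch, length, d_model].

  Returns:
    a mtf.Tensor with the last dimension replaced by `hidden` (size 512).
  """
  hidden = mtf.Dimension("hidden", 512)
  # reduced_dims defaults to the last dimension of x, and the kernel is
  # initialized with stddev = 1 / sqrt(d_model).
  return dense(x, hidden, activation=mtf.relu, name="example_dense")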
def attention_mask_autoregressive(query_pos, dtype=tf.float32):
  """Bias for self-attention where attention to the right is disallowed.

  Args:
    query_pos: a mtf.Tensor with shape [..., length_dim]
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [..., length_dim, memory_length_dim]
  """
  memory_pos = rename_length_to_memory_length(query_pos)
  return mtf.cast(mtf.less(query_pos, memory_pos), dtype) * -1e9
def attention_mask_ignore_padding(inputs, dtype=tf.float32):
  """Bias for encoder-decoder attention.

  Args:
    inputs: a mtf.Tensor with shape [..., length_dim]
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [..., memory_length_dim]
  """
  inputs = rename_length_to_memory_length(inputs)
  return mtf.cast(mtf.equal(inputs, 0), dtype) * -1e9
def attention_mask_same_segment(
    query_segment, memory_segment=None, dtype=tf.float32):
  """Bias for attention where attention between segments is disallowed.

  Args:
    query_segment: a mtf.Tensor with shape [..., length_dim]
    memory_segment: a mtf.Tensor with shape [..., memory_length_dim]
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [..., length_dim, memory_length_dim]
  """
  memory_segment = rename_length_to_memory_length(
      memory_segment or query_segment)
  return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9
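
# An illustrative sketch (not part of the library): the additive masks above
# each contribute 0 or -1e9 per position, so they can simply be summed.  The
# helper name is hypothetical; it assumes packed examples with position and
# segment tensors of shape [..., length_dim].
def _example_packed_decoder_mask(query_pos, query_segment):
  """Disallows attention to the right and across packed segments."""
  return (attention_mask_autoregressive(query_pos)
          + attention_mask_same_segment(query_segment))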
def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq,
                  curr_scores, curr_finished):
  """Given sequences and scores, will gather the top k=beam size sequences.

  Args:
    finished_seq: Current finished sequences. [batch, beam, length]
    finished_scores: scores for each of these sequences. [batch, beam]
    finished_flags: finished bools for each of these sequences. [batch, beam]
    curr_seq: current topk sequence that has been grown by one position.
      [batch, beam, length]
    curr_scores: scores for each of these sequences. [batch, beam]
    curr_finished: Finished flags for each of these sequences. [batch, beam]

  Returns:
    Tuple of
      (Topk sequences based on scores,
       log probs of these sequences,
       Finished flags of these sequences,
       None (no states))
  """
  # Set the scores of the unfinished seq in curr_seq to large negative
  # values.
  curr_scores += (1. - mtf.cast(curr_finished, curr_scores.dtype)) * -INF
  unused_batch_dim, beam_dim, unused_length_dim = finished_seq.shape.dims

  # Concatenate the sequences and scores along the beam axis.
  def _my_concat(a, b):
    a = mtf.rename_dimension(a, "beam", "triple_beam")
    b = mtf.rename_dimension(b, "double_beam", "triple_beam")
    return mtf.concat([a, b], "triple_beam")

  curr_finished_seq = _my_concat(finished_seq, curr_seq)
  curr_finished_scores = _my_concat(finished_scores, curr_scores)
  curr_finished_flags = _my_concat(finished_flags, curr_finished)
  return compute_topk_scores_and_seq(curr_finished_seq, curr_finished_scores,
                                     curr_finished_scores,
                                     curr_finished_flags, beam_dim,
                                     "grow_finished")
def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished):
  """Given sequences and scores, will gather the top k=beam size sequences.

  Args:
    curr_seq: current topk sequence that has been grown by one position.
      [batch, beam, length]
    curr_scores: scores for each of these sequences. [batch_size, beam_size]
    curr_log_probs: log probs for each of these sequences. [batch, beam]
    curr_finished: Finished flags for each of these sequences. [batch, beam]

  Returns:
    Tuple of
      (Topk sequences based on scores,
       log probs of these sequences,
       Finished flags of these sequences)
  """
  # Set the scores of the finished seq in curr_seq to large negative values.
  curr_scores += mtf.cast(curr_finished, curr_scores.dtype) * -INF
  return compute_topk_scores_and_seq(curr_seq, curr_scores, curr_log_probs,
                                     curr_finished, beam_dim, "grow_alive")
def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores,
               finished_flags, *states):
  """Inner beam search loop.

  There are three groups of tensors: alive, finished, and topk.
  The alive group contains information about the current alive sequences.
  The topk group contains information about alive + topk current decoded
  words.  The finished group contains information about finished sentences,
  that is, the ones that have decoded to <EOS>.  These are what we return.
  The general beam search algorithm is as follows:
  While we haven't terminated (see the termination condition):
    1. Grow the current alive to get beam*2 topk sequences.
    2. Among the topk, keep the top beam_size ones that haven't reached EOS
       into alive.
    3. Among the topk, keep the top beam_size ones that have reached EOS into
       finished.
  Repeat.
  To keep things simple with fixed-size tensors, we will end up inserting
  unfinished sequences into finished in the beginning.  To stop that, we add
  -INF to the score of the unfinished sequence so that when a true finished
  sequence does appear, it will have a higher score than all the unfinished
  ones.

  Args:
    i: loop index
    alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
    alive_log_probs: probabilities of the beams. [batch_size, beam_size]
    finished_seq: Current finished sequences. [batch_size, beam_size, i+1]
    finished_scores: scores for each of these sequences.
      [batch_size, beam_size]
    finished_flags: finished bools for each of these sequences.
      [batch_size, beam_size]
    *states: mtf Tensors

  Returns:
    Tuple of
      (Incremented loop index,
       New alive sequences,
       Log probs of the alive sequences,
       New finished sequences,
       Scores of the new finished sequences,
       Flags indicating which sequences in finished have reached EOS,
       dict of final decoding states)
  """
  states = [
      mtf.replace_dimensions(state, batch_and_beam_dim, [batch_dim, beam_dim])
      for state in states
  ]
  # Each inner loop, we carry out three steps:
  # 1. Get the current topk items.
  # 2. Extract the ones that have finished and haven't finished.
  # 3. Recompute the contents of finished based on scores.
  (top2k_seq, top2k_log_probs, top2k_scores, top2k_finished, new_states,
   first_selector) = grow_topk(i, alive_seq, alive_log_probs, states)
  with tf.variable_scope("grow_alive"):
    alive_seq, alive_log_probs, _, second_selector = grow_alive(
        top2k_seq, top2k_scores, top2k_log_probs, top2k_finished)
  with tf.variable_scope("grow_finished"):
    finished_seq, finished_scores, finished_flags, _ = grow_finished(
        finished_seq, finished_scores, finished_flags, top2k_seq,
        top2k_scores, top2k_finished)
  old_beam_dim = mtf.Dimension("old_beam", beam_dim.size)
  selector = mtf.einsum(
      [mtf.rename_dimension(first_selector, beam_dim.name, old_beam_dim.name),
       second_selector],
      output_shape=[batch_dim, old_beam_dim, beam_dim])
  gathered_states = []
  if use_tpu and layout is not None and mesh_shape is not None:
    # This hack combines the beam dimension with some of the batch dimension.
    # It makes gathering faster on TPU.
    #
    # Instead of multiplying by a [beam, beam] selector matrix, we instead
    # multiply by a [minor_batch*beam, minor_batch*beam] selector matrix.
    # This is theoretically more FLOPs, but it brings the matrix size closer
    # to the magic optimal value of 128.
    #
    # TODO(noam): file a bug with the XLA team to do this automatically.
    major_batch_size = mtf.tensor_dim_to_mesh_dim_size(
        layout, mesh_shape, batch_dim)
    major_batch = mtf.Dimension(batch_dim.name, major_batch_size)
    minor_batch = mtf.Dimension("minor_batch",
                                batch_dim.size // major_batch.size)
    old_minor_batch = mtf.Dimension("old_minor_batch", minor_batch.size)
    old_combined = mtf.Dimension("old_combined",
                                 minor_batch.size * beam_dim.size)
    combined = mtf.Dimension("new_combined", old_combined.size)
    same_minor_batch = mtf.to_float(
        mtf.equal(mtf.range(mesh, old_minor_batch, tf.float32),
                  mtf.range(mesh, minor_batch, tf.float32)))
    selector = mtf.reshape(
        selector, [major_batch, minor_batch, old_beam_dim, beam_dim])
    selector = mtf.einsum(
        [selector, same_minor_batch],
        output_shape=[major_batch, old_minor_batch, old_beam_dim,
                      minor_batch, beam_dim],
        reduced_dims=[])
    selector = mtf.reshape(selector, [major_batch, old_combined, combined])
    for state in new_states:
      s = mtf.replace_dimensions(state, [batch_dim, beam_dim],
                                 [major_batch, old_combined])
      s = mtf.einsum(
          [s, mtf.cast(selector, state.dtype)],
          reduced_dims=[old_combined],
          output_shape=mtf.replace_dimensions(
              state.shape, [batch_dim, beam_dim], [major_batch, combined]))
      gathered_states.append(
          mtf.replace_dimensions(s, [major_batch, combined],
                                 batch_and_beam_dim))
  else:
    for state in new_states:
      state = mtf.einsum(
          [mtf.rename_dimension(state, beam_dim.name, old_beam_dim.name),
           mtf.cast(selector, state.dtype)],
          reduced_dims=[old_beam_dim],
          output_shape=state.shape)
      state = mtf.replace_dimensions(state, [batch_dim, beam_dim],
                                     batch_and_beam_dim)
      gathered_states.append(state)

  return (i + 1, alive_seq, alive_log_probs, finished_seq, finished_scores,
          finished_flags) + tuple(gathered_states)
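
# A sketch (not part of the library): the selector einsum above is just a
# gather across beams expressed as a matmul with a one-hot matrix.  A plain
# numpy illustration under that assumption; the helper name and sizes are
# hypothetical.
def _example_selector_gather():
  """Shows the one-hot-selector einsum as a plain numpy beam gather."""
  import numpy as np  # Local import; illustration only.
  state = np.arange(2 * 3 * 4, dtype=np.float32).reshape(2, 3, 4)
  selector = np.zeros((2, 3, 3), np.float32)
  selector[:, 2, 0] = 1.  # new beam 0 copies old beam 2
  selector[:, 0, 1] = 1.  # new beam 1 copies old beam 0
  selector[:, 1, 2] = 1.  # new beam 2 copies old beam 1
  # gathered[b, n, :] == state[b, argmax(selector[b, :, n]), :]
  return np.einsum("bod,bon->bnd", state, selector)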
def grow_topk(i, alive_seq, alive_log_probs, states=None):
  r"""Inner beam search loop.

  This function takes the current alive sequences and grows them to topk
  sequences where k = 2*beam.  We use 2*beam because we could have beam_size
  sequences that hit <EOS>, leaving no alive sequences to continue.  With
  2*beam_size, this will not happen.  This relies on the assumption that the
  vocab size is > beam size.  If this is true, we'll have at least beam_size
  non-<EOS> extensions if we extract the next top 2*beam words.
  Length penalty is given by = ((5+len(decode))/6) ^ -\alpha.  Please refer to
  https://arxiv.org/abs/1609.08144.

  Args:
    i: loop index
    alive_seq: Topk sequences decoded so far [batch, beam, length]
    alive_log_probs: probabilities of these sequences. [batch, beam]
    states: optional list of mtf.Tensor

  Returns:
    Tuple of
      (Topk sequences extended by the next word,
       The log probs of these sequences,
       The scores with length penalty of these sequences,
       Flags indicating which of these sequences have finished decoding,
       list of transformed decoding states)
  """
  logits, new_states = logits_fn(i, alive_seq, states)
  batch_dim, beam_dim, vocab_dim = logits.shape.dims

  # Convert logits to normalized log probs.
  candidate_log_probs = mtf.log_softmax(logits, vocab_dim)

  # Multiply the probabilities by the current probabilities of the beam.
  # (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1)
  log_probs = candidate_log_probs + alive_log_probs

  length_penalty = mtf.pow(((5. + mtf.cast(i + 1, logits.dtype)) / 6.), alpha)

  # scores have shape [batch, beam, vocab]
  curr_scores = log_probs / length_penalty

  # We find the top 2k sequences to make sure we get k alive sequences.
  #
  # TODO(noam): This is inefficient.  We should separately compute the k
  # finished sequences (previously alive sequences + EOS), and the top k new
  # alive sequences.
  double_beam = mtf.Dimension("double_beam", beam_dim.size * 2)

  if use_tpu and layout is not None and mesh_shape is not None:
    # Do some partial top-k-ing first locally to avoid communication.
    # We reshape the logits from:
    #   [batch, beam, vocab] to
    #   [batch, beam, major_vocab, minor_vocab]
    # We first reduce (locally) across the minor_vocab dimension.  This makes
    # the thing we need to broadcast smaller.
    # This also enables our shortcut of only picking the top num_prefilter
    # sequences per beam per major_vocab in the first pass.
    major_vocab_size = mtf.tensor_dim_to_mesh_dim_size(
        layout, mesh_shape, vocab_dim)
    major_vocab = mtf.Dimension(vocab_dim.name, major_vocab_size)
    minor_vocab = mtf.Dimension("minor_vocab",
                                vocab_dim.size // major_vocab_size)
    curr_scores = mtf.reshape(
        curr_scores, [batch_dim, beam_dim, major_vocab, minor_vocab])
    prefilter = mtf.Dimension("prefilter", num_prefilter or double_beam.size)
    # shape = [batch_dim, beam_dim, major_vocab, prefilter]
    top_scores, top_minor_vocab_ids = mtf.top_k(
        curr_scores, reduced_dim=minor_vocab, k_dim=prefilter)
    combined = mtf.Dimension(
        "combined", beam_dim.size * major_vocab.size * prefilter.size)
    top_scores = mtf.reshape(top_scores, [batch_dim, combined])
    top_minor_vocab_ids = mtf.reshape(top_minor_vocab_ids,
                                      [batch_dim, combined])
    # shape = [batch_dim, double_beam]
    # ids are indices representing (beam, major_vocab, prefilter)
    top_scores, top_combined_ids = mtf.top_k(
        top_scores, reduced_dim=combined, k_dim=double_beam)
    top_minor_vocab_ids = mtf.gather(
        top_minor_vocab_ids, top_combined_ids, combined,
        output_shape=[batch_dim, double_beam])
    top_beam_index = top_combined_ids // (major_vocab.size * prefilter.size)
    top_combined_ids -= top_beam_index * (major_vocab.size * prefilter.size)
    top_major_vocab_ids = top_combined_ids // prefilter.size
    top_combined_ids -= top_major_vocab_ids * prefilter.size
    top_ids = top_major_vocab_ids * minor_vocab.size + top_minor_vocab_ids
  else:
    beam_and_vocab_dim = mtf.Dimension("beam_and_vocab",
                                       beam_dim.size * vocab_dim.size)
    flat_shape = mtf.Shape([batch_dim, beam_and_vocab_dim])
    # Flatten out (beam_size, vocab_size) probs into a list of possibilities.
    flat_curr_scores = mtf.reshape(curr_scores, flat_shape,
                                   name="flatten_scores")
    top_scores, top_ids = mtf.top_k(
        flat_curr_scores, reduced_dim=beam_and_vocab_dim, k_dim=double_beam)
    # Work out what beam the top probs are in.
    top_beam_index = top_ids // vocab_dim.size
    top_ids %= vocab_dim.size  # Unflatten the ids

  # Recover the log probs because we will need to send them back.
  top_log_probs = top_scores * length_penalty

  selector = mtf.one_hot(top_beam_index, beam_dim, dtype=tf.float32)

  def my_gather(tensor):
    return mtf.gather(
        tensor, top_beam_index, beam_dim,
        output_shape=mtf.Shape(
            [double_beam if d == beam_dim else d for d in tensor.shape.dims]))

  # Gather up the most probable 2*beams both for the ids and
  # finished_in_alive bools.
  top_seq = my_gather(alive_seq)

  # Append the most probable alive.
  top_seq += top_ids * mtf.one_hot(i, length_dim, dtype=tf.int32)
  top_finished = mtf.equal(top_ids, eos_id)

  return (top_seq, top_log_probs, top_scores, top_finished, new_states,
          selector)
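
# A sketch (hypothetical helper, not library API): the integer arithmetic in
# grow_topk's TPU branch decodes a flat index over (beam, major_vocab,
# prefilter) back into its parts and rebuilds the vocab id.  In plain Python,
# assuming the minor vocab id was recovered separately (as the gather above
# does):
def _example_decode_combined_id(combined_id, major_vocab_size, prefilter_size,
                                minor_vocab_size, minor_vocab_id):
  """Mirrors the index arithmetic used to unflatten top_combined_ids."""
  beam = combined_id // (major_vocab_size * prefilter_size)
  rest = combined_id - beam * (major_vocab_size * prefilter_size)
  major_vocab_id = rest // prefilter_size
  vocab_id = major_vocab_id * minor_vocab_size + minor_vocab_id
  return beam, vocab_id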
def multihead_self_attention_incremental(query_antecedent,
                                         prev_k,
                                         prev_v,
                                         step_num,
                                         master_dtype,
                                         slice_dtype,
                                         name="multihead_attention"):
  """Incremental self-attention (one decode step).

  In order to use only one variable containing the four weight matrices
  packed together, we insist that the query and memory antecedents have the
  same dimensionality (io_channels) and that the keys and values have the
  same dimensionality (kv_channels).

  Args:
    query_antecedent: a mtf.Tensor with shape [batch..., io_channels]
    prev_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
    prev_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
    step_num: mtf Scalar with dtype tf.int32
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.

  Returns:
    y: A mtf.Tensor with shape [batch..., io_channels]
    new_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
    new_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]

  Raises:
    ValueError: if the dimensions do not match.
  """
  batch_dims = query_antecedent.shape.dims[:-1]
  io_channels = query_antecedent.shape.dims[-1]
  heads, memory_length, kv_channels = prev_k.shape.dims[-3:]
  with tf.variable_scope(name, default_name="multihead_attention"):
    wq, wk, wv, wo = multihead_attention_vars(
        query_antecedent.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, query_antecedent.dtype)
    memory_antecedent = query_antecedent
    q = mtf.einsum(
        [query_antecedent, wq],
        mtf.Shape(batch_dims + [heads, kv_channels]))
    k = mtf.einsum(
        [memory_antecedent, wk],
        mtf.Shape(batch_dims + [heads, kv_channels]))
    v = mtf.einsum(
        [memory_antecedent, wv],
        mtf.Shape(batch_dims + [heads, kv_channels]))
    k = prev_k + mtf.multiply(
        k, mtf.one_hot(step_num, memory_length, dtype=prev_k.dtype),
        output_shape=prev_k.shape)
    v = prev_v + mtf.multiply(
        v, mtf.one_hot(step_num, memory_length, dtype=prev_v.dtype),
        output_shape=prev_v.shape)
    mask = mtf.cast(
        mtf.greater(mtf.range(
            query_antecedent.mesh, memory_length, dtype=tf.int32), step_num),
        q.dtype) * -1e9
    o = dot_product_attention(q, k, v, mask)
    y = mtf.einsum([o, wo], query_antecedent.shape)
    return y, k, v
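
# A minimal usage sketch (not part of the library): running a single decode
# step with zero-initialized key/value caches.  The helper name is
# hypothetical; it assumes `x` has shape [batch..., io_channels] and that
# `heads`, `memory_length`, and `kv_channels` are mtf.Dimensions.
def _example_incremental_step(x, heads, memory_length, kv_channels, step_num):
  cache_shape = mtf.Shape(
      x.shape.dims[:-1] + [heads, memory_length, kv_channels])
  prev_k = mtf.zeros(x.mesh, cache_shape, dtype=x.dtype)
  prev_v = mtf.zeros(x.mesh, cache_shape, dtype=x.dtype)
  # Writes this step's key/value into position `step_num` of the caches and
  # attends only over positions <= step_num.
  return multihead_self_attention_incremental(
      x, prev_k, prev_v, step_num,
      master_dtype=tf.float32, slice_dtype=tf.float32)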
def masked_local_attention_1d(x,
                              kv_channels,
                              heads,
                              window_size=128,
                              master_dtype=tf.float32,
                              slice_dtype=tf.float32,
                              length_per_split=None,
                              return_kv=None,
                              params=None,
                              name=None):
  """Attention to the source position and a neighborhood to the left of it.

  Attention for a given query position p can only see memory positions
  in the range (p - window_size, p].

  Args:
    x: a mtf.Tensor with shape batch_dims + [length, io_channels]
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    window_size: an integer
    master_dtype: a tf.dtype (deprecated - use params arg)
    slice_dtype: a tf.dtype (deprecated - use params arg)
    length_per_split: an optional integer indicating the part of the length
      dimension per processor.  You can omit if the length dimension is not
      split.
    return_kv: an optional list onto which to append the computed k and v.
    params: an optional quadruple of Tensors (see
      multihead_attention_params())
    name: an optional string.

  Returns:
    a Tensor with the same shape as x

  Raises:
    ValueError: if channels or depth don't match.
  """
  with tf.variable_scope(
      name, default_name="masked_local_attention_1d", values=[x]):

    batch_dims = x.shape.dims[:-2]
    length, io_channels = x.shape.dims[-2:]
    if params is None:
      wq, wk, wv, wo = multihead_attention_vars(
          x.mesh, heads, io_channels, kv_channels,
          master_dtype, slice_dtype, x.dtype)
    else:
      wq, wk, wv, wo = params

    # Get query q, keys k and values v.
    qkv_shape = mtf.Shape(batch_dims + [heads, length, kv_channels])
    q = mtf.einsum([x, wq], qkv_shape)
    k = mtf.einsum([x, wk], qkv_shape)
    v = mtf.einsum([x, wv], qkv_shape)
    if return_kv is not None:
      return_kv.extend([k, v])

    # Choose a suitable block size.
    # We choose the greatest divisor of length_per_split less than or equal
    # to max(window_size, 128).
    if length_per_split is None:
      length_per_split = length.size
    block_length = max(window_size, 128)
    while length_per_split % block_length != 0:
      block_length -= 1

    query_block_length = mtf.Dimension("query_block_length", block_length)
    memory_block_length = mtf.Dimension("memory_block_length", block_length)
    # The num_blocks dimension gets the same name as the length dimension,
    # so it will be split in the same way.
    num_blocks = mtf.Dimension(length.name, length.size // block_length)
    q_shape = batch_dims + [heads, num_blocks, query_block_length, kv_channels]
    kv_shape = batch_dims + [
        heads, num_blocks, memory_block_length, kv_channels]
    q = mtf.reshape(q, q_shape)
    k = mtf.reshape(k, kv_shape)
    v = mtf.reshape(v, kv_shape)
    # Augment the keys and values for each block with keys and values for
    # the previous window_size timesteps.
    k = mtf.left_halo_exchange(k, num_blocks, memory_block_length, window_size)
    v = mtf.left_halo_exchange(v, num_blocks, memory_block_length, window_size)
    padded_memory_block_length = mtf.Dimension(
        "memory_block_length", window_size + block_length)
    mpos = mtf.range(x.mesh, padded_memory_block_length, tf.float32)
    qpos = mtf.range(x.mesh, query_block_length, tf.float32) + window_size
    # Prevent looking forward.
    mask = mtf.cast(mtf.greater(mpos, qpos), x.dtype) * -1e9
    # Prevent looking >= block_length timesteps backward.
    mask += mtf.cast(mtf.less_equal(mpos, qpos - block_length), x.dtype) * -1e9
    # Note: The first window_size-1 positions can see back into pre-time
    # where all the keys and values are zero.  We could mask this out, but we
    # don't.
    o = dot_product_attention(q, k, v, mask=mask)
    o = mtf.reshape(o, batch_dims + [heads, length, kv_channels])
    return mtf.einsum(
        [o, wo], mtf.Shape(batch_dims + [length, io_channels]))
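
# A worked illustration of the block mask above (comments only).  Purely for
# illustration, assume window_size=3 and block_length=4 (the real code floors
# block_length at max(window_size, 128)), so padded_memory_block_length=7:
#   mpos = [0, 1, 2, 3, 4, 5, 6]
#   qpos = [3, 4, 5, 6]          (query positions, offset by window_size)
# For the query at qpos=3, positions with mpos > 3 get -1e9 (no looking
# forward) and positions with mpos <= 3 - 4 = -1 get -1e9 (too far back), so
# it can see mpos in {0, 1, 2, 3}: window_size previous steps plus itself,
# matching the (p - window_size, p] range in the docstring.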
def multihead_self_attention_memory_compressed(x,
                                               mask_right,
                                               compression_factor,
                                               kv_channels,
                                               heads,
                                               dropout=0.0,
                                               dropout_broadcast_dims=None,
                                               master_dtype=tf.float32,
                                               slice_dtype=tf.float32,
                                               name="multihead_attention"):
  """Memory-compressed self-attention.

  The memory is first average-pooled (strided) to make it shorter by a
  factor of compression_factor.

  Args:
    x: a mtf.Tensor with shape [<batch_dims>, query_length, io_channels]
    mask_right: a boolean
    compression_factor: an integer
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    dropout: a floating point value
    dropout_broadcast_dims: an optional list of mtf.Dimension
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.

  Returns:
    A mtf.Tensor with shape [batch, query_length, io_channels]

  Raises:
    ValueError: if the dimensions do not match.
  """
  batch_dims = x.shape.dims[:-2]
  length, io_channels = x.shape.dims[-2:]
  with tf.variable_scope(name,
                         default_name="compressed_attention",
                         values=[x]):
    wq, wk, wv, wo = multihead_attention_vars(
        x.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, x.dtype)
    memory_antecedent = compress_mean(x, length, compression_factor)
    memory_antecedent = rename_length_to_memory_length(memory_antecedent)
    memory_length = memory_antecedent.shape.dims[-2]
    q = mtf.einsum(
        [x, wq], mtf.Shape(batch_dims + [heads, length, kv_channels]))
    k = mtf.einsum(
        [memory_antecedent, wk],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    v = mtf.einsum(
        [memory_antecedent, wv],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    if mask_right:
      query_pos = mtf.range(x.mesh, length, dtype=tf.int32)
      memory_pos = (
          mtf.range(x.mesh, memory_length, dtype=tf.int32)
          * compression_factor + (compression_factor - 1))
      mask = mtf.cast(mtf.greater(memory_pos, query_pos), x.dtype) * -1e9
    else:
      mask = None
    o = dot_product_attention(
        q, k, v, mask, dropout, dropout_broadcast_dims, extra_logit=0.0)
    return mtf.einsum(
        [o, wo], mtf.Shape(batch_dims + [length, io_channels]))
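
# A worked example of the mask_right positions above (comments only): with
# compression_factor=3, compressed memory slot j pools input positions
# [3*j, 3*j + 2], and the code assigns it memory_pos = 3*j + 2, the position
# of its last pooled element.  With mask_right=True, a query at position p may
# attend to slot j only if 3*j + 2 <= p, i.e. only to slots whose entire
# pooled window lies at or before p.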