Example #1
def add_timing_signal_1d_given_position(x,
                                        position,
                                        min_timescale=1.0,
                                        max_timescale=1.0e4):
    """Adds sinusoids of diff frequencies to a Tensor, with timing position given.

  Args:
    x: a Tensor with shape [batch, length, channels]
    position: a Tensor with shape [batch, length]
    min_timescale: a float
    max_timescale: a float

  Returns:
    a Tensor the same shape as x.
  """
    channels = common_layers.shape_list(x)[2]
    num_timescales = channels // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (tf.to_float(num_timescales) - 1))
    inv_timescales = min_timescale * tf.exp(
        tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
    scaled_time = (tf.expand_dims(tf.to_float(position), 2) *
                   tf.expand_dims(tf.expand_dims(inv_timescales, 0), 0))
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
    signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
    signal = common_layers.cast_like(signal, x)
    return x + signal
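
A rough NumPy-only sketch of what this helper computes, for even `channels` (the standalone function below is hypothetical, not part of the library):

import numpy as np

def np_timing_signal_given_position(position, channels,
                                    min_timescale=1.0, max_timescale=1.0e4):
    # position: [batch, length] array of absolute positions.
    num_timescales = channels // 2
    log_inc = np.log(max_timescale / min_timescale) / max(num_timescales - 1, 1)
    inv_timescales = min_timescale * np.exp(-np.arange(num_timescales) * log_inc)
    # [batch, length, 1] * [1, 1, num_timescales] -> [batch, length, num_timescales]
    scaled_time = position[:, :, None].astype(np.float32) * inv_timescales[None, None, :]
    # Concatenate sin and cos along the channel axis -> [batch, length, channels].
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=2)

signal = np_timing_signal_given_position(np.array([[0, 1, 2, 3]]), channels=8)
print(signal.shape)  # (1, 4, 8) -- same trailing shape as x in the function above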
Example #2
    def top(self, body_output, _):
        """Generate logits.

    Args:
      body_output: A Tensor with shape [batch, p0, p1, body_input_depth]
    Returns:
      logits: A Tensor with shape  [batch, p0, p1, ?, vocab_size].
    """
        if self._model_hparams.symbol_modality_skip_top:
            return tf.expand_dims(body_output, 3)

        if self._model_hparams.shared_embedding_and_softmax_weights:
            scope_name = "shared"
            reuse = tf.AUTO_REUSE
        else:
            scope_name = "softmax"
            reuse = False

        with tf.variable_scope(scope_name, reuse=reuse):
            body_output_shape = common_layers.shape_list(body_output)
            var = self._get_weights(body_output_shape[-1])
            body_output = tf.reshape(body_output, [-1, body_output_shape[-1]])
            logits = tf.matmul(body_output, var, transpose_b=True)
            return tf.reshape(logits,
                              body_output_shape[:-1] + [1, self._vocab_size])
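
At its core, the top transform is a reshape-matmul-reshape against the (possibly shared) softmax/embedding matrix; a minimal NumPy sketch of that shape flow (all names and sizes below are hypothetical):

import numpy as np

batch, p0, p1, depth, vocab_size = 2, 3, 1, 4, 10
body_output = np.random.randn(batch, p0, p1, depth).astype(np.float32)
softmax_weights = np.random.randn(vocab_size, depth).astype(np.float32)  # [vocab, depth]

flat = body_output.reshape(-1, depth)                  # [batch * p0 * p1, depth]
logits = flat @ softmax_weights.T                      # matmul(..., transpose_b=True)
logits = logits.reshape(batch, p0, p1, 1, vocab_size)  # [batch, p0, p1, 1, vocab_size]
print(logits.shape)  # (2, 3, 1, 1, 10)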
Example #3
def combine_last_two_dimensions(x):
    """Reshape x so that the last two dimension become one.

  Args:
    x: a Tensor with shape [..., a, b]

  Returns:
    a Tensor with shape [..., ab]
  """
    x_shape = common_layers.shape_list(x)
    a, b = x_shape[-2:]
    return tf.reshape(x, x_shape[:-2] + [a * b])
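
A quick NumPy check of the same reshape (the library version only differs in that it handles dynamic shapes):

import numpy as np

x = np.zeros((2, 5, 4, 8))               # [..., a=4, b=8]
y = x.reshape(x.shape[:-2] + (4 * 8,))   # combine the last two dimensions
print(y.shape)                           # (2, 5, 32)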
Example #4
def merge_beam_dim(tensor):
  """Reshapes first two dimensions in to single dimension.

  Args:
    tensor: Tensor to reshape of shape [A, B, ...]

  Returns:
    Reshaped tensor of shape [A*B, ...]
  """
  shape = common_layers.shape_list(tensor)
  shape[0] *= shape[1]  # batch -> batch * beam_size
  shape.pop(1)  # Remove beam dim
  return tf.reshape(tensor, shape)
Example #5
def add_timing_signal_1d(x,
                         min_timescale=1.0,
                         max_timescale=1.0e4,
                         start_index=0):
    """Adds a bunch of sinusoids of different frequencies to a Tensor.

  Each channel of the input Tensor is incremented by a sinusoid of a different
  frequency and phase.

  This allows attention to learn to use absolute and relative positions.
  Timing signals should be added to some precursors of both the query and the
  memory inputs to attention.

  The use of relative position is possible because sin(x+y) and cos(x+y) can be
  expressed in terms of y, sin(x) and cos(x).

  In particular, we use a geometric sequence of timescales starting with
  min_timescale and ending with max_timescale.  The number of different
  timescales is equal to channels / 2. For each timescale, we
  generate the two sinusoidal signals sin(timestep/timescale) and
  cos(timestep/timescale).  All of these sinusoids are concatenated in
  the channels dimension.

  Args:
    x: a Tensor with shape [batch, length, channels]
    min_timescale: a float
    max_timescale: a float
    start_index: index of first position

  Returns:
    a Tensor the same shape as x.
  """
    length = common_layers.shape_list(x)[1]
    channels = common_layers.shape_list(x)[2]
    signal = get_timing_signal_1d(length, channels, min_timescale,
                                  max_timescale, start_index)
    signal = common_layers.cast_like(signal, x)
    return x + signal
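
To illustrate the relative-position claim in the docstring: for each timescale, the (sin, cos) pair at position p + k is a fixed rotation of the pair at position p, independent of p, so attention can learn relative offsets as linear maps. A small NumPy check with a single hypothetical timescale:

import numpy as np

timescale = 10.0
p, k = 7.0, 3.0
phase = lambda t: np.array([np.sin(t / timescale), np.cos(t / timescale)])

theta = k / timescale
rotation = np.array([[np.cos(theta), np.sin(theta)],
                     [-np.sin(theta), np.cos(theta)]])
# phase(p + k) equals a fixed rotation applied to phase(p), for any p.
print(np.allclose(phase(p + k), rotation @ phase(p)))  # True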
Example #6
def unmerge_beam_dim(tensor, batch_size, beam_size):
  """Reshapes first dimension back to [batch_size, beam_size].

  Args:
    tensor: Tensor to reshape of shape [batch_size*beam_size, ...]
    batch_size: Tensor, original batch size.
    beam_size: int, original beam size.

  Returns:
    Reshaped tensor of shape [batch_size, beam_size, ...]
  """
  shape = common_layers.shape_list(tensor)
  new_shape = [batch_size] + [beam_size] + shape[1:]
  return tf.reshape(tensor, new_shape)
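
merge_beam_dim and unmerge_beam_dim are inverse reshapes; a NumPy sketch of the round trip:

import numpy as np

batch_size, beam_size, length = 2, 4, 5
x = np.arange(batch_size * beam_size * length).reshape(batch_size, beam_size, length)

merged = x.reshape(batch_size * beam_size, length)        # merge_beam_dim
restored = merged.reshape(batch_size, beam_size, length)  # unmerge_beam_dim
print(np.array_equal(x, restored))                        # True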
Example #7
def padded_accuracy(predictions,
                    labels,
                    weights_fn=common_layers.weights_nonzero):
    """Percentage of times that predictions matches labels on non-0s."""
    # If the last dimension is 1 then we're using L1/L2 loss.
    if common_layers.shape_list(predictions)[-1] == 1:
        return rounding_accuracy(predictions, labels, weights_fn=weights_fn)
    with tf.variable_scope("padded_accuracy", values=[predictions, labels]):
        padded_predictions, padded_labels = common_layers.pad_with_zeros(
            predictions, labels)
        weights = weights_fn(padded_labels)
        outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1))
        padded_labels = tf.to_int32(padded_labels)
        return tf.to_float(tf.equal(outputs, padded_labels)), weights
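
The function returns per-token correctness plus a weight mask (zero on padding); the actual accuracy is the weighted mean computed downstream. A NumPy sketch of that reduction, assuming 0 is the padding id:

import numpy as np

outputs = np.array([[3, 5, 2, 0]])   # argmax of the model's logits
labels  = np.array([[3, 4, 2, 0]])   # 0 marks padding

weights = (labels != 0).astype(np.float32)          # weights_nonzero
correct = (outputs == labels).astype(np.float32)
accuracy = (correct * weights).sum() / weights.sum()
print(accuracy)  # 2 correct out of 3 non-padding tokens -> 0.666...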
Example #8
def padded_sequence_accuracy(predictions,
                             labels,
                             weights_fn=common_layers.weights_nonzero):
    """Percentage of times that predictions matches labels everywhere (non-0)."""
    # If the last dimension is 1 then we're using L1/L2 loss.
    if common_layers.shape_list(predictions)[-1] == 1:
        return rounding_sequence_accuracy(predictions,
                                          labels,
                                          weights_fn=weights_fn)
    with tf.variable_scope("padded_sequence_accuracy",
                           values=[predictions, labels]):
        padded_predictions, padded_labels = common_layers.pad_with_zeros(
            predictions, labels)
        weights = weights_fn(padded_labels)

        # Flatten, keeping batch dim (and num_classes dim for predictions)
        # TPU argmax can only deal with a limited number of dimensions
        predictions_shape = common_layers.shape_list(padded_predictions)
        batch_size = predictions_shape[0]
        num_classes = predictions_shape[-1]
        flat_size = common_layers.list_product(
            common_layers.shape_list(padded_labels)[1:])
        padded_predictions = tf.reshape(padded_predictions, [
            batch_size,
            common_layers.list_product(predictions_shape[1:-1]), num_classes
        ])
        padded_labels = tf.reshape(padded_labels, [batch_size, flat_size])
        weights = tf.reshape(weights, [batch_size, flat_size])

        outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1))
        padded_labels = tf.to_int32(padded_labels)
        not_correct = tf.to_float(tf.not_equal(outputs,
                                               padded_labels)) * weights
        axis = list(range(1, len(outputs.get_shape())))
        correct_seq = 1.0 - tf.minimum(1.0,
                                       tf.reduce_sum(not_correct, axis=axis))
        return correct_seq, tf.constant(1.0)
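
Sequence accuracy counts a sequence as correct only if every non-padding position matches; a NumPy sketch of the same not_correct / correct_seq reduction:

import numpy as np

outputs = np.array([[3, 5, 2, 0], [7, 1, 0, 0]])
labels  = np.array([[3, 4, 2, 0], [7, 1, 0, 0]])

weights = (labels != 0).astype(np.float32)
not_correct = (outputs != labels).astype(np.float32) * weights
correct_seq = 1.0 - np.minimum(1.0, not_correct.sum(axis=1))
print(correct_seq)  # [0. 1.] -- only the second sequence matches everywhere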
Example #9
def split_last_dimension(x, n):
    """Reshape x so that the last dimension becomes two dimensions.

  The first of these two dimensions is n.

  Args:
    x: a Tensor with shape [..., m]
    n: an integer.

  Returns:
    a Tensor with shape [..., n, m/n]
  """
    x_shape = common_layers.shape_list(x)
    m = x_shape[-1]
    if isinstance(m, int) and isinstance(n, int):
        assert m % n == 0
    return tf.reshape(x, x_shape[:-1] + [n, m // n])
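
This is the reshape used to split attention tensors into heads; a NumPy check of the shape arithmetic with hypothetical sizes:

import numpy as np

x = np.zeros((2, 7, 64))                    # [..., m=64]
n = 8                                       # e.g. number of attention heads
y = x.reshape(x.shape[:-1] + (n, 64 // n))  # [..., n, m/n]
print(y.shape)                              # (2, 7, 8, 8)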
Example #10
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
    """Percentage of times that top-k predictions matches labels on non-0s."""
    with tf.variable_scope("padded_accuracy_topk",
                           values=[predictions, labels]):
        padded_predictions, padded_labels = common_layers.pad_with_zeros(
            predictions, labels)
        weights = weights_fn(padded_labels)
        effective_k = tf.minimum(
            k,
            common_layers.shape_list(padded_predictions)[-1])
        _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
        outputs = tf.to_int32(outputs)
        padded_labels = tf.to_int32(padded_labels)
        padded_labels = tf.expand_dims(padded_labels, axis=-1)
        padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
        same = tf.to_float(tf.equal(outputs, padded_labels))
        same_topk = tf.reduce_sum(same, axis=-1)
        return same_topk, weights
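
A NumPy sketch of the same top-k check on a toy batch (argsort stands in for tf.nn.top_k; exact tie-breaking may differ):

import numpy as np

logits = np.array([[[0.1, 0.5, 0.2, 0.9],
                    [0.7, 0.1, 0.6, 0.0]]])        # [batch=1, length=2, vocab=4]
labels = np.array([[3, 2]])
k = 2

topk_ids = np.argsort(-logits, axis=-1)[..., :k]   # top-k ids per position
same = (topk_ids == labels[..., None]).any(axis=-1).astype(np.float32)
weights = (labels != 0).astype(np.float32)
print(same, weights)  # [[1. 1.]] [[1. 1.]] -- both labels appear in their top-2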
Example #11
    def body(self, features):
        """Transformer main model_fn.

    Args:
      features: Map of features to the model. Should contain the following:
          "inputs": Transformer inputs.
              [batch_size, input_length, 1, hidden_dim].
          "targets": Target decoder outputs.
              [batch_size, decoder_length, 1, hidden_dim]
          "target_space_id": A scalar int from data_generators.problem.SpaceID.

    Returns:
      Final decoder representation. [batch_size, decoder_length, hidden_dim]
    """
        hparams = self._hparams

        if self.has_input:
            inputs = features["inputs"]
            target_space = features["target_space_id"]
            encoder_output, encoder_decoder_attention_bias = self.encode(
                inputs, target_space, hparams, features=features)
        else:
            encoder_output, encoder_decoder_attention_bias = (None, None)

        targets = features["targets"]
        targets_shape = common_layers.shape_list(targets)
        targets = common_layers.flatten4d3d(targets)
        decoder_input, decoder_self_attention_bias = transformer_prepare_decoder(
            targets, hparams, features=features)
        decoder_output = self.decode(decoder_input,
                                     encoder_output,
                                     encoder_decoder_attention_bias,
                                     decoder_self_attention_bias,
                                     hparams,
                                     nonpadding=features_to_nonpadding(
                                         features, "targets"))

        return tf.reshape(decoder_output, targets_shape)
Example #12
def transformer_prepare_decoder(targets, hparams, features=None):
    """Prepare one shard of the model for the decoder.

  Args:
    targets: a Tensor.
    hparams: run hyperparameters
    features: optionally pass the entire features dictionary as well.
      This is needed now for "packed" datasets.

  Returns:
    decoder_input: a Tensor, bottom of decoder stack
    decoder_self_attention_bias: a bias tensor for use in decoder self-attention
  """
    decoder_self_attention_bias = (
        common_attention.attention_bias_lower_triangle(
            common_layers.shape_list(targets)[1]))

    if features and "targets_segmentation" in features:
        # "Packed" dataset - keep the examples from seeing each other.
        targets_segmentation = features["targets_segmentation"]
        targets_position = features["targets_position"]
        decoder_self_attention_bias += common_attention.attention_bias_same_segment(
            targets_segmentation, targets_segmentation)
    else:
        targets_position = None
    decoder_input = common_layers.shift_right_3d(targets)
    if targets_position is not None:
        decoder_input = common_attention.add_timing_signal_1d_given_position(
            decoder_input, targets_position)
    else:
        decoder_input = common_attention.add_timing_signal_1d(decoder_input)

    if hparams.activation_dtype == "bfloat16":
        decoder_self_attention_bias = tf.cast(decoder_self_attention_bias,
                                              tf.bfloat16)
    return (decoder_input, decoder_self_attention_bias)
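
The two key ingredients are the right shift of the targets (so position t is predicted from tokens < t) and the lower-triangular self-attention bias; a NumPy sketch of both, mirroring what shift_right_3d and attention_bias_lower_triangle are assumed to do:

import numpy as np

targets = np.array([[[1.], [2.], [3.]]])                       # [batch, length, depth]
decoder_input = np.pad(targets, [(0, 0), (1, 0), (0, 0)])[:, :-1, :]
print(decoder_input[..., 0])                                   # [[0. 1. 2.]]

length = targets.shape[1]
# Large negative bias above the diagonal blocks attention to future positions.
bias = np.where(np.tril(np.ones((length, length))) == 1, 0.0, -1e9)
print(bias)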
Example #13
def beam_search(symbols_to_logits_fn,
                initial_ids,
                beam_size,
                decode_length,
                vocab_size,
                alpha,
                states=None,
                kv_encdecs=None,
                eos_id=EOS_ID,
                stop_early=True):
  """Beam search with length penalties.

  Requires a function that can take the currently decoded symbols and return
  the logits for the next symbol. The implementation is inspired by
  https://arxiv.org/abs/1609.08144.

  When running, the beam search steps can be visualized by using tfdbg to watch
  the operations generating the output ids for each beam step.  These operations
  have the pattern:
    (alive|finished)_topk_(seq,scores)

  Operations marked `alive` represent the new beam sequences that will be
  processed in the next step.  Operations marked `finished` represent the
  completed beam sequences, which may be padded with 0s if no beams finished.

  Operations marked `seq` store the full beam sequence for the time step.
  Operations marked `scores` store the sequence's final log scores.

  The beam search steps will be processed sequentially in order, so when
  capturing tensors observed from these operations, clients can make
  assumptions about which step is being recorded.

  WARNING: Assumes the 2nd dimension of tensors in `states` is not invariant; this
  means that the shape of the 2nd dimension of these tensors will not be
  available (i.e. set to None) inside symbols_to_logits_fn.

  Args:
    symbols_to_logits_fn: Interface to the model, to provide logits.
      Should take [batch_size, decoded_ids] and return [batch_size, vocab_size]
    initial_ids: Ids to start off the decoding, this will be the first thing
      handed to symbols_to_logits_fn (after expanding to beam size)
      [batch_size]
    beam_size: Size of the beam.
    decode_length: Number of steps to decode for.
    vocab_size: Size of the vocab, must equal the size of the logits returned by
      symbols_to_logits_fn
    alpha: alpha for length penalty.
    states: dict (possibly nested) of decoding states.
    kv_encdecs: A dict, representing the key and value for encoder-decoder
      attention used by decoding (inference).
    eos_id: ID for end of sentence.
    stop_early: a boolean - stop once best sequence is provably determined.

  Returns:
    Tuple of
    (decoded beams [batch_size, beam_size, decode_length]
     decoding probabilities [batch_size, beam_size])
  """
  batch_size = common_layers.shape_list(initial_ids)[0]

  # Assume initial_ids are prob 1.0
  initial_log_probs = tf.constant([[0.] + [-INF] * (beam_size - 1)])
  # Expand to beam_size (batch_size, beam_size)
  alive_log_probs = tf.tile(initial_log_probs, [batch_size, 1])

  # Expand each batch and state to beam_size
  alive_seq = expand_to_beam_size(initial_ids, beam_size)
  alive_seq = tf.expand_dims(alive_seq, axis=2)  # (batch_size, beam_size, 1)
  alive_seq = tf.tile(alive_seq, [1, 1, decode_length + 1])
  if states:
    states = nest.map_structure(
        lambda state: expand_to_beam_size(state, beam_size), states)
  else:
    states = {}

  # Finished will keep track of all the sequences that have finished so far
  # Finished log probs will be negative infinity in the beginning
  # finished_flags will keep track of booleans
  finished_seq = tf.zeros(common_layers.shape_list(alive_seq), tf.int32)
  # Setting the scores of the initial to negative infinity.
  finished_scores = tf.ones([batch_size, beam_size]) * -INF
  finished_flags = tf.zeros([batch_size, beam_size], tf.bool)

  def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq,
                    curr_scores, curr_finished):
    """Given sequences and scores, will gather the top k=beam size sequences.

    Args:
      finished_seq: Current finished sequences.
        [batch_size, beam_size, current_decoded_length]
      finished_scores: scores for each of these sequences.
        [batch_size, beam_size]
      finished_flags: finished bools for each of these sequences.
        [batch_size, beam_size]
      curr_seq: current topk sequence that has been grown by one position.
        [batch_size, beam_size, current_decoded_length]
      curr_scores: scores for each of these sequences. [batch_size, beam_size]
      curr_finished: Finished flags for each of these sequences.
        [batch_size, beam_size]
    Returns:
      Tuple of
        (Topk sequences based on scores,
         log probs of these sequences,
         Finished flags of these sequences)
    """
    # Set the scores of the unfinished seq in curr_seq to large negative
    # values
    curr_scores += (1. - tf.to_float(curr_finished)) * -INF
    # concatenating the sequences and scores along beam axis
    curr_finished_seq = tf.concat([finished_seq, curr_seq], axis=1)
    curr_finished_scores = tf.concat([finished_scores, curr_scores], axis=1)
    curr_finished_flags = tf.concat([finished_flags, curr_finished], axis=1)
    return compute_topk_scores_and_seq(
        curr_finished_seq, curr_finished_scores, curr_finished_scores,
        curr_finished_flags, beam_size, "grow_finished")

  def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished):
    """Given sequences and scores, will gather the top k=beam size sequences.

    Args:
      curr_seq: current topk sequence that has been grown by one position.
        [batch_size, beam_size, i+1]
      curr_scores: scores for each of these sequences. [batch_size, beam_size]
      curr_log_probs: log probs for each of these sequences.
        [batch_size, beam_size]
      curr_finished: Finished flags for each of these sequences.
        [batch_size, beam_size]
    Returns:
      Tuple of
        (Topk sequences based on scores,
         log probs of these sequences,
         Finished flags of these sequences)
    """
    # Set the scores of the finished seq in curr_seq to large negative
    # values
    curr_scores += tf.to_float(curr_finished) * -INF
    return compute_topk_scores_and_seq(curr_seq, curr_scores, curr_log_probs,
                                       curr_finished, beam_size, "grow_alive")

  def grow_topk(i, alive_seq, alive_log_probs, states):
    r"""Inner beam search loop.

    This function takes the current alive sequences, and grows them to topk
    sequences where k = 2*beam. We use 2*beam because all beam_size sequences
    might hit <EOS>, leaving no alive sequences to continue. With 2*beam_size,
    this cannot happen. This relies on the assumption that the vocab size is
    greater than the beam size; if that is true, we'll have at least beam_size
    non-<EOS> extensions if we extract the next top 2*beam words.
    Length penalty is given by ((5 + len(decode)) / 6) ^ -alpha. Please refer to
    https://arxiv.org/abs/1609.08144.

    Args:
      i: loop index
      alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
      alive_log_probs: probabilities of these sequences. [batch_size, beam_size]
      states: dict (possibly nested) of decoding states.
    Returns:
      Tuple of
        (Topk sequences extended by the next word,
         The log probs of these sequences,
         The scores with length penalty of these sequences,
         Flags indicating which of these sequences have finished decoding,
         dict of transformed decoding states,
         Topk beam index)
    """
    # Get the logits for all the possible next symbols
    if states:
      flat_ids = tf.reshape(
          tf.slice(alive_seq, [0, 0, i], [batch_size, beam_size, 1]),
          [batch_size * beam_size, -1])
    else:
      flat_ids = tf.reshape(alive_seq, [batch_size * beam_size, -1])

    # (batch_size * beam_size, decoded_length)
    if states:
      flat_states = nest.map_structure(merge_beam_dim, states)
      flat_logits, flat_states = symbols_to_logits_fn(
          flat_ids, i, flat_states, kv_encdecs)
      states = nest.map_structure(
          lambda t: unmerge_beam_dim(t, batch_size, beam_size), flat_states)
    else:
      flat_logits = symbols_to_logits_fn(flat_ids, i)

    logits = tf.reshape(flat_logits, [batch_size, beam_size, -1])

    # Convert logits to normalized log probs
    candidate_log_probs = common_layers.log_prob_from_logits(logits)

    # Multiply the probabilities by the current probabilities of the beam.
    # (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1)
    log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)

    length_penalty = tf.pow(((5. + tf.to_float(i + 1)) / 6.), alpha)

    curr_scores = log_probs / length_penalty
    # Flatten out (beam_size, vocab_size) probs in to a list of possibilities
    flat_curr_scores = tf.reshape(curr_scores, [-1, beam_size * vocab_size])

    topk_scores, topk_ids = top_k_with_unique(flat_curr_scores, k=beam_size * 2)

    # Recovering the log probs because we will need to send them back
    topk_log_probs = topk_scores * length_penalty

    # Work out what beam the top probs are in.
    topk_beam_index = topk_ids // vocab_size
    topk_ids %= vocab_size  # Unflatten the ids

    # Gather up the most probable 2*beams both for the ids and
    # finished_in_alive bools
    topk_seq = tf.batch_gather(alive_seq, topk_beam_index)

    # Update the most probable alive
    indices = tf.reshape(
        tf.one_hot(i + 1, decode_length + 1, dtype=topk_seq.dtype),
        [1, 1, decode_length + 1])
    topk_seq += tf.expand_dims(topk_ids, axis=2) * indices

    topk_finished = tf.equal(topk_ids, eos_id)

    return (topk_seq, topk_log_probs, topk_scores, topk_finished, states,
            topk_beam_index)

  def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores,
                 finished_flags, states):
    """Inner beam search loop.

    There are three groups of tensors: alive, finished, and topk.
    The alive group contains information about the current alive sequences.
    The topk group contains information about alive + topk current decoded words.
    The finished group contains information about finished sentences, that is,
    the ones that have decoded to <EOS>. These are what we return.
    The general beam search algorithm is as follows:
    While we haven't terminated (see the termination condition):
      1. Grow the current alive set to get beam*2 topk sequences.
      2. Among the topk, keep the top beam_size ones that haven't reached EOS
      in alive.
      3. Among the topk, keep the top beam_size ones that have reached EOS in
      finished.
    Repeat.
    To keep things simple with fixed-size tensors, we will end up inserting
    unfinished sequences into finished at the beginning. To prevent that, we add
    negative INF to the score of each unfinished sequence so that when a true
    finished sequence does appear, it will have a higher score than all the
    unfinished ones.

    Args:
      i: loop index
      alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
      alive_log_probs: probabilities of the beams. [batch_size, beam_size]
      finished_seq: Current finished sequences.
        [batch_size, beam_size, i+1]
      finished_scores: scores for each of these sequences.
        [batch_size, beam_size]
      finished_flags: finished bools for each of these sequences.
        [batch_size, beam_size]
      states: dict (possibly nested) of decoding states.

    Returns:
      Tuple of
        (Incremented loop index
         New alive sequences,
         Log probs of the alive sequences,
         New finished sequences,
         Scores of the new finished sequences,
         Flags indicating which sequences in finished have reached EOS,
         dict of final decoding states)
    """

    # Each inner loop, we carry out three steps:
    # 1. Get the current topk items.
    # 2. Extract the ones that have finished and haven't finished
    # 3. Recompute the contents of finished based on scores.
    (topk_seq, topk_log_probs, topk_scores, topk_finished, states,
     first_selector) = grow_topk(i, alive_seq, alive_log_probs, states)
    alive_seq, alive_log_probs, _, second_selector = grow_alive(
        topk_seq, topk_scores, topk_log_probs, topk_finished)

    selector = tf.batch_gather(first_selector, second_selector)
    if states:
      states = nest.map_structure(
          lambda state: tf.batch_gather(state, selector), states)

    finished_seq, finished_scores, finished_flags, _ = grow_finished(
        finished_seq, finished_scores, finished_flags, topk_seq, topk_scores,
        topk_finished)

    return (i + 1, alive_seq, alive_log_probs, finished_seq, finished_scores,
            finished_flags, states)

  def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq,
                   finished_scores, unused_finished_in_finished, unused_states):
    """Checking termination condition.

    We terminate when we have decoded up to decode_length or the lowest scoring
    item in finished has a greater score than the highest-prob item in alive
    divided by the max length penalty.

    Args:
      i: loop index
      alive_log_probs: probabilities of the beams. [batch_size, beam_size]
      finished_scores: scores for each of these sequences.
        [batch_size, beam_size]

    Returns:
      Bool.
    """
    max_length_penalty = tf.pow(((5. + tf.to_float(decode_length)) / 6.), alpha)
    # The best possible score of the most likely alive sequence.
    lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty

    if not stop_early:
      # by considering the min score (in the top N beams) we ensure that
      # the decoder will keep decoding until there is at least one beam
      # (in the top N) that can be improved (w.r.t. the alive beams).
      # any unfinished beam will have score -INF - thus the min
      # will always be -INF if there is at least one unfinished beam -
      # which means the bound_is_met condition cannot be true in this case.
      lowest_score_of_finished_in_finished = tf.reduce_min(finished_scores)
    else:
      # by taking the max score we only care about the first beam;
      # as soon as this first beam cannot be beaten from the alive beams
      # the beam decoder can stop.
      # similarly to the above, if the top beam is not completed, its
      # finished_score is -INF, thus it will not activate the
      # bound_is_met condition. (i.e., decoder will keep going on).
      # note we need to find the max for every sequence separately - so, we need
      # to keep the batch dimension (see axis=1)
      lowest_score_of_finished_in_finished = tf.reduce_max(finished_scores,
                                                           axis=1)

    bound_is_met = tf.reduce_all(
        tf.greater(lowest_score_of_finished_in_finished,
                   lower_bound_alive_scores))

    return tf.logical_and(
        tf.less(i, decode_length), tf.logical_not(bound_is_met))

  (_, alive_seq, alive_log_probs, finished_seq, finished_scores,
   finished_flags, _) = tf.while_loop(
       _is_finished,
       inner_loop, [
           tf.constant(0), alive_seq, alive_log_probs, finished_seq,
           finished_scores, finished_flags, states
       ],
       shape_invariants=[
           tf.TensorShape([]),
           tf.TensorShape([batch_size, beam_size, decode_length + 1]),
           alive_log_probs.get_shape(),
           tf.TensorShape([batch_size, beam_size, decode_length + 1]),
           finished_scores.get_shape(),
           finished_flags.get_shape(),
           nest.map_structure(lambda state: state.get_shape(), states),
       ],
       parallel_iterations=1,
       back_prop=False)

  alive_seq.set_shape((None, beam_size, None))
  finished_seq.set_shape((None, beam_size, None))

  # Accounting for a corner case: it's possible that no sequence in alive for a
  # particular batch item ever reached EOS. In that case, we should just copy
  # the contents of alive for that batch item. If tf.reduce_any(finished_flags,
  # 1) is 0, no sequence for that batch index has reached EOS. We do the same
  # for the scores as well.
  finished_seq = tf.where(
      tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)
  finished_scores = tf.where(
      tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)
  return finished_seq, finished_scores
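
The length penalty used in grow_topk and _is_finished is ((5 + length) / 6) ** alpha, and scores are log probs divided by it; a quick numeric check of how larger alpha shifts preference toward longer hypotheses:

def length_penalty(length, alpha):
    # Same formula as in grow_topk / _is_finished above.
    return ((5.0 + length) / 6.0) ** alpha

log_prob = -4.0  # identical raw log prob for two hypothetical hypotheses
for length in (5, 20):
    for alpha in (0.0, 0.6, 1.0):
        print(length, alpha, round(log_prob / length_penalty(length, alpha), 3))
# With alpha > 0, longer sequences are divided by a larger factor, so their
# scores become less negative and they are preferred more strongly.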
Example #14
    def estimator_spec_predict(self, features, use_tpu=False):
        """Constructs `tf.estimator.EstimatorSpec` for PREDICT (inference) mode."""
        decode_hparams = self._decode_hparams
        infer_out = self.infer(features,
                               beam_size=decode_hparams.beam_size,
                               top_beams=1,
                               alpha=decode_hparams.alpha,
                               decode_length=decode_hparams.extra_length,
                               use_tpu=use_tpu)
        if isinstance(infer_out, dict):
            outputs = infer_out["outputs"]
            scores = infer_out["scores"]
        else:
            outputs = infer_out
            scores = None

        inputs = features.get("inputs")
        if inputs is None:
            inputs = features["targets"]

        predictions = {
            "outputs": outputs,
            "scores": scores,
            "inputs": inputs,
            "targets": features.get("infer_targets"),
        }

        # Pass through remaining features
        for name, feature in features.items():
            if name not in list(predictions.keys()) + ["infer_targets"]:
                if name == "decode_loop_step":
                    continue
                if not feature.shape.as_list():
                    # All features must have a batch dimension
                    batch_size = common_layers.shape_list(outputs)[0]
                    feature = tf.tile(tf.expand_dims(feature, 0), [batch_size])
                predictions[name] = feature

        _del_dict_non_tensors(predictions)

        export_out = {"outputs": predictions["outputs"]}
        if "scores" in predictions:
            export_out["scores"] = predictions["scores"]

        # Necessary to rejoin examples in the correct order with the Cloud ML Engine
        # batch prediction API.
        if "batch_prediction_key" in predictions:
            export_out["batch_prediction_key"] = predictions[
                "batch_prediction_key"]

        remove_summaries()

        export_outputs = {
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            tf.estimator.export.PredictOutput(export_out)
        }
        if use_tpu:
            return tf.contrib.tpu.TPUEstimatorSpec(
                tf.estimator.ModeKeys.PREDICT,
                predictions=predictions,
                export_outputs=export_outputs)
        else:
            return tf.estimator.EstimatorSpec(tf.estimator.ModeKeys.PREDICT,
                                              predictions=predictions,
                                              export_outputs=export_outputs)
Example #15
def multihead_attention(query_antecedent,
                        memory_antecedent,
                        bias,
                        total_key_depth,
                        total_value_depth,
                        output_depth,
                        num_heads,
                        dropout_rate,
                        block_length=128,
                        block_width=128,
                        cache=None,
                        kv_encdecs=None,
                        name="multihead_attention",
                        dropout_broadcast_dims=None,
                        **kwargs):
    """Multihead scaled-dot-product attention with input/output transformations.

  Args:
    query_antecedent: a Tensor with shape [batch, length_q, channels]
    memory_antecedent: a Tensor with shape [batch, length_m, channels] or None
    bias: bias Tensor (see attention_bias())
    total_key_depth: an integer
    total_value_depth: an integer
    output_depth: an integer
    num_heads: an integer dividing total_key_depth and total_value_depth
    dropout_rate: a floating point number
    block_length: an integer - relevant for "local_mask_right"
    block_width: an integer - relevant for "local_unmasked"
    cache: A dict, containing Tensors which are the results of previous
      attentions, used for fast decoding. Expects the dict to contain two keys
      ('k' and 'v'); for the initial call, the values for these keys should be
      empty Tensors of the appropriate shape.
      'k': [batch_size, 0, key_channels];
      'v': [batch_size, 0, value_channels].
    kv_encdecs: A dict, representing the key and value for encoder-decoder
      attention used by decoding (inference).
    name: an optional string.
    dropout_broadcast_dims:  an optional list of integers less than 4 specifying
      in which dimensions to broadcast the dropout decisions. Saves memory.
    **kwargs (dict): Parameters for the attention function
  Caching:
    WARNING: For decoder self-attention, i.e. when memory_antecedent == None,
      the caching assumes that the bias contains future masking.  The caching
      works by saving all the previous key and value values so that you are able
      to send just the last query location to this attention function. I.e. if
      the cache dict is provided it assumes the query is of the shape
      [batch_size, 1, hidden_dim] rather than the full memory.

  Returns:
    The result of the attention transformation. The output shape is
        [batch_size, length_q, hidden_dim]
    unless the cache dict is provided in which case only the last memory
    position is calculated and the output shape is [batch_size, 1, hidden_dim]

  Raises:
    ValueError: if the key depth or value depth are not divisible by the
      number of attention heads.
  """
    if total_key_depth % num_heads != 0:
        raise ValueError("Key depth (%d) must be divisible by the number of "
                         "attention heads (%d)." %
                         (total_key_depth, num_heads))
    if total_value_depth % num_heads != 0:
        raise ValueError("Value depth (%d) must be divisible by the number of "
                         "attention heads (%d)." %
                         (total_value_depth, num_heads))
    with tf.variable_scope(name,
                           default_name="multihead_attention",
                           values=[query_antecedent, memory_antecedent]):

        if cache is None or memory_antecedent is None:
            q, k, v = compute_qkv(query_antecedent, memory_antecedent,
                                  total_key_depth, total_value_depth,
                                  num_heads)
        if cache is not None:
            if bias is None:
                raise ValueError(
                    "Bias required for caching. See function docstring "
                    "for details.")

            if memory_antecedent is not None:
                # Encoder-Decoder Attention Cache
                q = compute_attention_component(query_antecedent,
                                                total_key_depth, num_heads,
                                                "q")
                k = kv_encdecs["k_encdec"]
                v = kv_encdecs["v_encdec"]
            else:
                decode_loop_step = kwargs.get("decode_loop_step")
                # Update the tensor by adding one_hot(decode_loop_step) times
                # the value for the current step, since inplace_ops only
                # supports inplace_update on the first dimension. This
                # implementation is faster than the previous version due to the
                # elimination of expensive transpose ops.
                s = common_layers.shape_list(cache["k"])
                indices = tf.reshape(
                    tf.one_hot(decode_loop_step, s[3], dtype=k.dtype),
                    [1, 1, 1, s[3]])
                k = tf.transpose(k, [0, 2, 3, 1])
                cache["k"] = cache["k"] + k * indices
                k = tf.transpose(cache["k"], [0, 3, 1, 2])
                s = common_layers.shape_list(cache["v"])
                indices = tf.reshape(
                    tf.one_hot(decode_loop_step, s[3], dtype=k.dtype),
                    [1, 1, 1, s[3]])
                v = tf.transpose(v, [0, 2, 3, 1])
                cache["v"] = cache["v"] + v * indices
                v = tf.transpose(cache["v"], [0, 3, 1, 2])

        key_depth_per_head = total_key_depth // num_heads
        q *= key_depth_per_head**-0.5

        x = dot_product_attention(
            q,
            k,
            v,
            bias,
            dropout_rate,
            dropout_broadcast_dims=dropout_broadcast_dims)

        x = common_layers.dense(x,
                                output_depth,
                                num_heads,
                                use_bias=False,
                                name="output_transform",
                                reuse=tf.AUTO_REUSE)
        return x
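
For reference, the scaled dot-product attention that dot_product_attention is assumed to implement, reduced to a single head in NumPy (no dropout, hypothetical shapes):

import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

batch, length_q, length_m, depth = 1, 3, 4, 8
q = np.random.randn(batch, length_q, depth) * depth ** -0.5   # query, pre-scaled as above
k = np.random.randn(batch, length_m, depth)
v = np.random.randn(batch, length_m, depth)
bias = np.zeros((batch, length_q, length_m))                  # e.g. -1e9 on masked keys

weights = softmax(q @ k.transpose(0, 2, 1) + bias)            # [batch, length_q, length_m]
out = weights @ v                                             # [batch, length_q, depth]
print(out.shape)  # (1, 3, 8)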
Example #16
    def _fast_decode_tpu(self,
                         features,
                         decode_length,
                         beam_size,
                         top_beams=1,
                         alpha=1.0):
        """Fast decoding.

    Implements beam search decoding on TPU.

    Args:
      features: A map of string to model features.
      decode_length: An integer, how many additional timesteps to decode.
      beam_size: An integer, number of beams.
      top_beams: An integer, how many of the beams to return.
      alpha: A float that controls the length penalty. The larger the alpha,
        the stronger the preference for longer translations.

    Returns:
      A dict of decoding results {
          "outputs": integer `Tensor` of decoded ids of shape
              [batch_size, top_beams, <= decode_length]
          "scores": decoding log probs from the beam search.
      }.

    Raises:
      NotImplementedError: If there are multiple data shards or
        beam_size is one.
    """
        if beam_size == 1:
            raise NotImplementedError(
                "Greedy Decoding is not supported in this MLPerf version.")
        if "targets_segmentation" in features:
            raise NotImplementedError(
                "Decoding not supported on packed datasets "
                " If you want to decode from a dataset, use the non-packed version"
                " of the dataset when decoding.")
        hparams = self._hparams
        target_modality = self._problem_hparams.modality["targets"]

        if self.hparams.activation_dtype == "bfloat16":
            for k, v in sorted(six.iteritems(features)):
                if v.dtype == tf.float32:
                    features[k] = tf.cast(v, tf.bfloat16)

        if self.has_input:
            inputs = features["inputs"]
            if target_modality.is_class_modality:
                decode_length = 1
            else:
                decode_length = (common_layers.shape_list(inputs)[1] +
                                 features.get("decode_length", decode_length))

            # TODO(llion): Clean up this reshaping logic.
            inputs = tf.expand_dims(inputs, axis=1)
            if len(inputs.shape) < 5:
                inputs = tf.expand_dims(inputs, axis=4)
            s = common_layers.shape_list(inputs)
            batch_size = s[0]
            inputs = tf.reshape(inputs, [s[0] * s[1], s[2], s[3], s[4]])
            input_modality = self._problem_hparams.modality["inputs"]
            with tf.variable_scope(input_modality.name):
                inputs = input_modality.bottom(inputs)
            if self.hparams.activation_dtype == "bfloat16":
                inputs = tf.cast(inputs, tf.bfloat16)
            with tf.variable_scope("body"):
                encoder_output, encoder_decoder_attention_bias = self.encode(
                    inputs,
                    features["target_space_id"],
                    hparams,
                    features=features)
            partial_targets = None
        else:
            # The problem has no inputs.
            encoder_output = None
            encoder_decoder_attention_bias = None

            # Prepare partial targets.
            # In either features["inputs"] or features["targets"].
            # We force the outputs to begin with these sequences.
            partial_targets = features.get("inputs")
            if partial_targets is None:
                partial_targets = features["targets"]
            assert partial_targets is not None
            partial_targets = common_layers.expand_squeeze_to_nd(
                partial_targets, 2)
            partial_targets = tf.to_int64(partial_targets)
            partial_targets_shape = common_layers.shape_list(partial_targets)
            partial_targets_length = partial_targets_shape[1]
            decode_length = (partial_targets_length +
                             features.get("decode_length", decode_length))
            batch_size = partial_targets_shape[0]

        def preprocess_targets(targets, i):
            """Performs preprocessing steps on the targets to prepare for the decoder.

      This includes:
        - Embedding the ids.
        - Flattening to 3D tensor.
        - Optionally adding timing signals.

      Args:
        targets: A tensor, inputs ids to the decoder. [batch_size, 1].
        i: An integer, Step number of the decoding loop.

      Returns:
        A tensor, processed targets [batch_size, 1, hidden_dim].
      """
            with tf.variable_scope(target_modality.name):
                targets = target_modality.targets_bottom(targets)
            if self.hparams.activation_dtype == "bfloat16":
                targets = tf.cast(targets, tf.bfloat16)
            targets = common_layers.flatten4d3d(targets)

            # TODO(llion): Explain! Is this even needed?
            targets = tf.cond(tf.equal(i, 0), lambda: tf.zeros_like(targets),
                              lambda: targets)

            positional_encoding = common_attention.get_timing_signal_1d(
                decode_length + 1, hparams.hidden_size)
            positional_encoding_shape = positional_encoding.shape.as_list()
            positional_encoding = common_layers.cast_like(
                positional_encoding, targets)
            targets += tf.slice(positional_encoding, [0, i, 0], [
                positional_encoding_shape[0], 1, positional_encoding_shape[2]
            ])
            return targets

        decoder_self_attention_bias = (
            common_attention.attention_bias_lower_triangle(decode_length))

        def symbols_to_logits_tpu_fn(ids, i, cache, kv_encdecs):
            """Go from ids to logits for next symbol on TPU.

      Args:
        ids: A tensor, symbol IDs.
        i: An integer, step number of the decoding loop. Only used for inference
          on TPU.
        cache: A dict, containing tensors which are the results of previous
          attentions, used for fast decoding.
        kv_encdecs: A dict, representing the keys and values for encoder-decoder
          attention used by decoding (inference).

      Returns:
        ret: A tensor, computed logits.
        cache: A dict, containing tensors which are the results of previous
            attentions, used for fast decoding.
      """
            ids = ids[:, -1:]
            targets = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
            targets = preprocess_targets(targets, i)

            bias_shape = decoder_self_attention_bias.shape.as_list()
            bias = tf.slice(decoder_self_attention_bias, [0, 0, i, 0],
                            [bias_shape[0], bias_shape[1], 1, bias_shape[3]])

            # All other states in the cache are batch major to accommodate the gather
            # op for permutation.
            tiled_encoder_output = beam_search.merge_beam_dim(
                beam_search.expand_to_beam_size(encoder_output, beam_size))
            tiled_encoder_decoder_attention_bias = beam_search.merge_beam_dim(
                beam_search.expand_to_beam_size(encoder_decoder_attention_bias,
                                                beam_size))

            with tf.variable_scope("body"):
                body_outputs = self.decode(
                    targets,
                    tiled_encoder_output,
                    tiled_encoder_decoder_attention_bias,
                    bias,
                    hparams,
                    cache,
                    kv_encdecs,
                    i,
                    nonpadding=features_to_nonpadding(features, "targets"))

            with tf.variable_scope(target_modality.name):
                logits = target_modality.top(body_outputs, None)

            ret = tf.squeeze(logits, axis=[1, 2, 3])
            if partial_targets is not None:
                # If the position is within the given partial targets, we alter the
                # logits to always return those values.
                # A faster approach would be to process the partial targets in one
                # iteration in order to fill the corresponding parts of the cache.
                # This would require broader changes, though.
                vocab_size = tf.shape(ret)[1]

                def forced_logits():
                    return tf.one_hot(
                        tf.tile(
                            tf.slice(partial_targets, [0, i],
                                     [partial_targets.shape.as_list()[0], 1]),
                            [beam_size, 1]), vocab_size, 0.0, -1e9)

                ret = tf.cond(tf.less(i, partial_targets_length),
                              forced_logits, lambda: ret)
            return ret, cache

        ret = fast_decode_tpu(encoder_output=encoder_output,
                              symbols_to_logits_fn=symbols_to_logits_tpu_fn,
                              hparams=hparams,
                              decode_length=decode_length,
                              vocab_size=target_modality.top_dimensionality,
                              beam_size=beam_size,
                              top_beams=top_beams,
                              alpha=alpha,
                              batch_size=batch_size)
        if partial_targets is not None:
            ret["outputs"] = ret["outputs"][:, :, partial_targets_length:]
        return ret
Example #17
def fast_decode_tpu(encoder_output,
                    symbols_to_logits_fn,
                    hparams,
                    decode_length,
                    vocab_size,
                    beam_size,
                    top_beams=1,
                    alpha=1.0,
                    sos_id=0,
                    eos_id=beam_search.EOS_ID,
                    batch_size=None,
                    scope_prefix="body/"):
    """Given encoder output and a symbols to logits function, does fast decoding.

  Implements beam search decoding for TPU.

  Args:
    encoder_output: A tensor, output from encoder.
    symbols_to_logits_fn: Incremental decoding, function mapping triple
      `(ids, step, cache)` to symbol logits.
    hparams: Run hyperparameters.
    decode_length: An integer, how many additional timesteps to decode.
    vocab_size: Output vocabulary size.
    beam_size: An integer, number of beams.
    top_beams: An integer, how many of the beams to return.
    alpha: A float that controls the length penalty. The larger the alpha, the
      stronger the preference for longer translations.
    sos_id: Start-of-sequence symbol.
    eos_id: End-of-sequence symbol.
    batch_size: An integer, must be passed if there is no input.
    scope_prefix: str, prefix for decoder layer variable scopes.

  Returns:
    A dict of decoding results {
        "outputs": integer `Tensor` of decoded ids of shape
            [batch_size, top_beams, <= decode_length]
        "scores": decoding log probs from the beam search.
    }.

  Raises:
    NotImplementedError: If beam size > 1 with partial targets.
  """
    if encoder_output is not None:
        batch_size = common_layers.shape_list(encoder_output)[0]

    key_channels = hparams.attention_key_channels or hparams.hidden_size
    value_channels = hparams.attention_value_channels or hparams.hidden_size
    num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers

    cache = {
        "layer_%d" % layer: {
            "k":
            tf.zeros([
                batch_size, hparams.num_heads,
                key_channels // hparams.num_heads, decode_length
            ],
                     dtype=encoder_output.dtype),
            "v":
            tf.zeros([
                batch_size, hparams.num_heads,
                value_channels // hparams.num_heads, decode_length
            ],
                     dtype=encoder_output.dtype),
        }
        for layer in range(num_layers)
    }

    kv_encdecs = {"layer_%d" % layer: {} for layer in range(num_layers)}
    if encoder_output is not None:
        for layer in range(num_layers):
            layer_name = "layer_%d" % layer
            with tf.variable_scope(
                    "%sdecoder/%s/encdec_attention/multihead_attention" %
                (scope_prefix, layer_name)):
                k_encdec = common_attention.compute_attention_component(
                    encoder_output, key_channels, hparams.num_heads, name="k")
                k_encdec = beam_search.merge_beam_dim(
                    beam_search.expand_to_beam_size(k_encdec, beam_size))
                v_encdec = common_attention.compute_attention_component(
                    encoder_output,
                    value_channels,
                    hparams.num_heads,
                    name="v")
                v_encdec = beam_search.merge_beam_dim(
                    beam_search.expand_to_beam_size(v_encdec, beam_size))
            kv_encdecs[layer_name]["k_encdec"] = k_encdec
            kv_encdecs[layer_name]["v_encdec"] = v_encdec

    initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32)
    decoded_ids, scores = beam_search.beam_search(symbols_to_logits_fn,
                                                  initial_ids,
                                                  beam_size,
                                                  decode_length,
                                                  vocab_size,
                                                  alpha,
                                                  states=cache,
                                                  kv_encdecs=kv_encdecs,
                                                  eos_id=eos_id,
                                                  stop_early=(top_beams == 1))

    if top_beams == 1:
        decoded_ids = decoded_ids[:, 0, 1:]
        scores = scores[:, 0]
    else:
        decoded_ids = decoded_ids[:, :top_beams, 1:]
        scores = scores[:, :top_beams]

    return {"outputs": decoded_ids, "scores": scores}