Example #1
  def _body(step, finished, state_trans, state_ae, inputs, outputs, attention, cum_log_probs, extra_vars):
    # Get log probs from the model.
    with tf.variable_scope(trans_model_name, reuse=tf.AUTO_REUSE):
      with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
        result_trans = symbols_to_logits_fn_trans(inputs, step, state_trans)
    with tf.variable_scope(ae_model_name, reuse=tf.AUTO_REUSE):
      with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
        result_ae = symbols_to_logits_fn_ae(inputs, step, state_ae)
    logits_trans, state_trans = result_trans[0], result_trans[1]
    logits_ae, state_ae = result_ae[0], result_ae[1]
    logits_trans = tf.cast(logits_trans, tf.float32)
    logits_ae = tf.cast(logits_ae, tf.float32)
    logits = logits_trans + logits_ae
    if low_prob is not None:
        logits = logits - tf.abs(logits * low_prob)

    # Penalize or force EOS.
    batch_size, vocab_size = misc.shape_list(logits_trans)
    eos_max_prob = tf.one_hot(
        tf.fill([batch_size], end_id),
        vocab_size,
        on_value=logits_trans.dtype.max,
        off_value=logits_trans.dtype.min)
    logits = tf.where(finished, x=eos_max_prob, y=logits)
    log_probs = tf.nn.log_softmax(logits)
    #logits_trans = tf.cond(
    #    step < minimum_iterations,
    #    true_fn=lambda: _penalize_token(logits_trans, end_id),
    #    false_fn=lambda: tf.where(finished, x=eos_max_prob, y=logits_trans))
    #logits_ae    = tf.cond(
    #    step < minimum_iterations,
    #    true_fn=lambda: _penalize_token(logits_ae   , end_id),
    #    false_fn=lambda: tf.where(finished, x=eos_max_prob, y=logits_ae   ))
    #log_probs_trans = tf.nn.log_softmax(logits_trans)
    #log_probs_ae    = tf.nn.log_softmax(logits_ae   )
    #log_probs = 1 * log_probs_trans + 0.2 * log_probs_ae

    # Run one decoding strategy step.
    print ("============================================================")
    print (type(decoding_strategy))
    output, next_cum_log_probs, finished, state_trans, state_ae, extra_vars = decoding_strategy.step(
        step,
        sampler,
        log_probs,
        cum_log_probs,
        finished,
        state_trans,
        state_ae,
        extra_vars,
        attention=None)

    outputs = outputs.write(step, output)
    cum_log_probs = tf.where(finished, x=cum_log_probs, y=next_cum_log_probs)
    finished = tf.logical_or(finished, tf.equal(output, end_id))
    return step + 1, finished, state_trans, state_ae, output, outputs, attention, cum_log_probs, extra_vars
Example #2
def _gather_from_word_indices(tensor, indices):
  """Index the depth dim of a 2D tensor."""
  output_shape = misc.shape_list(indices)
  batch_size = tf.shape(tensor)[0]
  num_indices = tf.size(indices) // batch_size
  batch_pos = tf.range(batch_size * num_indices) // num_indices
  tensor = tf.gather_nd(tensor, tf.stack([batch_pos, tf.reshape(indices, [-1])], axis=-1))
  tensor = tf.reshape(tensor, output_shape)
  return tensor
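
As a rough illustration of what this helper computes (a sketch with made-up values, assuming a TF version that supports `batch_dims`), the same per-row gather can be written with `tf.gather`:

import tensorflow as tf

log_probs = tf.constant([[0.1, 0.2, 0.3, 0.4],
                         [1.0, 2.0, 3.0, 4.0]])          # [batch=2, vocab=4]
word_ids = tf.constant([[3, 0], [1, 1]])                 # [batch=2, k=2]
gathered = tf.gather(log_probs, word_ids, batch_dims=1)  # [[0.4, 0.1], [2.0, 2.0]]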
Example #3
    def __call__(self, tokens, sequence_length=None, keep_shape=True):
        """Applies noise on :obj:`tokens`.

    Args:
      tokens: A string ``tf.Tensor`` or batch of string ``tf.Tensor``.
      sequence_length: When :obj:`tokens` is ND, the length of each sequence in
        the batch.
      keep_shape: Ensure that the shape is kept. Otherwise, fit the shape to the
        new lengths.

    Returns:
      A tuple with the noisy version of :obj:`tokens` and the new lengths.
    """
        rank = tokens.shape.ndims
        if rank == 1:
            input_length = tf.shape(tokens)[0]
            if sequence_length is not None:
                tokens = tokens[:sequence_length]
            else:
                tokens = tokens[:tf.math.count_nonzero(tokens)]
            words = tokens_to_words(tokens,
                                    subword_token=self.subword_token,
                                    is_spacer=self.is_spacer)
            for noise in self.noises:
                words = noise(words)
            outputs = tf.RaggedTensor.from_tensor(words,
                                                  padding="").flat_values
            output_length = tf.shape(outputs)[0]
            if keep_shape:
                outputs = tf.pad(outputs, [[0, input_length - output_length]])
            return outputs, output_length
        elif rank == 2:
            if sequence_length is None:
                raise ValueError(
                    "sequence_length must be passed for 2D inputs")
            tokens, sequence_length = tf.map_fn(
                lambda arg: self(*arg, keep_shape=True),
                (tokens, sequence_length),
                back_prop=False)
            if not keep_shape:
                tokens = tokens[:, :tf.reduce_max(sequence_length)]
            return tokens, sequence_length
        else:
            if sequence_length is None:
                raise ValueError(
                    "sequence_length must be passed for ND inputs")
            original_shape = misc.shape_list(tokens)
            tokens = tf.reshape(tokens, [-1, original_shape[-1]])
            sequence_length = tf.reshape(sequence_length, [-1])
            tokens, sequence_length = self(tokens,
                                           sequence_length,
                                           keep_shape=keep_shape)
            tokens = tf.reshape(tokens, original_shape[:-1] + [-1])
            sequence_length = tf.reshape(sequence_length, original_shape[:-1])
            return tokens, sequence_length
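
For reference, a minimal standalone sketch (hypothetical values, not from the original module) of the keep_shape behaviour in the rank-1 branch above: when noise shortens the sequence, the output is padded back to the original length with empty strings.

import tensorflow as tf

input_length = 5                                                # original (padded) length
noisy = tf.constant([b"hello", b"world", b"!"])                 # noisy sequence, new length 3
output_length = tf.shape(noisy)[0]
padded = tf.pad(noisy, [[0, input_length - output_length]])     # back to length 5, padded with b""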
Example #4
 def call(self, inputs):
     shape = shape_list(inputs)
     rank = len(shape)
     if rank > 2:
         inputs = tf.reshape(inputs, [-1, shape[-1]])
     outputs = tf.matmul(inputs, self.kernel, transpose_b=self.transpose)
     if self.use_bias:
         outputs = tf.nn.bias_add(outputs, self.bias)
     if self.activation is not None:
         outputs = self.activation(outputs)
     if rank > 2:
         outputs = tf.reshape(outputs, shape[:-1] + [self.units])
     return outputs
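
As a rough standalone illustration (tensor values and sizes are made up), the rank > 2 branch above is equivalent to flattening the leading dimensions before the matmul and restoring them afterwards:

import tensorflow as tf

x = tf.random.normal([4, 7, 16])        # [batch, time, depth]
kernel = tf.random.normal([16, 32])     # [depth, units]
shape = [4, 7, 16]
flat = tf.reshape(x, [-1, shape[-1]])   # [batch * time, depth]
y = tf.matmul(flat, kernel)             # [batch * time, units]
y = tf.reshape(y, shape[:-1] + [32])    # [batch, time, units]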
Example #5
 def call(self, inputs):
     if self.weight is None:
         return super(Dense, self).call(inputs)
     shape = shape_list(inputs)
     rank = len(shape)
     if rank > 2:
         inputs = tf.reshape(inputs, [-1, shape[-1]])
     outputs = tf.matmul(inputs, self.kernel, transpose_b=self.transpose)
     if self.use_bias:
         outputs = tf.nn.bias_add(outputs, self.bias)
     if rank > 2:
         outputs = tf.reshape(outputs, shape[:-1] + [self.units])
     return outputs
Example #6
def combine_heads(inputs):
  """Concatenates heads.

  Args:
    inputs: A ``tf.Tensor`` of shape :math:`[B, H, T, D]`.

  Returns:
    A ``tf.Tensor`` of shape :math:`[B, T, D * H]`.
  """
  shape = misc.shape_list(inputs)
  outputs = tf.transpose(inputs, perm=[0, 2, 1, 3])
  outputs = tf.reshape(outputs, [shape[0], shape[2], shape[1] * shape[3]])
  return outputs
Example #7
def split_heads(inputs, num_heads):
  """Splits a tensor in depth.

  Args:
    inputs: A ``tf.Tensor`` of shape :math:`[B, T, D]`.
    num_heads: The number of heads :math:`H`.

  Returns:
    A ``tf.Tensor`` of shape :math:`[B, H, T, D / H]`.
  """
  shape = misc.shape_list(inputs)
  outputs = tf.reshape(inputs, [shape[0], shape[1], num_heads, shape[2] // num_heads])
  outputs = tf.transpose(outputs, perm=[0, 2, 1, 3])
  return outputs
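
A small usage sketch (shapes are made up) showing that split_heads and combine_heads from the two examples above are inverses of each other:

import tensorflow as tf

x = tf.random.normal([2, 5, 8])       # [B, T, D]
heads = split_heads(x, num_heads=4)   # [B, H, T, D / H] == [2, 4, 5, 2]
restored = combine_heads(heads)       # [B, T, D] == [2, 5, 8], equal to x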
Example #8
def _merge_beam_dim(tensor):
  """Reshapes first two dimensions in to single dimension.

  Args:
    tensor: Tensor to reshape of shape [A, B, ...]

  Returns:
    Reshaped tensor of shape [A*B, ...]
  """
  if isinstance(tensor, tf.TensorArray) or tensor.shape.ndims < 1:
    return tensor
  shape = shape_list(tensor)
  shape[0] *= shape[1]  # batch -> batch * beam_size
  shape.pop(1)  # Remove beam dim
  return tf.reshape(tensor, shape)
Example #9
def _unmerge_beam_dim(tensor, batch_size, beam_size):
  """Reshapes first dimension back to [batch_size, beam_size].

  Args:
    tensor: Tensor to reshape of shape [batch_size*beam_size, ...]
    batch_size: Tensor, original batch size.
    beam_size: int, original beam size.

  Returns:
    Reshaped tensor of shape [batch_size, beam_size, ...]
  """
  if isinstance(tensor, tf.TensorArray) or tensor.shape.ndims < 1:
    return tensor
  shape = shape_list(tensor)
  new_shape = [batch_size] + [beam_size] + shape[1:]
  return tf.reshape(tensor, new_shape)
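
A small usage sketch (made-up shapes) of the two beam-dimension helpers above, which undo each other:

import tensorflow as tf

state = tf.zeros([3, 4, 7])                                # [batch, beam, depth]
flat = _merge_beam_dim(state)                              # [batch * beam, depth] == [12, 7]
back = _unmerge_beam_dim(flat, batch_size=3, beam_size=4)  # [3, 4, 7]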
Example #10
    def _body(step, finished, state, inputs, outputs, attention, cum_log_probs,
              extra_vars):
        # Get log probs from the model.
        result = symbols_to_logits_fn(inputs, step, state)
        logits, state = result[0], result[1]
        attn = result[2] if len(result) > 2 else None
        logits = tf.cast(logits, tf.float32)

        # Penalize or force EOS.
        batch_size, vocab_size = misc.shape_list(logits)
        eos_max_prob = tf.one_hot(tf.fill([batch_size], end_id),
                                  vocab_size,
                                  on_value=logits.dtype.max,
                                  off_value=logits.dtype.min)
        logits = tf.cond(step < minimum_iterations,
                         true_fn=lambda: _penalize_token(logits, end_id),
                         false_fn=lambda: tf.where(tf.broadcast_to(
                             tf.expand_dims(finished, -1), tf.shape(logits)),
                                                   x=eos_max_prob,
                                                   y=logits))
        log_probs = tf.nn.log_softmax(logits)

        # Run one decoding strategy step.
        output, next_cum_log_probs, finished, state, extra_vars = (
            decoding_strategy._step(  # pylint: disable=protected-access
                step,
                sampler,
                log_probs,
                cum_log_probs,
                finished,
                state,
                extra_vars,
                attention=attn))

        # Update loop vars.
        if attention_history:
            if attn is None:
                raise ValueError(
                    "attention_history is set but the model did not return attention"
                )
            attention = attention.write(step, tf.cast(attn, tf.float32))
        outputs = outputs.write(step, output)
        cum_log_probs = tf.where(finished,
                                 x=cum_log_probs,
                                 y=next_cum_log_probs)
        finished = tf.logical_or(finished, tf.equal(output, end_id))
        return step + 1, finished, state, output, outputs, attention, cum_log_probs, extra_vars
Example #11
def matmul_with_relative_representations(a, b, transpose_b=False):  # pylint: disable=invalid-name
    """Multiplies :obj:`a` with the relative representations :obj:`b`.

  Args:
    a: Tensor with shape :math:`[B, H, T, _]`.
    b: Tensor with shape :math:`[T, T, _]`.

  Returns:
    Tensor with shape :math:`[B, H, T, T]`.
  """
    batch, head, time, _ = misc.shape_list(a)
    a = tf.transpose(a, perm=[2, 0, 1, 3])
    a = tf.reshape(a, [time, batch * head, -1])
    c = tf.matmul(a, b, transpose_b=transpose_b)
    c = tf.reshape(c, [time, batch, head, -1])
    c = tf.transpose(c, perm=[1, 2, 0, 3])
    return c
Example #12
def matmul_with_relative_representations(a, b, transpose_b=False):
    """Multiplies :obj:`a` with the relative representations :obj:`b`.

    Args:
      a: Tensor with shape :math:`[B, H, T, _]`.
      b: Tensor with shape :math:`[T, T, _]`.

    Returns:
      Tensor with shape :math:`[B, H, T, T]`.
    """
    batch, head, time, depth = misc.shape_list(a)
    a = tf.transpose(a, perm=[2, 0, 1, 3])
    a = tf.reshape(a, [time, batch * head, depth])
    c = tf.matmul(a, b, transpose_b=transpose_b)
    c = tf.reshape(c, [time, batch, head, c.shape[-1] or tf.shape(c)[-1]])
    c = tf.transpose(c, perm=[1, 2, 0, 3])
    return c
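
A usage sketch (random tensors, made-up sizes) of the shapes involved, e.g. when multiplying queries with relative position representations in self-attention:

import tensorflow as tf

B, H, T, depth = 2, 4, 6, 16
a = tf.random.normal([B, H, T, depth])  # e.g. queries split into heads
b = tf.random.normal([T, T, depth])     # relative position representations
c = matmul_with_relative_representations(a, b, transpose_b=True)  # [B, H, T, T]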
Example #13
 def _call(self, tokens, sequence_length, keep_shape):
     rank = tokens.shape.ndims
     if rank == 1:
         input_length = tf.shape(tokens)[0]
         if sequence_length is not None:
             tokens = tokens[:sequence_length]
         else:
             tokens = tokens[:tf.math.count_nonzero(tokens)]
         words = text.tokens_to_words(tokens,
                                      subword_token=self.subword_token,
                                      is_spacer=self.is_spacer)
         words = words.to_tensor()
         for noise in self.noises:
             words = noise(words)
         outputs = tf.RaggedTensor.from_tensor(words,
                                               padding="").flat_values
         output_length = tf.shape(outputs)[0]
         if keep_shape:
             outputs = tf.pad(outputs, [[0, input_length - output_length]])
         return outputs, output_length
     elif rank == 2:
         if sequence_length is None:
             raise ValueError(
                 "sequence_length must be passed for 2D inputs")
         tokens, sequence_length = tf.map_fn(
             lambda arg: self._call(*arg, keep_shape=True),
             (tokens, sequence_length),
             back_prop=False)
         if not keep_shape:
             tokens = tokens[:, :tf.reduce_max(sequence_length)]
         return tokens, sequence_length
     else:
         if sequence_length is None:
             raise ValueError(
                 "sequence_length must be passed for ND inputs")
         original_shape = misc.shape_list(tokens)
         tokens = tf.reshape(tokens, [-1, original_shape[-1]])
         sequence_length = tf.reshape(sequence_length, [-1])
         tokens, sequence_length = self._call(tokens,
                                              sequence_length,
                                              keep_shape=keep_shape)
         tokens = tf.reshape(tokens, original_shape[:-1] + [-1])
         sequence_length = tf.reshape(sequence_length, original_shape[:-1])
         return tokens, sequence_length
Example #14
    def _dynamic_decode(
        self,
        features,
        encoder_outputs,
        encoder_state,
        encoder_sequence_length,
        tflite_run=False,
    ):
        params = self.params
        batch_size = tf.shape(tf.nest.flatten(encoder_outputs)[0])[0]
        start_ids = tf.fill([batch_size], constants.START_OF_SENTENCE_ID)
        beam_size = params.get("beam_width", 1)

        if beam_size > 1:
            # Tile encoder outputs to prepare for beam search.
            encoder_outputs = tfa.seq2seq.tile_batch(encoder_outputs,
                                                     beam_size)
            encoder_sequence_length = tfa.seq2seq.tile_batch(
                encoder_sequence_length, beam_size)
            encoder_state = tf.nest.map_structure(
                lambda state: tfa.seq2seq.tile_batch(state, beam_size)
                if state is not None else None,
                encoder_state,
            )

        # Dynamically decodes from the encoder outputs.
        initial_state = self.decoder.initial_state(
            memory=encoder_outputs,
            memory_sequence_length=encoder_sequence_length,
            initial_state=encoder_state,
        )
        (
            sampled_ids,
            sampled_length,
            log_probs,
            alignment,
            _,
        ) = self.decoder.dynamic_decode(
            self.labels_inputter,
            start_ids,
            initial_state=initial_state,
            decoding_strategy=decoding.DecodingStrategy.from_params(
                params, tflite_mode=tflite_run),
            sampler=decoding.Sampler.from_params(params),
            maximum_iterations=params.get("maximum_decoding_length", 250),
            minimum_iterations=params.get("minimum_decoding_length", 0),
            tflite_output_size=params.get("tflite_output_size", 250)
            if tflite_run else None,
        )

        if tflite_run:
            target_tokens = sampled_ids
        else:
            target_tokens = self.labels_inputter.ids_to_tokens.lookup(
                tf.cast(sampled_ids, tf.int64))
        # Maybe replace unknown targets by the source tokens with the highest attention weight.
        if params.get("replace_unknown_target", False):
            if alignment is None:
                raise TypeError(
                    "replace_unknown_target is not compatible with decoders "
                    "that don't return alignment history")
            if not isinstance(self.features_inputter, inputters.WordEmbedder):
                raise TypeError(
                    "replace_unknown_target is only defined when the source "
                    "inputter is a WordEmbedder")

            source_tokens = features if tflite_run else features["tokens"]
            if beam_size > 1:
                source_tokens = tfa.seq2seq.tile_batch(source_tokens,
                                                       beam_size)
            original_shape = tf.shape(target_tokens)
            if tflite_run:
                target_tokens = tf.squeeze(target_tokens, axis=0)
                output_size = original_shape[-1]
                unknown_token = self.labels_inputter.vocabulary_size - 1
            else:
                target_tokens = tf.reshape(target_tokens,
                                           [-1, original_shape[-1]])
                output_size = tf.shape(target_tokens)[1]
                unknown_token = constants.UNKNOWN_TOKEN

            align_shape = misc.shape_list(alignment)
            attention = tf.reshape(
                alignment,
                [
                    align_shape[0] * align_shape[1], align_shape[2],
                    align_shape[3]
                ],
            )
            attention = reducer.align_in_time(attention, output_size)
            replaced_target_tokens = replace_unknown_target(
                target_tokens,
                source_tokens,
                attention,
                unknown_token=unknown_token)
            if tflite_run:
                target_tokens = replaced_target_tokens
            else:
                target_tokens = tf.reshape(replaced_target_tokens,
                                           original_shape)

        if tflite_run:
            if beam_size > 1:
                target_tokens = tf.transpose(target_tokens)
                target_tokens = target_tokens[:, :1]
            target_tokens = tf.squeeze(target_tokens)

            return target_tokens
        # Maybe add noise to the predictions.
        decoding_noise = params.get("decoding_noise")
        if decoding_noise:
            target_tokens, sampled_length = _add_noise(
                target_tokens,
                sampled_length,
                decoding_noise,
                params.get("decoding_subword_token", "■"),
                params.get("decoding_subword_token_is_spacer"),
            )
            alignment = None  # Invalidate alignments.

        predictions = {"log_probs": log_probs}
        if self.labels_inputter.tokenizer.in_graph:
            detokenized_text = self.labels_inputter.tokenizer.detokenize(
                tf.reshape(target_tokens, [batch_size * beam_size, -1]),
                sequence_length=tf.reshape(sampled_length,
                                           [batch_size * beam_size]),
            )
            predictions["text"] = tf.reshape(detokenized_text,
                                             [batch_size, beam_size])
        else:
            predictions["tokens"] = target_tokens
            predictions["length"] = sampled_length
            if alignment is not None:
                predictions["alignment"] = alignment

        # Maybe restrict the number of returned hypotheses based on the user parameter.
        num_hypotheses = params.get("num_hypotheses", 1)
        if num_hypotheses > 0:
            if num_hypotheses > beam_size:
                raise ValueError("n_best cannot be greater than beam_width")
            for key, value in predictions.items():
                predictions[key] = value[:, :num_hypotheses]
        return predictions
Example #15
    def _call(self, features, labels, params, mode):
        training = mode == tf.estimator.ModeKeys.TRAIN

        features_length = self.features_inputter.get_length(features)
        source_inputs = self.features_inputter.make_inputs(features,
                                                           training=training)
        with tf.variable_scope("encoder"):
            encoder_outputs, encoder_state, encoder_sequence_length = self.encoder.encode(
                source_inputs, sequence_length=features_length, mode=mode)

        target_vocab_size = self.labels_inputter.vocabulary_size
        target_dtype = self.labels_inputter.dtype
        if labels is not None:
            target_inputs = self.labels_inputter.make_inputs(labels,
                                                             training=training)
            with tf.variable_scope("decoder"):
                sampling_probability = None
                if mode == tf.estimator.ModeKeys.TRAIN:
                    sampling_probability = get_sampling_probability(
                        tf.train.get_or_create_global_step(),
                        read_probability=params.get(
                            "scheduled_sampling_read_probability"),
                        schedule_type=params.get("scheduled_sampling_type"),
                        k=params.get("scheduled_sampling_k"))

                logits, _, _, attention = self.decoder.decode(
                    target_inputs,
                    self.labels_inputter.get_length(labels),
                    vocab_size=target_vocab_size,
                    initial_state=encoder_state,
                    sampling_probability=sampling_probability,
                    embedding=self.labels_inputter.embedding,
                    output_layer=self.output_layer,
                    mode=mode,
                    memory=encoder_outputs,
                    memory_sequence_length=encoder_sequence_length,
                    return_alignment_history=True)
                if "alignment" in labels:
                    outputs = {"logits": logits, "attention": attention}
                else:
                    outputs = logits
        else:
            outputs = None

        if mode != tf.estimator.ModeKeys.TRAIN:
            with tf.variable_scope("decoder", reuse=labels is not None):
                batch_size = tf.shape(
                    tf.contrib.framework.nest.flatten(encoder_outputs)[0])[0]
                beam_width = params.get("beam_width", 1)
                start_tokens = tf.fill([batch_size],
                                       constants.START_OF_SENTENCE_ID)
                end_token = constants.END_OF_SENTENCE_ID
                sampled_ids, _, sampled_length, log_probs, alignment = (
                    self.decoder.dynamic_decode_and_search(
                        self.labels_inputter.embedding,
                        start_tokens,
                        end_token,
                        vocab_size=target_vocab_size,
                        initial_state=encoder_state,
                        output_layer=self.output_layer,
                        beam_width=beam_width,
                        length_penalty=params.get("length_penalty", 0),
                        maximum_iterations=params.get("maximum_iterations",
                                                      250),
                        minimum_length=params.get("minimum_decoding_length",
                                                  0),
                        mode=mode,
                        memory=encoder_outputs,
                        memory_sequence_length=encoder_sequence_length,
                        dtype=target_dtype,
                        return_alignment_history=True,
                        sample_from=params.get("sampling_topk"),
                        sample_temperature=params.get("sampling_temperature")))

            target_vocab_rev = self.labels_inputter.vocabulary_lookup_reverse()
            target_tokens = target_vocab_rev.lookup(
                tf.cast(sampled_ids, tf.int64))

            if params.get("replace_unknown_target", False):
                if alignment is None:
                    raise TypeError(
                        "replace_unknown_target is not compatible with decoders "
                        "that don't return alignment history")
                if not isinstance(self.features_inputter,
                                  inputters.WordEmbedder):
                    raise TypeError(
                        "replace_unknown_target is only defined when the source "
                        "inputter is a WordEmbedder")
                source_tokens = features["tokens"]
                if beam_width > 1:
                    source_tokens = tf.contrib.seq2seq.tile_batch(
                        source_tokens, multiplier=beam_width)
                # Merge batch and beam dimensions.
                original_shape = tf.shape(target_tokens)
                target_tokens = tf.reshape(target_tokens,
                                           [-1, original_shape[-1]])
                align_shape = shape_list(alignment)
                attention = tf.reshape(alignment, [
                    align_shape[0] * align_shape[1], align_shape[2],
                    align_shape[3]
                ])
                # We don't have attention for </s> but ensure that the attention time dimension matches
                # the tokens time dimension.
                attention = reducer.align_in_time(attention,
                                                  tf.shape(target_tokens)[1])
                replaced_target_tokens = replace_unknown_target(
                    target_tokens, source_tokens, attention)
                target_tokens = tf.reshape(replaced_target_tokens,
                                           original_shape)

            predictions = {
                "tokens": target_tokens,
                "length": sampled_length,
                "log_probs": log_probs
            }
            if alignment is not None:
                predictions["alignment"] = alignment
        else:
            predictions = None

        return outputs, predictions
Example #16
    def _dynamic_decode(self, features, encoder_outputs, encoder_state,
                        encoder_sequence_length):
        params = self.params
        batch_size = tf.shape(tf.nest.flatten(encoder_outputs)[0])[0]
        start_ids = tf.fill([batch_size], constants.START_OF_SENTENCE_ID)
        beam_size = params.get("beam_width", 1)

        if beam_size > 1:
            # Tile encoder outputs to prepare for beam search.
            encoder_outputs = tfa.seq2seq.tile_batch(encoder_outputs,
                                                     beam_size)
            encoder_sequence_length = tfa.seq2seq.tile_batch(
                encoder_sequence_length, beam_size)
            if encoder_state is not None:
                encoder_state = tfa.seq2seq.tile_batch(encoder_state,
                                                       beam_size)

        # Dynamically decodes from the encoder outputs.
        initial_state = self.decoder.initial_state(
            memory=encoder_outputs,
            memory_sequence_length=encoder_sequence_length,
            initial_state=encoder_state)
        sampled_ids, sampled_length, log_probs, alignment, _ = self.decoder.dynamic_decode(
            self.labels_inputter,
            start_ids,
            initial_state=initial_state,
            decoding_strategy=decoding.DecodingStrategy.from_params(params),
            sampler=decoding.Sampler.from_params(params),
            maximum_iterations=params.get("maximum_decoding_length", 250),
            minimum_iterations=params.get("minimum_decoding_length", 0))
        target_tokens = self.labels_inputter.ids_to_tokens.lookup(
            tf.cast(sampled_ids, tf.int64))

        # Maybe replace unknown targets by the source tokens with the highest attention weight.
        if params.get("replace_unknown_target", False):
            if alignment is None:
                raise TypeError(
                    "replace_unknown_target is not compatible with decoders "
                    "that don't return alignment history")
            if not isinstance(self.features_inputter, inputters.WordEmbedder):
                raise TypeError(
                    "replace_unknown_target is only defined when the source "
                    "inputter is a WordEmbedder")
            source_tokens = features["tokens"]
            if beam_size > 1:
                source_tokens = tfa.seq2seq.tile_batch(source_tokens,
                                                       beam_size)
            # Merge batch and beam dimensions.
            original_shape = tf.shape(target_tokens)
            target_tokens = tf.reshape(target_tokens, [-1, original_shape[-1]])
            align_shape = shape_list(alignment)
            attention = tf.reshape(alignment, [
                align_shape[0] * align_shape[1], align_shape[2], align_shape[3]
            ])
            # We don't have attention for </s> but ensure that the attention time dimension matches
            # the tokens time dimension.
            attention = reducer.align_in_time(attention,
                                              tf.shape(target_tokens)[1])
            replaced_target_tokens = replace_unknown_target(
                target_tokens, source_tokens, attention)
            target_tokens = tf.reshape(replaced_target_tokens, original_shape)

        # Maybe add noise to the predictions.
        decoding_noise = params.get("decoding_noise")
        if decoding_noise:
            target_tokens, sampled_length = _add_noise(
                target_tokens, sampled_length, decoding_noise,
                params.get("decoding_subword_token", "■"))
            alignment = None  # Invalidate alignments.

        predictions = {
            "tokens": target_tokens,
            "length": sampled_length,
            "log_probs": log_probs
        }
        if alignment is not None:
            predictions["alignment"] = alignment

        # Maybe restrict the number of returned hypotheses based on the user parameter.
        num_hypotheses = params.get("num_hypotheses", 1)
        if num_hypotheses > 0:
            if num_hypotheses > beam_size:
                raise ValueError("n_best cannot be greater than beam_width")
            for key, value in six.iteritems(predictions):
                predictions[key] = value[:, :num_hypotheses]
        return predictions
Example #17
  def _call(self, features, labels, params, mode):
    training = mode == tf.estimator.ModeKeys.TRAIN

    features_length = self.features_inputter.get_length(features)
    source_inputs = self.features_inputter.make_inputs(features, training=training)
    with tf.variable_scope("encoder"):
      encoder_outputs, encoder_state, encoder_sequence_length = self.encoder.encode(
          source_inputs,
          sequence_length=features_length,
          mode=mode)

    target_vocab_size = self.labels_inputter.vocabulary_size
    target_dtype = self.labels_inputter.dtype
    if labels is not None:
      sampling_probability = None
      if mode == tf.estimator.ModeKeys.TRAIN:
        sampling_probability = get_sampling_probability(
            tf.train.get_or_create_global_step(),
            read_probability=params.get("scheduled_sampling_read_probability"),
            schedule_type=params.get("scheduled_sampling_type"),
            k=params.get("scheduled_sampling_k"))

      def _decode_inputs(inputs, length, reuse=None):
        with tf.variable_scope("decoder", reuse=reuse):
          return self.decoder.decode(
              inputs,
              length,
              vocab_size=target_vocab_size,
              initial_state=encoder_state,
              sampling_probability=sampling_probability,
              embedding=self.labels_inputter.embedding,
              output_layer=self.output_layer,
              mode=mode,
              memory=encoder_outputs,
              memory_sequence_length=encoder_sequence_length,
              return_alignment_history=True)

      target_inputs = self.labels_inputter.make_inputs(labels, training=training)
      logits, _, _, attention = _decode_inputs(target_inputs, labels["length"])
      if "alignment" in labels:
        outputs = {
            "logits": logits,
            "attention": attention
        }
      else:
        outputs = logits

      noisy_ids = labels.get("noisy_ids")
      if noisy_ids is not None and params.get("contrastive_learning"):
        # In case of contrastive learning, also forward the erroneous
        # translation to compute its log likelihood later.
        noisy_inputs = self.labels_inputter.make_inputs({"ids": noisy_ids}, training=training)
        noisy_logits = _decode_inputs(noisy_inputs, labels["noisy_length"], reuse=True)[0]
        if not isinstance(outputs, dict):
          outputs = dict(logits=outputs)
        outputs["noisy_logits"] = noisy_logits
    else:
      outputs = None

    if mode != tf.estimator.ModeKeys.TRAIN:
      with tf.variable_scope("decoder", reuse=labels is not None):
        batch_size = tf.shape(tf.contrib.framework.nest.flatten(encoder_outputs)[0])[0]
        beam_width = params.get("beam_width", 1)
        start_tokens = tf.fill([batch_size], constants.START_OF_SENTENCE_ID)
        end_token = constants.END_OF_SENTENCE_ID
        sampled_ids, _, sampled_length, log_probs, alignment = (
            self.decoder.dynamic_decode_and_search(
                self.labels_inputter.embedding,
                start_tokens,
                end_token,
                vocab_size=target_vocab_size,
                initial_state=encoder_state,
                output_layer=self.output_layer,
                beam_width=beam_width,
                length_penalty=params.get("length_penalty", 0),
                maximum_iterations=params.get("maximum_iterations", 250),
                minimum_length=params.get("minimum_decoding_length", 0),
                mode=mode,
                memory=encoder_outputs,
                memory_sequence_length=encoder_sequence_length,
                dtype=target_dtype,
                return_alignment_history=True,
                sample_from=params.get("sampling_topk"),
                sample_temperature=params.get("sampling_temperature"),
                coverage_penalty=params.get("coverage_penalty", 0)))

      target_vocab_rev = self.labels_inputter.vocabulary_lookup_reverse()
      target_tokens = target_vocab_rev.lookup(tf.cast(sampled_ids, tf.int64))

      if params.get("replace_unknown_target", False):
        if alignment is None:
          raise TypeError("replace_unknown_target is not compatible with decoders "
                          "that don't return alignment history")
        if not isinstance(self.features_inputter, inputters.WordEmbedder):
          raise TypeError("replace_unknown_target is only defined when the source "
                          "inputter is a WordEmbedder")
        source_tokens = features["tokens"]
        if beam_width > 1:
          source_tokens = tf.contrib.seq2seq.tile_batch(source_tokens, multiplier=beam_width)
        # Merge batch and beam dimensions.
        original_shape = tf.shape(target_tokens)
        target_tokens = tf.reshape(target_tokens, [-1, original_shape[-1]])
        align_shape = shape_list(alignment)
        attention = tf.reshape(
            alignment, [align_shape[0] * align_shape[1], align_shape[2], align_shape[3]])
        # We don't have attention for </s> but ensure that the attention time dimension matches
        # the tokens time dimension.
        attention = reducer.align_in_time(attention, tf.shape(target_tokens)[1])
        replaced_target_tokens = replace_unknown_target(target_tokens, source_tokens, attention)
        target_tokens = tf.reshape(replaced_target_tokens, original_shape)

      decoding_noise = params.get("decoding_noise")
      if decoding_noise:
        sampled_length -= 1  # Ignore </s>
        target_tokens, sampled_length = _add_noise(
            target_tokens,
            sampled_length,
            decoding_noise,
            params.get("decoding_subword_token", "■"))
        sampled_length += 1
        alignment = None  # Invalidate alignments.

      predictions = {
          "tokens": target_tokens,
          "length": sampled_length,
          "log_probs": log_probs
      }
      if alignment is not None:
        predictions["alignment"] = alignment

      num_hypotheses = params.get("num_hypotheses", 1)
      if num_hypotheses > 0:
        if num_hypotheses > beam_width:
          raise ValueError("n_best cannot be greater than beam_width")
        for key, value in six.iteritems(predictions):
          predictions[key] = value[:, :num_hypotheses]
    else:
      predictions = None

    return outputs, predictions
Example #18
def beam_search(symbols_to_logits_fn,
                initial_ids,
                beam_size,
                decode_length,
                vocab_size,
                alpha,
                states=None,
                eos_id=EOS_ID,
                stop_early=True,
                return_states=False,
                tile_states=True,
                min_decode_length=0):
  """Beam search with length penalties.

  Requires a function that can take the currently decoded symbols and return
  the logits for the next symbol. The implementation is inspired by
  https://arxiv.org/abs/1609.08144.

  When running, the beam search steps can be visualized by using tfdbg to watch
  the operations generating the output ids for each beam step.  These operations
  have the pattern:
    (alive|finished)_topk_(seq,scores)

  Operations marked `alive` represent the new beam sequences that will be
  processed in the next step.  Operations marked `finished` represent the
  completed beam sequences, which may be padded with 0s if no beams finished.

  Operations marked `seq` store the full beam sequence for the time step.
  Operations marked `scores` store the sequence's final log scores.

  The beam search steps will be processed sequentially in order, so when
  capturing tensors observed from these operations, clients can make
  assumptions about which step is being recorded.

  WARNING: Assumes the 2nd dimension of tensors in `states` is not invariant;
  this means that the shape of the 2nd dimension of these tensors will not be
  available (i.e. set to None) inside symbols_to_logits_fn.

  Args:
    symbols_to_logits_fn: Interface to the model, to provide logits.
        Should take [batch_size, decoded_ids] and return [batch_size, vocab_size].
    initial_ids: Ids to start off the decoding, this will be the first thing
        handed to symbols_to_logits_fn (after expanding to beam size)
        [batch_size]
    beam_size: Size of the beam.
    decode_length: Number of steps to decode for.
    vocab_size: Size of the vocab, must equal the size of the logits returned by
        symbols_to_logits_fn
    alpha: alpha for length penalty.
    states: dict (possibly nested) of decoding states.
    eos_id: ID for end of sentence.
    stop_early: a boolean - stop once best sequence is provably determined.
    return_states: a boolean - return the updated states dictionary.
    tile_states: a boolean - internally tile the provided states.
    min_decode_length: Minimum length of decoded hypotheses (EOS excluded).
  Returns:
    Tuple of
    (decoded beams [batch_size, beam_size, decode_length]
     decoding probabilities [batch_size, beam_size]) and the decoding
    states if `return_states` is True.
  """
  batch_size = shape_list(initial_ids)[0]

  # Assume initial_ids are prob 1.0
  initial_log_probs = tf.constant([[0.] + [-float("inf")] * (beam_size - 1)])
  # Expand to beam_size (batch_size, beam_size)
  alive_log_probs = tf.tile(initial_log_probs, [batch_size, 1])

  # Expand each batch and state to beam_size
  alive_seq = _expand_to_beam_size(initial_ids, beam_size)
  alive_seq = tf.expand_dims(alive_seq, axis=2)  # (batch_size, beam_size, 1)
  if states:
    states = nest.map_structure(
        lambda state: (
            _expand_to_beam_size(state, beam_size) if tile_states
            else _unmerge_beam_dim(state, batch_size, beam_size)),
        states)
  else:
    states = {}

  # Finished will keep track of all the sequences that have finished so far
  # Finished log probs will be negative infinity in the beginning
  # finished_flags will keep track of booleans
  finished_seq = tf.zeros(shape_list(alive_seq), tf.int32)
  # Setting the scores of the initial finished sequences to negative infinity.
  finished_scores = tf.ones([batch_size, beam_size]) * -INF
  finished_flags = tf.zeros([batch_size, beam_size], tf.bool)

  def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq,
                    curr_scores, curr_finished):
    """Given sequences and scores, will gather the top k=beam size sequences.

    Args:
      finished_seq: Current finished sequences.
        [batch_size, beam_size, current_decoded_length]
      finished_scores: scores for each of these sequences.
        [batch_size, beam_size]
      finished_flags: finished bools for each of these sequences.
        [batch_size, beam_size]
      curr_seq: current topk sequence that has been grown by one position.
        [batch_size, beam_size, current_decoded_length]
      curr_scores: scores for each of these sequences. [batch_size, beam_size]
      curr_finished: Finished flags for each of these sequences.
        [batch_size, beam_size]
    Returns:
      Tuple of
        (Topk sequences based on scores,
         log probs of these sequences,
         Finished flags of these sequences)
    """
    # First append a column of 0-ids to finished_seq so that it has the same
    # length as the newly grown curr_seq.
    finished_seq = tf.concat(
        [finished_seq,
         tf.zeros([batch_size, beam_size, 1], tf.int32)], axis=2)

    # Set the scores of the unfinished seq in curr_seq to large negative
    # values
    curr_scores += (1. - tf.cast(curr_finished, tf.float32)) * -INF
    # concatenating the sequences and scores along beam axis
    curr_finished_seq = tf.concat([finished_seq, curr_seq], axis=1)
    curr_finished_scores = tf.concat([finished_scores, curr_scores], axis=1)
    curr_finished_flags = tf.concat([finished_flags, curr_finished], axis=1)
    return compute_topk_scores_and_seq(
        curr_finished_seq, curr_finished_scores, curr_finished_scores,
        curr_finished_flags, beam_size, batch_size, "grow_finished")

  def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished, states):
    """Given sequences and scores, will gather the top k=beam size sequences.

    Args:
      curr_seq: current topk sequence that has been grown by one position.
        [batch_size, beam_size, i+1]
      curr_scores: scores for each of these sequences. [batch_size, beam_size]
      curr_log_probs: log probs for each of these sequences.
        [batch_size, beam_size]
      curr_finished: Finished flags for each of these sequences.
        [batch_size, beam_size]
      states: dict (possibly nested) of decoding states.
    Returns:
      Tuple of
        (Topk sequences based on scores,
         log probs of these sequences,
         Finished flags of these sequences)
    """
    # Set the scores of the finished seq in curr_seq to large negative
    # values
    curr_scores += tf.cast(curr_finished, tf.float32) * -INF
    return compute_topk_scores_and_seq(curr_seq, curr_scores, curr_log_probs,
                                       curr_finished, beam_size, batch_size,
                                       "grow_alive", states)

  def grow_topk(i, alive_seq, alive_log_probs, states):
    r"""Inner beam search loop.

    This function takes the current alive sequences, and grows them to topk
    sequences where k = 2*beam. We use 2*beam because we could have beam_size
    number of sequences that might hit <EOS> and there would then be no alive
    sequences left to continue. With 2*beam_size, this will not happen. This
    relies on the assumption that the vocab size is > beam size. If this is
    true, we'll have at least beam_size non <EOS> extensions if we extract the
    next top 2*beam words.
    The length penalty is given by ((5 + len(decode)) / 6) ^ alpha, matching the
    implementation below. Please refer to https://arxiv.org/abs/1609.08144.

    Args:
      i: loop index
      alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
      alive_log_probs: probabilities of these sequences. [batch_size, beam_size]
      states: dict (possibly nested) of decoding states.
    Returns:
      Tuple of
        (Topk sequences extended by the next word,
         The log probs of these sequences,
         The scores with length penalty of these sequences,
         Flags indicating which of these sequences have finished decoding,
         dict of transformed decoding states)
    """
    # Get the logits for all the possible next symbols
    flat_ids = tf.reshape(alive_seq, [batch_size * beam_size, -1])

    # (batch_size * beam_size, decoded_length)
    if states:
      flat_states = nest.map_structure(_merge_beam_dim, states)
      flat_logits, flat_states = symbols_to_logits_fn(flat_ids, i, flat_states)
      states = nest.map_structure(
          lambda t: _unmerge_beam_dim(t, batch_size, beam_size), flat_states)
    else:
      flat_logits = symbols_to_logits_fn(flat_ids)

    logits = tf.reshape(flat_logits, [batch_size, beam_size, -1])

    # Convert logits to normalized log probs
    candidate_log_probs = _log_prob_from_logits(tf.cast(logits, tf.float32))

    # Multiply the probabilities by the current probabilities of the beam.
    # (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1)
    log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
    if min_decode_length > 0:
      log_probs = tf.cond(
          i < min_decode_length,
          true_fn=lambda: _unmerge_beam_dim(
              penalize_token(_merge_beam_dim(log_probs), eos_id),
              batch_size, beam_size),
          false_fn=lambda: log_probs)

    length_penalty = tf.pow(((5. + tf.cast(i + 1, tf.float32)) / 6.), alpha)

    curr_scores = log_probs / length_penalty
    # Flatten out (beam_size, vocab_size) probs in to a list of possibilities
    flat_curr_scores = tf.reshape(curr_scores, [-1, beam_size * vocab_size])

    topk_scores, topk_ids = tf.nn.top_k(flat_curr_scores, k=beam_size * 2)

    # Recovering the log probs because we will need to send them back
    topk_log_probs = topk_scores * length_penalty

    # Work out what beam the top probs are in.
    topk_beam_index = topk_ids // vocab_size
    topk_ids %= vocab_size  # Unflatten the ids

    # The next three steps are to create coordinates for tf.gather_nd to pull
    # out the correct sequences from id's that we need to grow.
    # We will also use the coordinates to gather the booleans of the beam items
    # that survived.
    batch_pos = compute_batch_indices(batch_size, beam_size * 2)

    # top beams will give us the actual coordinates to do the gather.
    # stacking will create a tensor of dimension batch * beam * 2, where the
    # last dimension contains the i,j gathering coordinates.
    topk_coordinates = tf.stack([batch_pos, topk_beam_index], axis=2)

    # Gather up the most probable 2*beams both for the ids and finished_in_alive
    # bools
    topk_seq = tf.gather_nd(alive_seq, topk_coordinates)
    if states:
      states = nest.map_structure(
          lambda state: _gather_state(state, topk_coordinates), states)

    # Append the most probable alive
    topk_seq = tf.concat([topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2)

    topk_finished = tf.equal(topk_ids, eos_id)

    return topk_seq, topk_log_probs, topk_scores, topk_finished, states

  def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores,
                 finished_flags, states):
    """Inner beam search loop.

    There are three groups of tensors, alive, finished, and topk.
    The alive group contains information about the current alive sequences
    The topk group contains information about alive + topk current decoded words.
    The finished group contains information about finished sentences, that is,
    the ones that have decoded to <EOS>. These are what we return.
    The general beam search algorithm is as follows:
    While we haven't terminated (see the termination condition below):
      1. Grow the current alive set to get beam*2 topk sequences.
      2. Among the topk, keep the top beam_size ones that haven't reached EOS
         as the new alive set.
      3. Among the topk, keep the top beam_size ones that have reached EOS as
         the new finished set.
    Repeat.
    To keep things simple with fixed-size tensors, we will end up inserting
    unfinished sequences into finished at the beginning. To prevent that, we add
    -INF to the score of any unfinished sequence so that when a true finished
    sequence does appear, it will have a higher score than all the unfinished
    ones.

    Args:
      i: loop index
      alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
      alive_log_probs: probabilities of the beams. [batch_size, beam_size]
      finished_seq: Current finished sequences.
        [batch_size, beam_size, i+1]
      finished_scores: scores for each of these sequences.
        [batch_size, beam_size]
      finished_flags: finished bools for each of these sequences.
        [batch_size, beam_size]
      states: dict (possibly nested) of decoding states.

    Returns:
      Tuple of
        (Incremented loop index
         New alive sequences,
         Log probs of the alive sequences,
         New finished sequences,
         Scores of the new finished sequences,
         Flags indicating which sequences in finished have reached EOS,
         dict of final decoding states)
    """

    # Each inner loop, we carry out three steps:
    # 1. Get the current topk items.
    # 2. Extract the ones that have finished and haven't finished
    # 3. Recompute the contents of finished based on scores.
    topk_seq, topk_log_probs, topk_scores, topk_finished, states = grow_topk(
        i, alive_seq, alive_log_probs, states)
    alive_seq, alive_log_probs, _, states = grow_alive(
        topk_seq, topk_scores, topk_log_probs, topk_finished, states)
    finished_seq, finished_scores, finished_flags, _ = grow_finished(
        finished_seq, finished_scores, finished_flags, topk_seq, topk_scores,
        topk_finished)

    return (i + 1, alive_seq, alive_log_probs, finished_seq, finished_scores,
            finished_flags, states)

  def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq,
                   finished_scores, unused_finished_in_finished, unused_states):
    """Checking termination condition.

    We terminate when we have decoded up to decode_length, or when the lowest
    scoring item in finished has a greater score than the highest prob item in
    alive divided by the max length penalty.

    Args:
      i: loop index
      alive_log_probs: probabilities of the beams. [batch_size, beam_size]
      finished_scores: scores for each of these sequences.
        [batch_size, beam_size]

    Returns:
      Bool.
    """
    max_length_penalty = tf.pow(((5. + tf.cast(decode_length, tf.float32)) / 6.), alpha)
    # The best possible score of the most likely alive sequence.
    lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty

    if not stop_early:
      # by considering the min score (in the top N beams) we ensure that
      # the decoder will keep decoding until there is at least one beam
      # (in the top N) that can be improved (w.r.t. the alive beams).
      # any unfinished beam will have score -INF - thus the min
      # will always be -INF if there is at least one unfinished beam -
      # which means the bound_is_met condition cannot be true in this case.
      lowest_score_of_finished_in_finished = tf.reduce_min(finished_scores)
    else:
      # by taking the max score we only care about the first beam;
      # as soon as this first beam cannot be beaten from the alive beams
      # the beam decoder can stop.
      # similarly to the above, if the top beam is not completed, its
      # finished_score is -INF, thus it will not activate the
      # bound_is_met condition. (i.e., decoder will keep going on).
      # note we need to find the max for every sequence separately - so, we need
      # to keep the batch dimension (see axis=1)
      lowest_score_of_finished_in_finished = tf.reduce_max(finished_scores, axis=1)

    bound_is_met = tf.reduce_all(
        tf.greater(lowest_score_of_finished_in_finished,
                   lower_bound_alive_scores))

    return tf.logical_and(
        tf.less(i, decode_length), tf.logical_not(bound_is_met))

  (_, alive_seq, alive_log_probs, finished_seq, finished_scores,
   finished_flags, states) = tf.while_loop(
       _is_finished,
       inner_loop, [
           tf.constant(0), alive_seq, alive_log_probs, finished_seq,
           finished_scores, finished_flags, states
       ],
       shape_invariants=[
           tf.TensorShape([]),
           tf.TensorShape([None, None, None]),
           alive_log_probs.get_shape(),
           tf.TensorShape([None, None, None]),
           finished_scores.get_shape(),
           finished_flags.get_shape(),
           nest.map_structure(get_state_shape_invariants, states),
       ],
       parallel_iterations=1,
       back_prop=False)

  alive_seq.set_shape((None, beam_size, None))
  finished_seq.set_shape((None, beam_size, None))

  # Accounting for a corner case: it's possible that no sequence in alive for a
  # particular batch item ever reached EOS. In that case, we should just copy
  # the contents of alive for that batch item. If tf.reduce_any(finished_flags, 1)
  # is False, it means that no sequence for that batch index has reached EOS.
  # We need to do the same for the scores as well.
  finished_seq = tf.where(
      tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)
  finished_scores = tf.where(
      tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)
  if return_states:
    return finished_seq, finished_scores, states
  return finished_seq, finished_scores
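
A minimal driver sketch for beam_search, assuming the module-level helpers it relies on (shape_list, _expand_to_beam_size, compute_topk_scores_and_seq, the INF constant, etc.) are importable; the toy model below is hypothetical and always prefers token id 2:

import tensorflow as tf

vocab_size = 5

def toy_symbols_to_logits_fn(decoded_ids):
  # [batch * beam, decoded_length] -> [batch * beam, vocab_size]
  batch = tf.shape(decoded_ids)[0]
  return tf.one_hot(tf.fill([batch], 2), vocab_size, on_value=5.0, off_value=0.0)

initial_ids = tf.zeros([3], dtype=tf.int32)  # batch_size = 3
seqs, scores = beam_search(
    toy_symbols_to_logits_fn,
    initial_ids,
    beam_size=4,
    decode_length=6,
    vocab_size=vocab_size,
    alpha=0.6,
    eos_id=1)
# seqs: [3, 4, <=7] decoded beams; scores: [3, 4] length-penalized log probs.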
Example #19
    def forward(
        self,
        inputs,
        sequence_length=None,
        initial_state=None,
        memory=None,
        memory_sequence_length=None,
        input_fn=None,
        sampling_probability=None,
        training=None,
    ):
        """Runs the decoder on full sequences.

        Args:
          inputs: The 3D decoder input.
          sequence_length: The length of each input sequence.
          initial_state: The initial decoder state.
          memory: Memory values to query.
          memory_sequence_length: Memory values length.
          input_fn: A callable taking sampled ids and returning the decoding inputs.
          sampling_probability: The probability to read from the last sample instead
            of the true target.
          training: Run in training mode.

        Returns:
          A tuple with the logits, the decoder state, and the attention
          vector.
        """
        _ = sequence_length
        fused_projection = True
        if sampling_probability is not None:
            if input_fn is None:
                raise ValueError(
                    "input_fn is required when a sampling probability is set")
            if not tf.is_tensor(
                    sampling_probability) and sampling_probability == 0:
                sampling_probability = None
            else:
                fused_projection = False
                tf.summary.scalar("sampling_probability", sampling_probability)

        batch_size, max_step, _ = misc.shape_list(inputs)
        inputs_ta = tf.TensorArray(inputs.dtype, size=max_step)
        inputs_ta = inputs_ta.unstack(tf.transpose(inputs, perm=[1, 0, 2]))

        def _maybe_sample(true_inputs, logits):
            # Read from samples with a probability.
            draw = tf.random.uniform([batch_size])
            read_sample = tf.less(draw, sampling_probability)
            sampled_ids = tf.random.categorical(logits, 1)
            sampled_inputs = input_fn(tf.squeeze(sampled_ids, 1))
            inputs = tf.where(
                tf.broadcast_to(tf.expand_dims(read_sample, -1),
                                tf.shape(true_inputs)),
                x=sampled_inputs,
                y=true_inputs,
            )
            return inputs

        def _body(step, state, inputs, outputs_ta, attention_ta):
            outputs, state, attention = self.step(
                inputs,
                step,
                state=state,
                memory=memory,
                memory_sequence_length=memory_sequence_length,
                training=training,
            )
            next_inputs = tf.cond(
                step + 1 < max_step,
                true_fn=lambda: inputs_ta.read(step + 1),
                false_fn=lambda: tf.zeros_like(inputs),
            )
            if not fused_projection:
                outputs = self.output_layer(outputs)
            if sampling_probability is not None:
                next_inputs = _maybe_sample(next_inputs, outputs)
            outputs_ta = outputs_ta.write(step, outputs)
            if attention is not None:
                attention_ta = attention_ta.write(step, attention)
            return step + 1, state, next_inputs, outputs_ta, attention_ta

        step = tf.constant(0, dtype=tf.int32)
        outputs_ta = tf.TensorArray(inputs.dtype, size=max_step)
        attention_ta = tf.TensorArray(inputs.dtype, size=max_step)

        _, state, _, outputs_ta, attention_ta = tf.while_loop(
            lambda *arg: True,
            _body,
            loop_vars=(
                step,
                initial_state,
                inputs_ta.read(0),
                outputs_ta,
                attention_ta,
            ),
            parallel_iterations=32,
            swap_memory=True,
            maximum_iterations=max_step,
        )

        outputs = tf.transpose(outputs_ta.stack(), perm=[1, 0, 2])
        logits = self.output_layer(outputs) if fused_projection else outputs
        attention = None
        if self.support_alignment_history:
            attention = tf.transpose(attention_ta.stack(), perm=[1, 0, 2])
        return logits, state, attention