def _build(self,
           decoding_strategy="train_greedy",
           initial_state=None,
           inputs=None,
           memory=None,
           sequence_length=None,
           embedding=None,
           start_tokens=None,
           end_token=None,
           softmax_temperature=None,
           max_decoding_length=None,
           impute_finished=False,
           output_time_major=False,
           input_time_major=False,
           helper=None,
           mode=None,
           **kwargs):
    # Memory
    for _mechanism in self._cell._attention_mechanisms:
        _mechanism.initialize_memory(memory)

    # Helper
    if helper is not None:
        pass
    elif decoding_strategy is not None:
        if decoding_strategy == "train_greedy":
            helper = rnn_decoder_helpers._get_training_helper(
                inputs, sequence_length, embedding, input_time_major)
        elif decoding_strategy == "infer_greedy":
            helper = tx_helper.GreedyEmbeddingHelper(
                embedding, start_tokens, end_token)
        elif decoding_strategy == "infer_sample":
            helper = tx_helper.SampleEmbeddingHelper(
                embedding, start_tokens, end_token, softmax_temperature)
        else:
            raise ValueError(
                "Unknown decoding strategy: {}".format(decoding_strategy))
    else:
        if is_train_mode_py(mode):
            kwargs_ = copy.copy(self._hparams.helper_train.kwargs.todict())
            helper_type = self._hparams.helper_train.type
        else:
            kwargs_ = copy.copy(self._hparams.helper_infer.kwargs.todict())
            helper_type = self._hparams.helper_infer.type
        kwargs_.update({
            "inputs": inputs,
            "sequence_length": sequence_length,
            "time_major": input_time_major,
            "embedding": embedding,
            "start_tokens": start_tokens,
            "end_token": end_token,
            "softmax_temperature": softmax_temperature})
        kwargs_.update(kwargs)
        helper = rnn_decoder_helpers.get_helper(helper_type, **kwargs_)
    self._helper = helper

    # Initial state
    if initial_state is not None:
        self._initial_state = initial_state
    else:
        self._initial_state = self.zero_state(
            batch_size=self.batch_size, dtype=tf.float32)

    # Maximum decoding length
    max_l = max_decoding_length
    if max_l is None:
        max_l_train = self._hparams.max_decoding_length_train
        if max_l_train is None:
            max_l_train = utils.MAX_SEQ_LENGTH
        max_l_infer = self._hparams.max_decoding_length_infer
        if max_l_infer is None:
            max_l_infer = utils.MAX_SEQ_LENGTH
        max_l = tf.cond(is_train_mode(mode),
                        lambda: max_l_train, lambda: max_l_infer)
    self.max_decoding_length = max_l

    # Decode
    outputs, final_state, sequence_lengths = dynamic_decode(
        decoder=self, impute_finished=impute_finished,
        maximum_iterations=max_l, output_time_major=output_time_major)

    if not self._built:
        self._add_internal_trainable_variables()
        # Add trainable variables of `self._cell` which may be
        # constructed externally.
        self._add_trainable_variable(
            layers.get_rnn_cell_trainable_variables(self._cell))
        if isinstance(self._output_layer, tf.layers.Layer):
            self._add_trainable_variable(
                self._output_layer.trainable_variables)
        # Add trainable variables of `self._beam_search_rnn_cell` which
        # may already be constructed and used.
        if self._beam_search_cell is not None:
            self._add_trainable_variable(
                self._beam_search_cell.trainable_variables)
        self._built = True

    return outputs, final_state, sequence_lengths
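# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module). In this
# version of `_build`, the attention `memory` (e.g., encoder outputs) is
# supplied at call time and re-initializes each attention mechanism. The names
# `decoder`, `embedder`, `enc_outputs`, and `data_batch` are assumed to exist;
# `decoder` stands for an already-constructed AttentionRNNDecoder instance.
outputs, final_state, sequence_length = decoder(
    decoding_strategy='train_greedy',
    memory=enc_outputs,                       # e.g., outputs of an encoder
    inputs=embedder(data_batch['text_ids']),
    sequence_length=data_batch['length'] - 1)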
def _build(self,
           inputs,
           sequence_length=None,
           initial_state_fw=None,
           initial_state_bw=None,
           time_major=False,
           mode=None,
           return_cell_output=False,
           return_output_size=False,
           **kwargs):
    """Encodes the inputs.

    Args:
        inputs: A 3D Tensor of shape `[batch_size, max_time, dim]`.
            The first two dimensions `batch_size` and `max_time` may be
            exchanged if `time_major=True` is specified.
        sequence_length (optional): A 1D int tensor of shape
            `[batch_size]`. Sequence lengths of the batch inputs. Used to
            copy-through state and zero-out outputs when past a batch
            element's sequence length.
        initial_state_fw (optional): Initial state of the forward RNN.
        initial_state_bw (optional): Initial state of the backward RNN.
        time_major (bool): The shape format of the :attr:`inputs` and
            :attr:`outputs` Tensors. If `True`, these tensors are of shape
            `[max_time, batch_size, depth]`. If `False` (default),
            these tensors are of shape `[batch_size, max_time, depth]`.
        mode (optional): A tensor taking value in
            :tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`,
            including `TRAIN`, `EVAL`, and `PREDICT`. Controls output
            layer dropout if the output layer is specified with
            :attr:`hparams`. If `None` (default),
            :func:`texar.tf.global_mode` is used.
        return_cell_output (bool): Whether to return the output of the
            RNN cell. These are the results prior to the output layer.
        return_output_size (bool): Whether to return the size of the
            output (i.e., the results after the output layers).
        **kwargs: Optional keyword arguments of
            :tf_main:`tf.nn.dynamic_rnn <nn/dynamic_rnn>`,
            such as `swap_memory`, `dtype`, `parallel_iterations`, etc.

    Returns:
        - By default (both `return_cell_output` and `return_output_size` \
        are False), returns a pair :attr:`(outputs, final_state)`

            - :attr:`outputs`: A tuple `(outputs_fw, outputs_bw)` \
            containing the forward and the backward RNN outputs, each of \
            which is of shape `[batch_size, max_time, output_dim]` if \
            `time_major` is False, or \
            `[max_time, batch_size, output_dim]` if `time_major` is True. \
            If RNN cell output is a (nested) tuple of Tensors, then \
            `outputs_fw` and `outputs_bw` will be a (nested) tuple having \
            the same structure as the cell output.

            - :attr:`final_state`: A tuple \
            `(final_state_fw, final_state_bw)` containing the final \
            states of the forward and backward RNNs, each of which is a \
            Tensor of shape `[batch_size] + cell.state_size`, or a \
            (nested) tuple of Tensors if `cell.state_size` is a (nested) \
            tuple.

        - If `return_cell_output` is True, returns a triple \
        :attr:`(outputs, final_state, cell_outputs)` where

            - :attr:`cell_outputs`: A tuple \
            `(cell_outputs_fw, cell_outputs_bw)` containing the outputs \
            by the forward and backward RNN cells prior to the output \
            layers, having the same structure with :attr:`outputs` except \
            for the `output_dim`.

        - If `return_output_size` is True, returns a tuple \
        :attr:`(outputs, final_state, output_size)` where

            - :attr:`output_size`: A tuple \
            `(output_size_fw, output_size_bw)` containing the size of \
            `outputs_fw` and `outputs_bw`, respectively. Take `*_fw` for \
            example, `output_size_fw` is a (possibly nested tuple of) \
            int. If a single int or an int array, then `outputs_fw` has \
            shape `[batch/time, time/batch] + output_size_fw`. If a \
            (nested) tuple, then `output_size_fw` has the same structure \
            as `outputs_fw`. The same applies to `output_size_bw`.

        - If both `return_cell_output` and `return_output_size` are \
        True, returns \
        :attr:`(outputs, final_state, cell_outputs, output_size)`.
""" no_initial_state = initial_state_fw is None and initial_state_bw is None if ('dtype' not in kwargs) and no_initial_state: cell_outputs, states = tf.nn.bidirectional_dynamic_rnn( cell_fw=self._cell_fw, cell_bw=self._cell_bw, inputs=inputs, sequence_length=sequence_length, initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw, time_major=time_major, dtype=tf.float32, **kwargs) else: cell_outputs, states = tf.nn.bidirectional_dynamic_rnn( cell_fw=self._cell_fw, cell_bw=self._cell_bw, inputs=inputs, sequence_length=sequence_length, initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw, time_major=time_major, **kwargs) outputs_fw, output_size_fw = _apply_rnn_encoder_output_layer( self._output_layer_fw, time_major, self._output_layer_hparams_fw, mode, cell_outputs[0], self._cell_fw.output_size) outputs_bw, output_size_bw = _apply_rnn_encoder_output_layer( self._output_layer_bw, time_major, self._output_layer_hparams_bw, mode, cell_outputs[1], self._cell_bw.output_size) outputs = (outputs_fw, outputs_bw) output_size = (output_size_fw, output_size_bw) if not self._built: self._add_internal_trainable_variables() # Add trainable variables of cells and output layers # which may be constructed externally. self._add_trainable_variable( layers.get_rnn_cell_trainable_variables(self._cell_fw)) self._add_trainable_variable( layers.get_rnn_cell_trainable_variables(self._cell_bw)) if self._output_layer_fw and \ not isinstance(self._output_layer_fw, (list, tuple)): self._add_trainable_variable( self._output_layer_fw.trainable_variables) if self._output_layer_bw and \ not isinstance(self._output_layer_bw, (list, tuple)): self._add_trainable_variable( self._output_layer_bw.trainable_variables) self._built = True returns = (outputs, states) if return_cell_output: returns += (cell_outputs, ) if return_output_size: returns += (output_size, ) return returns
def _build(self,
           decoding_strategy="train_greedy",
           initial_state=None,
           inputs=None,
           sequence_length=None,
           embedding=None,
           start_tokens=None,
           end_token=None,
           softmax_temperature=None,
           max_decoding_length=None,
           impute_finished=False,
           output_time_major=False,
           input_time_major=False,
           helper=None,
           mode=None,
           **kwargs):
    """Performs decoding. This is a shared interface for both
    :class:`~texar.tf.modules.BasicRNNDecoder` and
    :class:`~texar.tf.modules.AttentionRNNDecoder`.

    The function provides **3 ways** to specify the decoding method, with
    varying flexibility:

    1. The :attr:`decoding_strategy` argument: A string taking value of:

        - **"train_greedy"**: decoding in teacher-forcing fashion (i.e., \
          feeding the `ground truth` to decode the next step), and each \
          sample is obtained by taking the `argmax` of the RNN output \
          logits. Arguments \
          :attr:`(inputs, sequence_length, input_time_major)` are \
          required for this strategy, and argument :attr:`embedding` is \
          optional.
        - **"infer_greedy"**: decoding in inference fashion (i.e., \
          feeding the `generated` sample to decode the next step), and \
          each sample is obtained by taking the `argmax` of the RNN \
          output logits. Arguments \
          :attr:`(embedding, start_tokens, end_token)` are required for \
          this strategy, and argument :attr:`max_decoding_length` is \
          optional.
        - **"infer_sample"**: decoding in inference fashion, and each \
          sample is obtained by `random sampling` from the RNN output \
          distribution. Arguments \
          :attr:`(embedding, start_tokens, end_token)` are required for \
          this strategy, and argument :attr:`max_decoding_length` is \
          optional.

      This argument is used only when argument :attr:`helper` is `None`.

      Example:

        .. code-block:: python

            embedder = WordEmbedder(vocab_size=data.vocab.size)
            decoder = BasicRNNDecoder(vocab_size=data.vocab.size)

            # Teacher-forcing decoding
            outputs_1, _, _ = decoder(
                decoding_strategy='train_greedy',
                inputs=embedder(data_batch['text_ids']),
                sequence_length=data_batch['length'] - 1)

            # Random sample decoding. Gets 100 sequence samples
            outputs_2, _, sequence_length = decoder(
                decoding_strategy='infer_sample',
                start_tokens=[data.vocab.bos_token_id] * 100,
                end_token=data.vocab.eos_token_id,
                embedding=embedder,
                max_decoding_length=60)

    2. The :attr:`helper` argument: An instance of a subclass of \
       :class:`texar.tf.modules.Helper`. This provides a superset of the \
       decoding strategies above, for example:

        - :class:`~texar.tf.modules.TrainingHelper` corresponding to the \
          "train_greedy" strategy.
        - :class:`~texar.tf.modules.GreedyEmbeddingHelper` and \
          :class:`~texar.tf.modules.SampleEmbeddingHelper` corresponding \
          to the "infer_greedy" and "infer_sample" strategies, \
          respectively.
        - :class:`~texar.tf.modules.TopKSampleEmbeddingHelper` for Top-K \
          sample decoding.
        - :class:`ScheduledEmbeddingTrainingHelper` and \
          :class:`ScheduledOutputTrainingHelper` for scheduled sampling.
        - :class:`~texar.tf.modules.SoftmaxEmbeddingHelper` and \
          :class:`~texar.tf.modules.GumbelSoftmaxEmbeddingHelper` for \
          soft decoding and gradient backpropagation.

       Helpers give the maximal flexibility of configuring the decoding \
       strategy.

       Example:
        .. code-block:: python

            embedder = WordEmbedder(vocab_size=data.vocab.size)
            decoder = BasicRNNDecoder(vocab_size=data.vocab.size)

            # Teacher-forcing decoding, same as above with
            # `decoding_strategy='train_greedy'`
            helper_1 = tx.modules.TrainingHelper(
                inputs=embedder(data_batch['text_ids']),
                sequence_length=data_batch['length'] - 1)
            outputs_1, _, _ = decoder(helper=helper_1)

            # Gumbel-softmax decoding
            helper_2 = GumbelSoftmaxEmbeddingHelper(
                embedding=embedder,
                start_tokens=[data.vocab.bos_token_id] * 100,
                end_token=data.vocab.eos_token_id,
                tau=0.1)
            outputs_2, _, sequence_length = decoder(
                max_decoding_length=60, helper=helper_2)

    3. :attr:`hparams["helper_train"]` and :attr:`hparams["helper_infer"]`: \
       Specifying the helper through hyperparameters. The train and infer \
       strategies are toggled based on :attr:`mode`. Appropriate arguments \
       (e.g., :attr:`inputs`, :attr:`start_tokens`, etc.) are selected to \
       construct the helper. Additional arguments for the helper \
       constructor can be provided either through :attr:`**kwargs`, or \
       through :attr:`hparams["helper_train/infer"]["kwargs"]`.

       This manner of specifying the helper is used only when both \
       :attr:`decoding_strategy` and :attr:`helper` are `None`.

       Example:

        .. code-block:: python

            h = {
                "helper_infer": {
                    "type": "GumbelSoftmaxEmbeddingHelper",
                    "kwargs": { "tau": 0.1 }
                }
            }
            embedder = WordEmbedder(vocab_size=data.vocab.size)
            decoder = BasicRNNDecoder(vocab_size=data.vocab.size, hparams=h)

            # Gumbel-softmax decoding
            output, _, _ = decoder(
                decoding_strategy=None,  # Set to None explicitly
                embedding=embedder,
                start_tokens=[data.vocab.bos_token_id] * 100,
                end_token=data.vocab.eos_token_id,
                max_decoding_length=60,
                mode=tf.estimator.ModeKeys.PREDICT)
                # PREDICT mode also shuts down dropout

    Args:
        decoding_strategy (str): A string specifying the decoding
            strategy. Different arguments are required based on the
            strategy. Ignored if :attr:`helper` is given.
        initial_state (optional): Initial state of decoding.
            If `None` (default), zero state is used.
        inputs (optional): Input tensors for teacher forcing decoding.
            Used when `decoding_strategy` is set to "train_greedy", or
            when a `hparams`-configured helper is used.

            - If :attr:`embedding` is `None`, `inputs` is directly fed \
            to the decoder. E.g., in `"train_greedy"` strategy, `inputs` \
            must be a 3D Tensor of shape \
            `[batch_size, max_time, emb_dim]` (or \
            `[max_time, batch_size, emb_dim]` if `input_time_major` is \
            True).
            - If `embedding` is given, `inputs` is used as indexes to \
            look up embeddings to feed into the decoder. E.g., if \
            `embedding` is an instance of \
            :class:`~texar.tf.modules.WordEmbedder`, then :attr:`inputs` \
            is usually a 2D int Tensor `[batch_size, max_time]` (or \
            `[max_time, batch_size]` if `input_time_major` is True) \
            containing the token indexes.

        sequence_length (optional): A 1D int Tensor containing the
            sequence length of :attr:`inputs`.
            Used when `decoding_strategy="train_greedy"` or a
            `hparams`-configured helper is used.
        embedding (optional): Embedding used when:

            - "infer_greedy" or "infer_sample" `decoding_strategy` is \
            used. This can be a callable or the `params` argument for \
            :tf_main:`embedding_lookup <nn/embedding_lookup>`. If a \
            callable, it can take a vector tensor of token `ids`, or \
            take two arguments (`ids`, `times`), where `ids` is a vector \
            tensor of token ids, and `times` is a vector tensor of time \
            steps (i.e., position ids). The latter case can be used when \
            :attr:`embedding` is a combination of word embedding and \
            position embedding. `embedding` is required in this case.
- "train_greedy" `decoding_strategy` is used.\ This can be a callable or the `params` argument for \ :tf_main:`embedding_lookup <nn/embedding_lookup>`. \ If a callable, it can take :attr:`inputs` and returns \ the input embedding. `embedding` is optional in this case. start_tokens (optional): A int Tensor of shape `[batch_size]`, the start tokens. Used when `decoding_strategy="infer_greedy"` or `"infer_sample"`, or when the helper specified in `hparams` is used. Example: .. code-block:: python data = tx.data.MonoTextData(hparams) iterator = DataIterator(data) batch = iterator.get_next() bos_token_id = data.vocab.bos_token_id start_tokens=tf.ones_like(batch['length'])*bos_token_id end_token (optional): A int 0D Tensor, the token that marks end of decoding. Used when `decoding_strategy="infer_greedy"` or `"infer_sample"`, or when the helper specified in `hparams` is used. softmax_temperature (optional): A float 0D Tensor, value to divide the logits by before computing the softmax. Larger values (above 1.0) result in more random samples. Must > 0. If `None`, 1.0 is used. Used when `decoding_strategy="infer_sample"`. max_decoding_length: A int scalar Tensor indicating the maximum allowed number of decoding steps. If `None` (default), either `hparams["max_decoding_length_train"]` or `hparams["max_decoding_length_infer"]` is used according to :attr:`mode`. impute_finished (bool): If `True`, then states for batch entries which are marked as finished get copied through and the corresponding outputs get zeroed out. This causes some slowdown at each time step, but ensures that the final state and outputs have the correct values and that backprop ignores time steps that were marked as finished. output_time_major (bool): If `True`, outputs are returned as time major tensors. If `False` (default), outputs are returned as batch major tensors. input_time_major (optional): Whether the :attr:`inputs` tensor is time major. Used when `decoding_strategy="train_greedy"` or `hparams`-configured helper is used. helper (optional): An instance of :class:`texar.tf.modules.Helper` that defines the decoding strategy. If given, `decoding_strategy` and helper configs in :attr:`hparams` are ignored. mode (str, optional): A string taking value in :tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`. If `TRAIN`, training related hyperparameters are used (e.g., `hparams['max_decoding_length_train']`), otherwise, inference related hyperparameters are used (e.g., `hparams['max_decoding_length_infer']`). If `None` (default), `TRAIN` mode is used. **kwargs: Other keyword arguments for constructing helpers defined by `hparams["helper_trainn"]` or `hparams["helper_infer"]`. Returns: `(outputs, final_state, sequence_lengths)`, where - **`outputs`**: an object containing the decoder output on all \ time steps. - **`final_state`**: is the cell state of the final time step. - **`sequence_lengths`**: is an int Tensor of shape `[batch_size]` \ containing the length of each sample. 
""" # Helper if helper is not None: pass elif decoding_strategy is not None: if decoding_strategy == "train_greedy": helper = rnn_decoder_helpers._get_training_helper( inputs, sequence_length, embedding, input_time_major) elif decoding_strategy == "infer_greedy": helper = tx_helper.GreedyEmbeddingHelper( embedding, start_tokens, end_token) elif decoding_strategy == "infer_sample": helper = tx_helper.SampleEmbeddingHelper( embedding, start_tokens, end_token, softmax_temperature) else: raise ValueError( "Unknown decoding strategy: {}".format(decoding_strategy)) else: if is_train_mode_py(mode): kwargs_ = copy.copy(self._hparams.helper_train.kwargs.todict()) helper_type = self._hparams.helper_train.type else: kwargs_ = copy.copy(self._hparams.helper_infer.kwargs.todict()) helper_type = self._hparams.helper_infer.type kwargs_.update({ "inputs": inputs, "sequence_length": sequence_length, "time_major": input_time_major, "embedding": embedding, "start_tokens": start_tokens, "end_token": end_token, "softmax_temperature": softmax_temperature }) kwargs_.update(kwargs) helper = rnn_decoder_helpers.get_helper(helper_type, **kwargs_) self._helper = helper # Initial state if initial_state is not None: self._initial_state = initial_state else: self._initial_state = self.zero_state(batch_size=self.batch_size, dtype=tf.float32) # Maximum decoding length max_l = max_decoding_length if max_l is None: max_l_train = self._hparams.max_decoding_length_train if max_l_train is None: max_l_train = utils.MAX_SEQ_LENGTH max_l_infer = self._hparams.max_decoding_length_infer if max_l_infer is None: max_l_infer = utils.MAX_SEQ_LENGTH max_l = tf.cond(is_train_mode(mode), lambda: max_l_train, lambda: max_l_infer) self.max_decoding_length = max_l # Decode outputs, final_state, sequence_lengths = dynamic_decode( decoder=self, impute_finished=impute_finished, maximum_iterations=max_l, output_time_major=output_time_major) if not self._built: self._add_internal_trainable_variables() # Add trainable variables of `self._cell` which may be # constructed externally. self._add_trainable_variable( layers.get_rnn_cell_trainable_variables(self._cell)) if isinstance(self._output_layer, tf.layers.Layer): self._add_trainable_variable( self._output_layer.trainable_variables) # Add trainable variables of `self._beam_search_rnn_cell` which # may already be constructed and used. if self._beam_search_cell is not None: self._add_trainable_variable( self._beam_search_cell.trainable_variables) self._built = True return outputs, final_state, sequence_lengths
def _build(self,
           inputs,
           sequence_length=None,
           initial_state=None,
           time_major=False,
           mode=None,
           return_cell_output=False,
           return_output_size=False,
           **kwargs):
    """Encodes the inputs.

    Args:
        inputs: A 3D Tensor of shape `[batch_size, max_time, dim]`.
            The first two dimensions :attr:`batch_size` and
            :attr:`max_time` are exchanged if :attr:`time_major=True`
            is specified.
        sequence_length (optional): A 1D int tensor of shape
            `[batch_size]`. Sequence lengths of the batch inputs. Used to
            copy-through state and zero-out outputs when past a batch
            element's sequence length.
        initial_state (optional): Initial state of the RNN.
        time_major (bool): The shape format of the :attr:`inputs` and
            :attr:`outputs` Tensors. If `True`, these tensors are of shape
            `[max_time, batch_size, depth]`. If `False` (default),
            these tensors are of shape `[batch_size, max_time, depth]`.
        mode (optional): A tensor taking value in
            :tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`,
            including `TRAIN`, `EVAL`, and `PREDICT`. Controls output
            layer dropout if the output layer is specified with
            :attr:`hparams`. If `None` (default),
            :func:`texar.tf.global_mode` is used.
        return_cell_output (bool): Whether to return the output of the
            RNN cell. These are the results prior to the output layer.
        return_output_size (bool): Whether to return the size of the
            output (i.e., the results after the output layer).
        **kwargs: Optional keyword arguments of
            :tf_main:`tf.nn.dynamic_rnn <nn/dynamic_rnn>`,
            such as `swap_memory`, `dtype`, `parallel_iterations`, etc.

    Returns:
        - By default (both `return_cell_output` and `return_output_size` \
        are False), returns a pair :attr:`(outputs, final_state)`

            - :attr:`outputs`: The RNN output tensor by the output layer \
            (if exists) or the RNN cell (otherwise). The tensor is of \
            shape `[batch_size, max_time, output_size]` if `time_major` \
            is False, or `[max_time, batch_size, output_size]` if \
            `time_major` is True. If RNN cell output is a (nested) tuple \
            of Tensors, then :attr:`outputs` will be a (nested) tuple \
            having the same nest structure as the cell output.

            - :attr:`final_state`: The final state of the RNN, which is \
            a Tensor of shape `[batch_size] + cell.state_size` or a \
            (nested) tuple of Tensors if `cell.state_size` is a (nested) \
            tuple.

        - If `return_cell_output` is True, returns a triple \
        :attr:`(outputs, final_state, cell_outputs)`

            - :attr:`cell_outputs`: The outputs by the RNN cell prior to \
            the output layer, having the same structure with \
            :attr:`outputs` except for the `output_dim`.

        - If `return_output_size` is `True`, returns a tuple \
        :attr:`(outputs, final_state, output_size)`

            - :attr:`output_size`: A (possibly nested tuple of) int \
            representing the size of :attr:`outputs`. If a single int or \
            an int array, then `outputs` has shape \
            `[batch/time, time/batch] + output_size`. If a (nested) \
            tuple, then `output_size` has the same structure as \
            `outputs`.

        - If both `return_cell_output` and `return_output_size` are \
        True, returns \
        :attr:`(outputs, final_state, cell_outputs, output_size)`.
""" if ('dtype' not in kwargs) and (initial_state is None): cell_outputs, state = tf.nn.dynamic_rnn( cell=self._cell, inputs=inputs, sequence_length=sequence_length, initial_state=initial_state, time_major=time_major, dtype=tf.float32, **kwargs) else: cell_outputs, state = tf.nn.dynamic_rnn( cell=self._cell, inputs=inputs, sequence_length=sequence_length, initial_state=initial_state, time_major=time_major, **kwargs) outputs, output_size = _apply_rnn_encoder_output_layer( self._output_layer, time_major, self._output_layer_hparams, mode, cell_outputs, self._cell.output_size) if not self._built: self._add_internal_trainable_variables() # Add trainable variables of `self._cell` and `self._output_layer` # which may be constructed externally. self._add_trainable_variable( layers.get_rnn_cell_trainable_variables(self._cell)) if self._output_layer and \ not isinstance(self._output_layer, (list, tuple)): self._add_trainable_variable( self._output_layer.trainable_variables) self._built = True rets = (outputs, state) if return_cell_output: rets += (cell_outputs, ) if return_output_size: rets += (output_size, ) return rets