Example #1
 def horizontal_cell(self):
     """Cell that controls transition from left sibling to right sibling."""
     with tf.variable_scope("horizontal_cell") as scope:
         cell = rnn.create_multilayer_cell(self.rnn_cell, scope, self.dim,
                                           self.num_layers,
                                           self.tg_input_keep,
                                           self.tg_output_keep)
     return cell, scope
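
Every example on this page calls rnn.create_multilayer_cell with the same leading arguments: the cell type, the enclosing variable scope, the hidden dimension, the number of layers, and the input/output dropout keep probabilities. The helper itself is not shown on this page; the snippet below is a minimal sketch, assuming it stacks one cell per layer and applies dropout to the non-recurrent connections. The name create_multilayer_cell_sketch and the GRU/LSTM switch are illustrative, not the project's actual API.

    import tensorflow as tf  # TF 1.x, matching the tf.variable_scope usage above

    def create_multilayer_cell_sketch(cell_type, dim, num_layers,
                                      input_keep=1.0, output_keep=1.0):
        cells = []
        for _ in range(num_layers):
            # One recurrent cell per layer, all with the same hidden size.
            base = (tf.nn.rnn_cell.GRUCell(dim) if cell_type == 'gru'
                    else tf.nn.rnn_cell.LSTMCell(dim))
            # Dropout is applied to layer inputs and outputs only; the
            # recurrent state transition is left untouched in this sketch.
            cells.append(tf.nn.rnn_cell.DropoutWrapper(
                base, input_keep_prob=input_keep, output_keep_prob=output_keep))
        return tf.nn.rnn_cell.MultiRNNCell(cells) if num_layers > 1 else cells[0]
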
Example #2
 def vertical_cell(self):
     """Cell that controls transition from parent to child."""
     with tf.variable_scope("vertical_cell") as scope:
         cell = rnn.create_multilayer_cell(self.rnn_cell, scope, self.dim,
                                           self.num_layers,
                                           self.tg_input_keep,
                                           self.tg_output_keep)
     return cell, scope
Example #3
 def backward_cell(self):
     """RNN cell for the backward RNN."""
     with tf.variable_scope("backward_cell") as scope:
         cell = rnn.create_multilayer_cell(
             self.rnn_cell,
             scope,
             self.dim,
             self.num_layers,
             self.input_keep,
             self.output_keep,
             variational_recurrent=self.variational_recurrent_dropout,
             batch_normalization=self.recurrent_batch_normalization,
             forward_only=self.forward_only)
     return cell
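
Example #3 also forwards variational_recurrent and batch_normalization flags to the helper; how they are handled inside create_multilayer_cell is not visible here. If variational dropout is implemented with TensorFlow's own DropoutWrapper (an assumption), the wrapper needs a dtype, and an input_size whenever input dropout is active, roughly as follows (sizes are illustrative):

    import tensorflow as tf  # TF 1.x

    dim, input_keep, output_keep = 400, 0.8, 0.8  # illustrative values
    # Hedged sketch: variational dropout reuses one dropout mask per sequence.
    cell = tf.nn.rnn_cell.DropoutWrapper(
        tf.nn.rnn_cell.LSTMCell(dim),
        input_keep_prob=input_keep,
        output_keep_prob=output_keep,
        variational_recurrent=True,  # same mask at every time step
        input_size=dim,              # required here because input_keep_prob < 1
        dtype=tf.float32)            # required when variational_recurrent=True
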
Example #4
 def decoder_cell(self):
     """Cell for the decoder RNN."""
     if self.use_copy and self.copy_fun != 'supervised':
         # With the copy mechanism active, each decoder step consumes extra
         # copy features alongside the token embedding, so the cell's input
         # dimension is doubled.
         input_size = 2 * self.dim
     else:
         input_size = self.dim
     with tf.variable_scope(self.scope + "_decoder_cell") as scope:
         cell = rnn.create_multilayer_cell(
             self.rnn_cell, scope, self.dim, self.num_layers,
             self.input_keep, self.output_keep,
             variational_recurrent=self.variational_recurrent_dropout,
             batch_normalization=self.recurrent_batch_normalization,
             forward_only=self.forward_only,
             input_dim=input_size)
     return cell
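
Example #4 is the only call that passes input_dim explicitly: when the copy mechanism is active, each decoder step consumes an input that is twice as wide as the hidden state. The shape-only sketch below assumes the extra half is a copy/attention read vector; token_embedding and copy_read are illustrative names, not identifiers from the project.

    import tensorflow as tf  # TF 1.x

    batch, dim = 32, 400                      # illustrative sizes
    token_embedding = tf.zeros([batch, dim])  # hypothetical embedded decoder input
    copy_read = tf.zeros([batch, dim])        # hypothetical copy/attention vector
    # The concatenation is [batch, 2 * dim], matching input_dim=2 * self.dim above.
    step_input = tf.concat([token_embedding, copy_read], axis=1)
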
Example #5
    def char_channel_embeddings(self, channel_inputs):
        """
        Generate token representations by character composition.

        :param channel_inputs: batch input char indices, one tensor per
                channel: [[batch, token_size], [batch, token_size], ...]
        :return: embeddings_char, a list with one tensor of composed token
                embeddings per channel input, each of shape [batch, sc_char_dim]
        """
        inputs = [
            tf.squeeze(x, 1)
            for x in tf.split(axis=1,
                              num_or_size_splits=self.max_source_token_size,
                              value=tf.concat(axis=0, values=channel_inputs))
        ]
        input_embeddings = [
            tf.nn.embedding_lookup(self.char_embeddings(), inp)
            for inp in inputs
        ]
        if self.sc_char_composition == 'rnn':
            with tf.variable_scope("encoder_char_rnn",
                                   reuse=self.char_rnn_vars) as scope:
                cell = rnn.create_multilayer_cell(
                    self.sc_char_rnn_cell,
                    scope,
                    self.sc_char_dim,
                    self.sc_char_rnn_num_layers,
                    variational_recurrent=self.variational_recurrent_dropout,
                    batch_normalization=self.recurrent_batch_normalization,
                    forward_only=self.forward_only)
                rnn_outputs, rnn_states = rnn.RNNModel(
                    cell,
                    input_embeddings,
                    num_cell_layers=self.sc_char_rnn_num_layers,
                    dtype=tf.float32)
                self.char_rnn_vars = True
        else:
            raise NotImplementedError

        return [
            tf.squeeze(x, 0) for x in tf.split(
                axis=0,
                num_or_size_splits=len(channel_inputs),
                value=tf.reshape(rnn_states[-1],
                                 [len(channel_inputs), -1, self.sc_char_dim]))
        ]
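
The split/concat bookkeeping in Example #5 is easy to misread: the channel inputs are first stacked along the batch axis, the stacked tensor is cut into one slice per character position for the RNN, and the final RNN state is reshaped so that its leading axis again indexes the original channels. A shape-only NumPy sketch with illustrative sizes:

    import numpy as np

    batch, token_size, char_dim = 3, 5, 8
    channel_inputs = [np.zeros((batch, token_size), dtype=np.int32)
                      for _ in range(2)]

    # Stack the channels along the batch axis, then take one slice per
    # character position: slice i holds the i-th character of every token.
    stacked = np.concatenate(channel_inputs, axis=0)           # [2*batch, token_size]
    per_position = [stacked[:, i] for i in range(token_size)]  # token_size x [2*batch]

    # After the character RNN, the final state is reshaped so the leading
    # axis indexes the original channel inputs again, then split per channel.
    final_state = np.zeros((2 * batch, char_dim))
    per_channel = final_state.reshape(len(channel_inputs), -1, char_dim)  # [2, batch, char_dim]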