Code example #1
File: tf_rnn_cells.py  Project: vilmarzti/deepwriting
    def latent_p_pi(self):
        # tf.concat expects a list here; self.latent_h is presumably a list of
        # tensors concatenated along the feature axis. (The original wrapped it
        # in redundant parentheses.)
        input_ = tf.concat(self.latent_h, axis=1)
        with tf.compat.v1.variable_scope("latent_p_pi"):
            phi_pi = linear(input_,
                            self.h_dim,
                            self.activation_func,
                            batch_norm=self.use_batch_norm)
            # Unnormalized mixture weights (logits) over the GMM components.
            self.logits_p_pi = linear(phi_pi,
                                      self.num_gmm_components,
                                      activation_fn=None,
                                      batch_norm=self.use_batch_norm)
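
The logits produced above are unnormalized. A minimal sketch (not from the project) of turning them into GMM mixture weights with a softmax:

# Illustrative only: normalize the mixture logits into weights summing to 1.
p_pi = tf.nn.softmax(self.logits_p_pi, axis=-1)  # [batch, num_gmm_components]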
Code example #2
    def build_output_layer(self):
        """
        Builds a number fully connected layers projecting RNN predictions into an embedding space. Then, for each model
        output is predicted by a linear layer.
        """
        flat_outputs_hidden = self.flat_tensor(self.outputs)
        with tf.compat.v1.variable_scope('output_layer_hidden',
                                         reuse=self.reuse):
            flat_outputs_hidden = fully_connected_layer(
                flat_outputs_hidden, **self.output_layer_config)

        with tf.compat.v1.variable_scope("output_layer_char",
                                         reuse=self.reuse):
            self.flat_char_prediction = linear(
                input=flat_outputs_hidden,
                output_size=self.target_dims[0],
                activation_fn=self.output_layer_config['out_activation_fn'][0],
                is_training=self.is_training)
            self.char_prediction = self.temporal_tensor(
                self.flat_char_prediction)

        with tf.compat.v1.variable_scope("output_layer_eoc", reuse=self.reuse):
            self.flat_eoc_prediction = linear(
                input=flat_outputs_hidden,
                output_size=self.target_dims[1],
                activation_fn=self.output_layer_config['out_activation_fn'][1],
                is_training=self.is_training)
            self.eoc_prediction = self.temporal_tensor(
                self.flat_eoc_prediction)

        with tf.compat.v1.variable_scope("output_layer_bow", reuse=self.reuse):
            self.flat_bow_prediction = linear(
                input=flat_outputs_hidden,
                output_size=self.target_dims[2],
                activation_fn=self.output_layer_config['out_activation_fn'][2],
                is_training=self.is_training)
            self.bow_prediction = self.temporal_tensor(
                self.flat_bow_prediction)

        # Mask for precise loss calculation.
        self.input_mask = tf.expand_dims(
            tf.sequence_mask(lengths=self.input_seq_length,
                             maxlen=tf.reduce_max(self.input_seq_length),
                             dtype=tf.float32), -1)

        self.ops_evaluation['char_prediction'] = self.char_prediction
        self.ops_evaluation['eoc_prediction'] = self.eoc_prediction
        self.ops_evaluation['bow_prediction'] = self.bow_prediction
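
The mask built at the end lets a per-step loss ignore padded time steps. A minimal usage sketch, assuming a hypothetical per_step_loss tensor of shape [batch, time, 1]:

# Hypothetical usage of self.input_mask; `per_step_loss` is an assumed name.
masked_loss = per_step_loss * self.input_mask
loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(self.input_mask)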
Code example #3
File: tf_rnn_cells.py  Project: vilmarzti/deepwriting
    def output_layer(self):
        # Create one linear output head per configured output component.
        self.output_components = {}
        for key, size, activation_func in zip(
                self.output_config['keys'], self.output_config['dims'],
                self.output_config['activation_funcs']):
            with tf.compat.v1.variable_scope(key):
                output_component = linear(
                    self.phi_x_output,
                    size,
                    activation_fn=get_activation_fn(activation_func))
                self.output_components[key] = output_component
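
The loop above implies a particular shape for self.output_config. A hypothetical example; the keys, sizes, and activation names below are assumptions, not values from the project:

# Hypothetical output_config consumed by output_layer() above.
output_config = {
    'keys': ['mu', 'sigma', 'pi'],                 # one linear head per entry
    'dims': [32, 32, 20],                          # output size of each head
    'activation_funcs': [None, 'softplus', None],  # resolved via get_activation_fn
}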
Code example #4
File: tf_rnn_cells.py  Project: vilmarzti/deepwriting
    def latent(self, input_, scope):
        """
        Creates mu and sigma components of a latent distribution. Given an input layer, first applies a fully connected
        layer and then calculates mu & sigma.

        Args:
            input_:
            scope:

        Returns:

        """
        with tf.compat.v1.variable_scope(scope):
            latent_hidden = linear(input_,
                                   self.h_dim,
                                   self.activation_func,
                                   batch_norm=self.use_batch_norm)
            with tf.compat.v1.variable_scope("mu"):
                mu = linear(latent_hidden, self.z_dim)
            with tf.compat.v1.variable_scope("sigma"):
                sigma = linear(latent_hidden, self.z_dim, self.sigma_func)

            return mu, sigma
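
A common way to draw a differentiable sample from the resulting distribution is the reparameterization trick. The sketch below is illustrative (not part of the original cell) and assumes sigma is a positive standard deviation:

# Illustrative reparameterized sample of z ~ N(mu, sigma^2).
eps = tf.random.normal(tf.shape(mu))  # eps ~ N(0, I)
z = mu + sigma * eps                  # differentiable w.r.t. mu and sigma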
Code example #5
File: tf_rnn_cells.py  Project: vilmarzti/deepwriting
    def phi(self, input_, scope, reuse=None):
        """
        A fully connected layer to increase model capacity and learn and intermediate representation. It is reported to
        be useful in https://arxiv.org/pdf/1506.02216.pdf

        Args:
            input_:
            scope:

        Returns:

        """
        with tf.compat.v1.variable_scope(scope, reuse=reuse):
            phi_hidden = input_
            # Stack num_linear_layers fully connected layers of width h_dim.
            for _ in range(self.num_linear_layers):
                phi_hidden = linear(phi_hidden,
                                    self.h_dim,
                                    self.activation_func,
                                    batch_norm=self.use_batch_norm)

            return phi_hidden
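
All five examples call a linear helper that is defined elsewhere in the project. The sketch below reconstructs the interface those call sites imply; it is an assumption about the signature, not the deepwriting implementation:

# Assumed interface of the project's `linear` helper, inferred from the call
# sites above; the real implementation lives elsewhere in deepwriting.
def linear(input, output_size, activation_fn=None, batch_norm=False,
           is_training=True):
    # `input` (shadowing the builtin) matches the keyword used in example #2.
    out = tf.compat.v1.layers.dense(input, output_size)
    if batch_norm:
        out = tf.compat.v1.layers.batch_normalization(out, training=is_training)
    if activation_fn is not None:
        out = activation_fn(out)
    return out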