def add_adversarial_loss(self, loss):
    ## self.adv_embedding is a list of input embeddings; the last element is
    ## the char embeddings, which need the char encoder to be run again.
    gradients = tf.gradients(loss, self.adv_embedding)
    new_embeddings = []
    for i, embedding in enumerate(self.adv_embedding):
        normalized_gradient = tf.stop_gradient(
            gradients[i] /
            tf.norm(gradients[i], axis=-1,
                    keep_dims=True))  ## do not take second-order gradient
        epsilon = 0.001 * tf.sqrt(
            tf.cast(tf.shape(embedding)[-1], tf.float32))
        new_embedding = embedding + epsilon * normalized_gradient
        if i == len(self.adv_embedding) - 1:  ## char computation
            if self.opts.chars_dim > 0:
                new_embedding = self.add_dropout(new_embedding,
                                                 self.input_keep_prob)
                new_embedding = encode_char(
                    new_embedding, self.char_weights
                )  ## [seq_len, batch_size, nb_filters]
        new_embeddings.append(new_embedding)
    inputs_tensor = tf.concat(new_embeddings,
                              2)  ## [seq_len, batch_size, inputs_dim]
    adv_loss, projected_outputs = self.feed_network_inputs(inputs_tensor,
                                                           adv=True)
    return adv_loss, projected_outputs
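
For reference, the perturbation applied above is the usual fast-gradient-style adversarial perturbation r = epsilon * g / ||g||, where g is the gradient of the loss with respect to each input embedding, the gradient is detached with tf.stop_gradient so no second-order terms flow back, and epsilon is scaled by the square root of the embedding dimension. A minimal NumPy sketch of just that arithmetic, with made-up toy values that are not taken from the model above:

import numpy as np

## Toy gradient for two tokens with embedding dim 4 (hypothetical values).
grad = np.array([[0.2, -0.1, 0.4, 0.05],
                 [0.0,  0.3, -0.2, 0.1]])
emb_dim = grad.shape[-1]

## Normalize each row, then scale by epsilon = 0.001 * sqrt(embedding dim),
## matching the epsilon used in add_adversarial_loss above.
norm = np.linalg.norm(grad, axis=-1, keepdims=True)
r_adv = 0.001 * np.sqrt(emb_dim) * grad / norm

embedding = np.zeros_like(grad)  ## stand-in for the real embedding values
perturbed = embedding + r_adv    ## this is what gets fed back through the network
print(perturbed)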
Example 2
    def add_char_embedding(self):
        with tf.device('/cpu:0'):
            with tf.variable_scope('char_embedding') as scope:
                embedding = tf.get_variable(
                    'char_embedding_mat',
                    [self.loader.nb_chars + 1, self.opts.chars_dim])  ## +1 for padding

            inputs = tf.nn.embedding_lookup(
                embedding, self.inputs_placeholder_dict['chars'])
            ## [batch_size, seq_len, word_len, embedding_dim]
            inputs = tf.transpose(inputs, perm=[1, 0, 2, 3])
            ## [seq_len, batch_size, word_len, embedding_dim]
            inputs = self.add_dropout(inputs, self.input_keep_prob)
            weights = get_char_weights(self.opts, 'char_encoding')
            inputs = encode_char(inputs, weights) ## [seq_len, batch_size, nb_filters]
        return inputs 
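
The helpers get_char_weights and encode_char are not shown in these snippets. A plausible reading, consistent with the [seq_len, batch_size, nb_filters] shape noted in the comments, is a character-level CNN with max pooling over the word_len dimension. The sketch below is an assumption rather than the original implementation; the names get_char_weights_sketch and encode_char_sketch, the window size, and the single-filter-bank design are all hypothetical:

import tensorflow as tf

def get_char_weights_sketch(chars_dim, nb_filters, window_size=3, scope='char_encoding'):
    ## Hypothetical parameter set: one 1-D convolution bank over character windows.
    with tf.variable_scope(scope):
        conv_w = tf.get_variable('conv_W', [window_size, chars_dim, nb_filters])
        conv_b = tf.get_variable('conv_b', [nb_filters])
    return {'conv_W': conv_w, 'conv_b': conv_b}

def encode_char_sketch(inputs, weights):
    ## inputs: [seq_len, batch_size, word_len, chars_dim]
    _, chars_dim, nb_filters = weights['conv_W'].get_shape().as_list()
    shape = tf.shape(inputs)
    ## Merge seq_len and batch_size so conv1d sees [N, word_len, chars_dim].
    flat = tf.reshape(inputs, [shape[0] * shape[1], shape[2], chars_dim])
    conv = tf.nn.relu(tf.nn.conv1d(flat, weights['conv_W'], stride=1,
                                   padding='SAME') + weights['conv_b'])
    pooled = tf.reduce_max(conv, axis=1)  ## max-pool over word_len
    return tf.reshape(pooled, [shape[0], shape[1], nb_filters])  ## [seq_len, batch_size, nb_filters]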
Example 3
    def add_char_embedding(self):
        with tf.device('/cpu:0'):
            with tf.variable_scope('char_embedding') as scope:
                embedding = tf.get_variable(
                    'char_embedding_mat',
                    [self.loader.nb_chars + 1, self.opts.chars_dim])  ## +1 for padding

            inputs = tf.nn.embedding_lookup(
                embedding, self.inputs_placeholder_dict['chars'])
            ## [batch_size, seq_len-1, word_len, embedding_dim]
            ## -1 because we don't have ROOT
            inputs = tf.transpose(inputs, perm=[1, 0, 2, 3])
            ## [seq_len-1, batch_size, word_len, embedding_dim]
            inputs = self.add_dropout(inputs, self.input_keep_prob)
            weights = get_char_weights(self.opts, 'char_encoding')
            inputs = encode_char(inputs, weights) ## [seq_len-1, batch_size, nb_filters]
            shape = tf.shape(inputs)
            ## add 0 vectors for <-root->
            inputs = tf.concat([tf.zeros([1, shape[1], shape[2]]), inputs], 0)
        return inputs
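
Example 3 differs from Example 2 only in that the character encoder never sees the artificial <-root-> token (its input sequence is one step shorter), so a zero vector is prepended to keep the char features aligned with the word-level inputs. How the result is combined with the other embeddings is not shown here; a hedged guess, consistent with the tf.concat(new_embeddings, 2) call in add_adversarial_loss above (the word_embeddings tensor and the helper name are assumptions, not taken from the source):

import tensorflow as tf

def combine_word_and_char(word_embeddings, char_inputs):
    ## word_embeddings: [seq_len, batch_size, word_dim]  (hypothetical tensor)
    ## char_inputs:     [seq_len, batch_size, nb_filters] (from add_char_embedding)
    ## Concatenate along the feature axis, as add_adversarial_loss does.
    return tf.concat([word_embeddings, char_inputs], 2)  ## [seq_len, batch_size, inputs_dim]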