def add_char_embedding(self):
    """Build character-level input features for the encoder.

    Looks up character embeddings on the CPU, reorders the batch to
    time-major layout, applies input dropout, and runs the convolutional
    character encoder.

    Returns:
        Tensor of shape [seq_len, batch_size, nb_filters].
    """
    with tf.device('/cpu:0'):
        with tf.variable_scope('char_embedding'):
            # One extra row (index nb_chars) is reserved for padding.
            char_table = tf.get_variable(
                'char_embedding_mat',
                [self.loader.nb_chars + 1, self.opts.chars_dim])
            # [batch_size, seq_len, word_len, chars_dim]
            char_feats = tf.nn.embedding_lookup(
                char_table, self.inputs_placeholder_dict['chars'])
            # Time-major: [seq_len, batch_size, word_len, chars_dim]
            char_feats = tf.transpose(char_feats, perm=[1, 0, 2, 3])
            char_feats = self.add_dropout(char_feats, self.input_keep_prob)
            conv_weights = get_char_weights(self.opts, 'char_encoding')
            # [seq_len, batch_size, nb_filters]
            return encode_char(char_feats, conv_weights)
def add_char_embedding(self):
    """Build character-level input features, prepending a zero ROOT step.

    The character placeholder covers seq_len-1 positions (the ROOT token
    has no characters), so after encoding, a zero vector is concatenated
    at the front of the time axis for the ROOT position.

    Returns:
        Tensor of shape [seq_len, batch_size, nb_filters].
    """
    with tf.device('/cpu:0'):
        with tf.variable_scope('char_embedding'):
            # One extra row (index nb_chars) is reserved for padding.
            char_table = tf.get_variable(
                'char_embedding_mat',
                [self.loader.nb_chars + 1, self.opts.chars_dim])
            # [batch_size, seq_len-1, word_len, chars_dim] — no ROOT token.
            char_feats = tf.nn.embedding_lookup(
                char_table, self.inputs_placeholder_dict['chars'])
            # Time-major: [seq_len-1, batch_size, word_len, chars_dim]
            char_feats = tf.transpose(char_feats, perm=[1, 0, 2, 3])
            char_feats = self.add_dropout(char_feats, self.input_keep_prob)
            conv_weights = get_char_weights(self.opts, 'char_encoding')
            # [seq_len-1, batch_size, nb_filters]
            encoded = encode_char(char_feats, conv_weights)
            dims = tf.shape(encoded)
            # Prepend a zero feature vector for the <-root-> position.
            return tf.concat([tf.zeros([1, dims[1], dims[2]]), encoded], 0)