def _add_embedding_lookup(self):
    with tf.variable_scope('word_embeddings'):
        if self.cfg.use_word_emb:
            _word_emb = tf.Variable(self.cfg.word_emb, name='_word_emb',
                                    trainable=self.cfg.finetune_emb, dtype=tf.float32)
        else:
            _word_emb = tf.get_variable(name='_word_emb', shape=[self.cfg.vocab_size, self.cfg.word_dim],
                                        trainable=True, dtype=tf.float32)
        word_emb = tf.nn.embedding_lookup(_word_emb, self.word_ids, name='word_emb')
        if self.cfg.use_char_emb:  # use cnn to generate chars representation
            with tf.variable_scope('char_embeddings'):
                _char_emb = tf.get_variable(name='_char_emb', dtype=tf.float32, trainable=True,
                                            shape=[self.cfg.char_vocab_size, self.cfg.char_dim])
                char_emb = tf.nn.embedding_lookup(_char_emb, self.char_ids, name='char_emb')
                char_emb_shape = tf.shape(char_emb)
                char_rep = multi_conv1d(char_emb, self.cfg.filter_sizes, self.cfg.heights, "VALID",
                                        self.is_train, self.cfg.keep_prob, scope="char_cnn")
                char_rep = tf.reshape(char_rep, [char_emb_shape[0], char_emb_shape[1], self.cfg.char_rep_dim])
                word_emb = tf.concat([word_emb, char_rep], axis=-1)  # concat word emb and corresponding char rep
        if self.cfg.use_highway:
            self.word_emb = highway_network(word_emb, self.cfg.highway_num_layers, bias=True,
                                            is_train=self.is_train, keep_prob=self.cfg.keep_prob)
        else:
            self.word_emb = dropout(word_emb, keep_prob=self.cfg.keep_prob, is_train=self.is_train)
        print('word embedding shape: {}'.format(self.word_emb.get_shape().as_list()))
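# ---------------------------------------------------------------------------
# None of the snippets in this section define multi_conv1d. The sketch below
# is only a plausible reconstruction (a BiDAF-style multi-filter char CNN)
# matching the positional signature used above: one convolution per
# (filter_size, height) pair, max-pooled over the word-length axis, with the
# branch outputs concatenated so that char_rep_dim == sum(filter_sizes). Note
# the second snippet passes drop_rate instead of keep_prob, so its variant
# has a slightly different signature. is_train is assumed to be a scalar
# boolean tensor. Import shown once here for self-containedness.
import tensorflow as tf


def multi_conv1d(inputs, filter_sizes, heights, padding, is_train, keep_prob, scope=None):
    """inputs: [batch, sent_len, word_len, char_dim] -> [batch, sent_len, sum(filter_sizes)]"""
    with tf.variable_scope(scope or "multi_conv1d"):
        outputs = []
        for i, (num_filters, height) in enumerate(zip(filter_sizes, heights)):
            with tf.variable_scope("conv1d_{}".format(i)):
                # dropout only at training time (is_train is a boolean tensor)
                x = tf.cond(is_train, lambda: tf.nn.dropout(inputs, keep_prob), lambda: inputs)
                in_dim = x.get_shape().as_list()[-1]
                filt = tf.get_variable("filter", shape=[1, height, in_dim, num_filters])
                bias = tf.get_variable("bias", shape=[num_filters], initializer=tf.zeros_initializer())
                # convolve over the word-length axis; the sentence axis acts as a second batch dim
                conv = tf.nn.conv2d(x, filt, strides=[1, 1, 1, 1], padding=padding) + bias
                # max-pool over word length -> [batch, sent_len, num_filters]
                outputs.append(tf.reduce_max(tf.nn.relu(conv), axis=2))
        return tf.concat(outputs, axis=-1)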
def _build_embedding_op(self):
    with tf.variable_scope("embeddings"):
        if not self.cfg["use_pretrained"]:
            self.word_embeddings = tf.get_variable(name="emb", dtype=tf.float32, trainable=True,
                                                   shape=[self.word_vocab_size, self.cfg["emb_dim"]])
        else:
            word_emb_1 = tf.Variable(np.load(self.cfg["pretrained_emb"])["embeddings"], name="word_emb_1",
                                     dtype=tf.float32, trainable=self.cfg["tuning_emb"])
            word_emb_2 = tf.get_variable(name="word_emb_2", shape=[3, self.cfg["emb_dim"]], dtype=tf.float32,
                                         trainable=True)  # for UNK, NUM and END
            self.word_embeddings = tf.concat([word_emb_1, word_emb_2], axis=0)
        word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.words, name="word_emb")
        print("word embedding shape: {}".format(word_emb.get_shape().as_list()))
        if self.cfg["use_chars"]:
            self.char_embeddings = tf.get_variable(name="c_emb", dtype=tf.float32, trainable=True,
                                                   shape=[self.char_vocab_size, self.cfg["char_emb_dim"]])
            char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.chars, name="chars_emb")
            # train char representation
            if self.cfg["char_represent_method"] == "rnn":
                char_bi_rnn = BiRNN(self.cfg["char_num_units"], cell_type=self.cfg["cell_type"], scope="c_bi_rnn")
                char_represent = char_bi_rnn(char_emb, self.char_seq_len, use_last_state=True)
            else:
                char_represent = multi_conv1d(char_emb, self.filter_sizes, self.channel_sizes,
                                              drop_rate=self.drop_rate, is_train=self.is_train)
            print("chars representation shape: {}".format(char_represent.get_shape().as_list()))
            word_emb = tf.concat([word_emb, char_represent], axis=-1)
        if self.cfg["use_highway"]:
            self.word_emb = highway_network(word_emb, self.cfg["highway_layers"], use_bias=True, bias_init=0.0,
                                            keep_prob=self.keep_prob, is_train=self.is_train)
        else:
            self.word_emb = tf.layers.dropout(word_emb, rate=self.drop_rate, training=self.is_train)
        print("word and chars concatenation shape: {}".format(self.word_emb.get_shape().as_list()))
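# ---------------------------------------------------------------------------
# BiRNN is likewise undefined in these snippets. A minimal sketch of what the
# calls above imply: a bidirectional dynamic RNN whose final forward/backward
# states are concatenated into a fixed-size char representation when the
# last-state flag is set (spelled use_last_state in one snippet and
# return_last_state in another, so both are accepted here). An assumed
# reconstruction, not the original class.
class BiRNN:
    def __init__(self, num_units, cell_type="lstm", scope="bi_rnn"):
        Cell = tf.nn.rnn_cell.LSTMCell if cell_type == "lstm" else tf.nn.rnn_cell.GRUCell
        self.num_units = num_units
        self.cell_fw, self.cell_bw = Cell(num_units), Cell(num_units)
        self.scope = scope

    def __call__(self, inputs, seq_len, use_last_state=False, return_last_state=False):
        with tf.variable_scope(self.scope):
            four_d = inputs.get_shape().ndims == 4
            if four_d:  # [batch, sent_len, word_len, dim] -> [batch * sent_len, word_len, dim]
                s = tf.shape(inputs)
                dim = inputs.get_shape().as_list()[-1]
                inputs = tf.reshape(inputs, [s[0] * s[1], s[2], dim])
                seq_len = tf.reshape(seq_len, [s[0] * s[1]])
            outputs, states = tf.nn.bidirectional_dynamic_rnn(self.cell_fw, self.cell_bw, inputs,
                                                              sequence_length=seq_len, dtype=tf.float32)
            if use_last_state or return_last_state:
                # LSTM states are (c, h) tuples; GRU states are plain tensors
                h_fw = states[0].h if isinstance(states[0], tf.nn.rnn_cell.LSTMStateTuple) else states[0]
                h_bw = states[1].h if isinstance(states[1], tf.nn.rnn_cell.LSTMStateTuple) else states[1]
                output = tf.concat([h_fw, h_bw], axis=-1)  # [batch * sent_len, 2 * num_units]
                if four_d:
                    output = tf.reshape(output, [s[0], s[1], 2 * self.num_units])
                return output
            return tf.concat(outputs, axis=-1)  # per-step outputs, [..., word_len, 2 * num_units]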
def _build_embeddings_op(self):
    with tf.variable_scope('words'):
        if self.cfg.use_pretrained:
            _word_embeddings = tf.Variable(self.cfg.glove_embeddings, name='_word_embeddings',
                                           dtype=tf.float32, trainable=self.cfg.finetune_emb)
        else:
            _word_embeddings = tf.get_variable(name='_word_embeddings', dtype=tf.float32, trainable=True,
                                               shape=[self.cfg.word_vocab_size, self.cfg.word_dim])
        word_embeddings = tf.nn.embedding_lookup(_word_embeddings, self.word_ids, name="word_embeddings")
    with tf.variable_scope('char_represent'):
        if self.cfg.use_char_emb:
            _char_embeddings = tf.get_variable(name='_char_embeddings', dtype=tf.float32, trainable=True,
                                               shape=[self.cfg.char_vocab_size, self.cfg.char_dim])
            char_embeddings = tf.nn.embedding_lookup(_char_embeddings, self.char_ids, name="char_embeddings")
            # shape = (batch size, max length of sentence, max length of word, char_dim)
            s = tf.shape(char_embeddings)
            output = multi_conv1d(char_embeddings, self.cfg.filter_sizes, self.cfg.heights, "VALID",
                                  self.is_train, self.keep_prob, scope="char_cnn")
            # shape = (batch size, max sentence length, char representation size)
            self.char_output = tf.reshape(output, [s[0], s[1], self.cfg.char_out_size])
            word_embeddings = tf.concat([word_embeddings, self.char_output], axis=-1)
    if self.cfg.use_highway:
        with tf.variable_scope("highway"):
            self.word_embeddings = highway_network(word_embeddings, self.cfg.highway_num_layers, bias=True,
                                                   is_train=self.is_train, keep_prob=self.keep_prob)
    else:  # directly dropout before model_op
        self.word_embeddings = dropout(word_embeddings, keep_prob=self.keep_prob, is_train=self.is_train)
    print('word embeddings shape: {}'.format(self.word_embeddings.get_shape().as_list()))
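# ---------------------------------------------------------------------------
# highway_network and dropout are the remaining assumed helpers. The sketch
# follows the bias/is_train/keep_prob signature used in this snippet (the
# second snippet spells the flags use_bias/bias_init instead). Each highway
# layer gates between a non-linear transform and the identity,
# y = g * relu(W1 x) + (1 - g) * x with g = sigmoid(W2 x), as in
# Srivastava et al. (2015). Again a sketch, not the original code.
def dropout(x, keep_prob, is_train):
    # apply dropout only when the is_train tensor is true
    return tf.cond(is_train, lambda: tf.nn.dropout(x, keep_prob), lambda: x)


def highway_network(x, num_layers, bias=True, is_train=None, keep_prob=1.0):
    cur = x
    dim = x.get_shape().as_list()[-1]
    for i in range(num_layers):
        with tf.variable_scope("highway_layer_{}".format(i)):
            h = dropout(cur, keep_prob, is_train)
            trans = tf.layers.dense(h, dim, activation=tf.nn.relu, use_bias=bias, name="trans")
            gate = tf.layers.dense(h, dim, activation=tf.nn.sigmoid, use_bias=bias, name="gate")
            cur = gate * trans + (1.0 - gate) * cur  # gated mix of transform and identity
    return cur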
def _build_embeddings_op(self):
    with tf.variable_scope('words'):
        if self.cfg.use_pretrained:
            _word_embeddings = tf.Variable(self.cfg.glove_embeddings, name='_word_embeddings',
                                           dtype=tf.float32, trainable=self.cfg.finetune_emb)
        else:
            _word_embeddings = tf.get_variable(name='_word_embeddings', dtype=tf.float32, trainable=True,
                                               shape=[self.cfg.word_vocab_size, self.cfg.word_dim])
        word_embeddings = tf.nn.embedding_lookup(_word_embeddings, self.word_ids, name="word_embeddings")
    with tf.variable_scope('char_rep_method'):
        if self.cfg.use_char_emb:
            _char_embeddings = tf.get_variable(name='_char_embeddings', dtype=tf.float32, trainable=True,
                                               shape=[self.cfg.char_vocab_size, self.cfg.char_dim])
            char_embeddings = tf.nn.embedding_lookup(_char_embeddings, self.char_ids, name="char_embeddings")
            # shape = (batch size, max length of sentence, max length of word, char_dim)
            s = tf.shape(char_embeddings)
            if self.cfg.char_rep_method == 'rnn':
                # flatten to (batch size * max sentence length, max word length, char_dim) for the RNN
                char_embeddings = tf.reshape(char_embeddings, shape=[s[0] * s[1], s[-2], self.cfg.char_dim])
                word_lengths = tf.reshape(self.word_lengths, shape=[s[0] * s[1]])
                char_bi_rnn = BiRNN(self.cfg.num_units_char, scope='char_rnn')
                output = char_bi_rnn(char_embeddings, word_lengths, return_last_state=True)
            else:  # cnn model for char representation
                output = multi_conv1d(char_embeddings, self.cfg.filter_sizes, self.cfg.heights, "VALID",
                                      self.is_train, self.keep_prob, scope="char_cnn")
            # shape = (batch size, max sentence length, char representation size)
            self.char_output = tf.reshape(output, [s[0], s[1], self.cfg.char_out_size])
            word_embeddings = tf.concat([word_embeddings, self.char_output], axis=-1)
    if self.cfg.use_highway:
        with tf.variable_scope("highway"):
            self.word_embeddings = highway_network(word_embeddings, self.cfg.highway_num_layers, bias=True,
                                                   is_train=self.is_train, keep_prob=self.keep_prob)
    else:  # directly dropout before model_op
        self.word_embeddings = dropout(word_embeddings, keep_prob=self.keep_prob, is_train=self.is_train)
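# Shape walk-through for the rnn branch above, with illustrative numbers (not
# from the source): batch = 32, max sentence length = 40, max word length = 15,
# char_dim = 50, num_units_char = 50:
#   char_embeddings  [32, 40, 15, 50]  -> reshape ->  [32*40, 15, 50]
#   BiRNN last states (fw ++ bw)                      [32*40, 100]
#   reshape back via char_out_size                    [32, 40, 100]
# So char_out_size must equal 2 * num_units_char on the rnn path, and
# sum(filter_sizes) on the cnn path, for the final tf.reshape to be valid.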
def _add_embedding_lookup(self):
    with tf.variable_scope('word_embeddings'):
        if self.cfg.use_word_emb:
            # note this pattern: declares a variable with an explicit initial value
            _word_emb = tf.Variable(self.cfg.word_emb, name='_word_emb',
                                    trainable=self.cfg.finetune_emb, dtype=tf.float32)
        else:
            # declares an ordinary (randomly initialized) variable
            _word_emb = tf.get_variable(name='_word_emb', shape=[self.cfg.vocab_size, self.cfg.word_dim],
                                        trainable=True, dtype=tf.float32)
        word_emb = tf.nn.embedding_lookup(_word_emb, self.word_ids, name='word_emb')
        if self.cfg.use_char_emb:  # use cnn to generate chars representation
            with tf.variable_scope('char_embeddings'):
                _char_emb = tf.get_variable(name='_char_emb', dtype=tf.float32, trainable=True,
                                            shape=[self.cfg.char_vocab_size, self.cfg.char_dim])
                # easiest to reason about by tracing a single sample through the lookup
                # TODO: still need to fully work out this lookup's shape
                char_emb = tf.nn.embedding_lookup(_char_emb, self.char_ids, name='char_emb')
                print("Xu Haiming's test -----> char_emb shape", char_emb.get_shape().as_list())
                # this tensor is image-like, except the third axis is the max word
                # length of the sentence rather than RGB channels
                char_emb_shape = tf.shape(char_emb)
                # mind the resulting shape here: [-1, max_len_sen, char_out_size * len(filters)]
                char_rep = multi_conv1d(char_emb, self.cfg.filter_sizes, self.cfg.heights, "VALID",
                                        self.is_train, self.cfg.keep_prob, scope="char_cnn")
                char_rep = tf.reshape(char_rep, [char_emb_shape[0], char_emb_shape[1], self.cfg.char_rep_dim])
                word_emb = tf.concat([word_emb, char_rep], axis=-1)  # concat word emb and corresponding char rep
        if self.cfg.use_highway:
            self.word_emb = highway_network(word_emb, self.cfg.highway_num_layers, bias=True,
                                            is_train=self.is_train, keep_prob=self.cfg.keep_prob)
        else:
            self.word_emb = dropout(word_emb, keep_prob=self.cfg.keep_prob, is_train=self.is_train)
        print('word embedding shape: {}'.format(self.word_emb.get_shape().as_list()))