# TensorFlow 1.x graph-building methods of a BiRNN sequence-labeling model
# (the surrounding class definition is not shown in this section).
import tensorflow as tf

# BiRNN, StackBiRNN, DenseConnectBiRNN, dense, dropout, multi_conv1d and
# highway_network are helper layers assumed to be defined elsewhere in this project.

def _build_model_op(self):
    with tf.variable_scope('bidirectional_rnn'):
        if self.cfg.mode_type == 'stack':  # n-layer stacked bidirectional RNN
            rnns = StackBiRNN(self.cfg.num_layers, self.cfg.num_units, scope='stack_mode')
        elif self.cfg.mode_type == 'densely_connected':  # densely connected bidirectional RNN
            rnns = DenseConnectBiRNN(self.cfg.num_layers_dc, self.cfg.num_units_list)
        else:  # default: single bidirectional RNN
            rnns = BiRNN(self.cfg.num_units, scope='single_mode')
        output = rnns(self.word_embeddings, self.seq_lengths)
        output = dropout(output, keep_prob=self.keep_prob, is_train=self.is_train)
    self.logits = dense(output, self.cfg.tag_vocab_size, use_bias=True, scope='project')
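
# The `dropout` helper used above is not defined in this section. A minimal sketch,
# assuming the BiDAF-style signature implied by the call sites (`is_train` a scalar
# boolean tensor); the project's actual implementation may differ:
def dropout(x, keep_prob=1.0, is_train=None):
    # Build the dropped-out tensor once, then select it only in training mode.
    if keep_prob < 1.0:
        d = tf.nn.dropout(x, keep_prob=keep_prob)
        if is_train is not None:
            return tf.cond(tf.cast(is_train, tf.bool), lambda: d, lambda: x)
        return d
    return x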

def _build_embeddings_op(self):
    tf.summary.scalar('dropout_keep_probability', self.keep_prob)
    tf.summary.scalar('learning_rate', self.lr)
    with tf.variable_scope('words'):
        if self.cfg.use_pretrained:  # initialize from pre-trained GloVe vectors
            _word_embeddings = tf.Variable(self.cfg.glove_embeddings, name='_word_embeddings',
                                           dtype=tf.float32, trainable=self.cfg.finetune_emb)
        else:  # randomly initialized, always trainable
            _word_embeddings = tf.get_variable(name='_word_embeddings', dtype=tf.float32, trainable=True,
                                               shape=[self.cfg.word_vocab_size, self.cfg.word_dim])
        word_embeddings = tf.nn.embedding_lookup(_word_embeddings, self.word_ids, name='word_embeddings')
    with tf.variable_scope('char_rep_method'):
        if self.cfg.use_char_emb:
            _char_embeddings = tf.get_variable(name='_char_embeddings', dtype=tf.float32, trainable=True,
                                               shape=[self.cfg.char_vocab_size, self.cfg.char_dim])
            char_embeddings = tf.nn.embedding_lookup(_char_embeddings, self.char_ids, name='char_embeddings')
            if self.cfg.char_rep_method == 'rnn':  # BiRNN over characters, keep the last hidden states
                char_rnn = BiRNN(self.cfg.num_units_char)
                char_output = char_rnn(char_embeddings, self.word_lengths, return_last_state=True)
            else:  # CNN model for char representation
                char_output = multi_conv1d(char_embeddings, self.cfg.filter_sizes, self.cfg.heights,
                                           'VALID', self.is_train, self.keep_prob)
            word_embeddings = tf.concat([word_embeddings, char_output], axis=-1)
    if self.cfg.use_highway:
        with tf.variable_scope('highway'):
            self.word_embeddings = highway_network(word_embeddings, self.cfg.highway_num_layers, bias=True,
                                                   is_train=self.is_train, keep_prob=self.keep_prob)
    else:  # directly dropout before model_op
        self.word_embeddings = dropout(word_embeddings, keep_prob=self.keep_prob, is_train=self.is_train)
    print("embeddings shape: {}".format(self.word_embeddings.get_shape().as_list()))
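
# `highway_network` is likewise defined elsewhere. A minimal sketch of a standard
# highway network (Srivastava et al., 2015) matching the arguments used above; the
# exact behavior of the project's version is an assumption:
def highway_network(x, num_layers, bias=True, is_train=None, keep_prob=1.0, scope=None):
    with tf.variable_scope(scope or 'highway_network'):
        d = x.get_shape().as_list()[-1]
        for i in range(num_layers):
            with tf.variable_scope('layer_{}'.format(i)):
                trans = tf.layers.dense(x, d, activation=tf.nn.relu, use_bias=bias, name='trans')
                gate = tf.layers.dense(x, d, activation=tf.nn.sigmoid, use_bias=bias, name='gate')
                trans = dropout(trans, keep_prob=keep_prob, is_train=is_train)
                x = gate * trans + (1.0 - gate) * x  # gated mix of transform and carry
        return x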

# Variant of _build_embeddings_op in which the character representation is always
# built with a CNN and explicitly reshaped to the configured output size.
def _build_embeddings_op(self):
    tf.summary.scalar('dropout_keep_probability', self.keep_prob)
    tf.summary.scalar('learning_rate', self.lr)
    with tf.variable_scope('words'):
        if self.cfg.use_pretrained:  # initialize from pre-trained GloVe vectors
            _word_embeddings = tf.Variable(self.cfg.glove_embeddings, name='_word_embeddings',
                                           dtype=tf.float32, trainable=self.cfg.finetune_emb)
        else:  # randomly initialized, always trainable
            _word_embeddings = tf.get_variable(name='_word_embeddings', dtype=tf.float32, trainable=True,
                                               shape=[self.cfg.word_vocab_size, self.cfg.word_dim])
        word_embeddings = tf.nn.embedding_lookup(_word_embeddings, self.word_ids, name='word_embeddings')
    with tf.variable_scope('char_represent'):
        if self.cfg.use_char_emb:
            _char_embeddings = tf.get_variable(name='_char_embeddings', dtype=tf.float32, trainable=True,
                                               shape=[self.cfg.char_vocab_size, self.cfg.char_dim])
            char_embeddings = tf.nn.embedding_lookup(_char_embeddings, self.char_ids, name='char_embeddings')
            # char_embeddings shape: [batch_size, max_sentence_length, max_word_length, char_dim]
            s = tf.shape(char_embeddings)
            output = multi_conv1d(char_embeddings, self.cfg.filter_sizes, self.cfg.heights, 'VALID',
                                  self.is_train, self.keep_prob, scope='char_cnn')
            # shape = (batch_size, max_sentence_length, char representation size)
            self.char_output = tf.reshape(output, [s[0], s[1], self.cfg.char_out_size])
            word_embeddings = tf.concat([word_embeddings, self.char_output], axis=-1)
    if self.cfg.use_highway:
        with tf.variable_scope('highway'):
            self.word_embeddings = highway_network(word_embeddings, self.cfg.highway_num_layers, bias=True,
                                                   is_train=self.is_train, keep_prob=self.keep_prob)
    else:  # directly dropout before model_op
        self.word_embeddings = dropout(word_embeddings, keep_prob=self.keep_prob, is_train=self.is_train)
    print('word embeddings shape: {}'.format(self.word_embeddings.get_shape().as_list()))
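
# `multi_conv1d` runs several character-level convolutions of different widths and
# max-pools each over the characters of a word. A sketch in the BiDAF style, assuming
# `filter_sizes` are output channel counts and `heights` are kernel widths; the
# project's actual implementation may differ:
def multi_conv1d(inputs, filter_sizes, heights, padding, is_train=None, keep_prob=1.0, scope=None):
    # inputs: [batch, max_sentence_len, max_word_len, char_dim]
    with tf.variable_scope(scope or 'multi_conv1d'):
        outs = []
        for filter_size, height in zip(filter_sizes, heights):
            if filter_size == 0:
                continue
            with tf.variable_scope('conv1d_{}'.format(height)):
                in_dim = inputs.get_shape().as_list()[-1]
                filt = tf.get_variable('filter', shape=[1, height, in_dim, filter_size])
                bias = tf.get_variable('bias', shape=[filter_size])
                x = dropout(inputs, keep_prob=keep_prob, is_train=is_train)
                # a [1, height] 2-D convolution acts as a 1-D convolution over characters
                conv = tf.nn.conv2d(x, filt, strides=[1, 1, 1, 1], padding=padding) + bias
                outs.append(tf.reduce_max(tf.nn.relu(conv), axis=2))  # pool over characters
        return tf.concat(outs, axis=-1)  # [batch, max_sentence_len, sum(filter_sizes)]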

# Variant of _build_model_op that adds shape logging, an explicit projection scope,
# and summaries over the logits.
def _build_model_op(self):
    with tf.variable_scope('bidirectional_rnn'):
        if self.cfg.mode_type == 'stack':  # n-layer stacked bidirectional RNN
            rnns = StackBiRNN(self.cfg.num_layers, self.cfg.num_units, scope='stack_mode')
        elif self.cfg.mode_type == 'densely_connected':  # densely connected bidirectional RNN
            rnns = DenseConnectBiRNN(self.cfg.num_layers_dc, self.cfg.num_units_list)
        else:  # default: single bidirectional RNN
            rnns = BiRNN(self.cfg.num_units, scope='single_mode')
        output = rnns(self.word_embeddings, self.seq_lengths)
        output = dropout(output, keep_prob=self.keep_prob, is_train=self.is_train)
        print("rnn output shape: {}".format(output.get_shape().as_list()))
    with tf.variable_scope('project'):
        self.logits = tf.layers.dense(output, self.cfg.tag_vocab_size, use_bias=True)
        print("logits shape: {}".format(self.logits.get_shape().as_list()))
        self.variable_summaries(self.logits)
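
# How these ops would typically be wired together (a hypothetical driver; the
# placeholder, loss, and training methods named below are not shown in this section):
#
#     def _build_graph(self):
#         self._add_placeholders()      # word_ids, char_ids, seq_lengths, ...
#         self._build_embeddings_op()   # sets self.word_embeddings
#         self._build_model_op()        # sets self.logits
#         self._build_loss_op()         # e.g. CRF or softmax cross-entropy over logits
#         self._build_train_op()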