def __init__(self, cl_logits_input_dim=None):
    """Build the model's layer registry (embeddings, CNN, BiLSTM, heads).

    Args:
        cl_logits_input_dim: accepted but not referenced in this body —
            presumably kept for signature compatibility with callers (confirm).
    """
    self.layers = {}
    # Vocabulary must be loaded first: the embedding tables below read
    # self.vocab_size / self.vocab_freqs.
    self.initialize_vocab()
    # Two independent embedding tables built from identical hyper-parameters.
    for key in ('embedding', 'embedding_1'):
        self.layers[key] = layers_lib.Embedding(
            self.vocab_size,
            FLAGS.embedding_dims,
            FLAGS.normalize_embeddings,
            self.vocab_freqs,
            FLAGS.keep_prob_emb,
            name=key)
    self.layers['cnn'] = layers_lib.CNN(FLAGS.embedding_dims,
                                        FLAGS.keep_prob_emb)
    self.layers['lstm_1'] = layers_lib.BiLSTM(FLAGS.rnn_cell_size,
                                              FLAGS.rnn_num_layers,
                                              name="Bilstm")
    # 'all' mode exposes one extra action class.
    if FLAGS.action == 'all':
        num_actions = 5
    else:
        num_actions = 4
    self.layers['action_select'] = layers_lib.Actionselect(
        num_actions, FLAGS.keep_prob_dense, name='action_output')
    self.layers['cl_logits'] = layers_lib.Project_layer(
        FLAGS.num_classes, FLAGS.keep_prob_dense, name='project_layer')
def _init_embed(self):
    """Create embedding tables and lookup tensors for the model inputs."""
    # Pre-trained word embeddings fed through a placeholder; kept frozen.
    self.word_embed_matrix = tf.Variable(initial_value=self.word_embed_pl,
                                         trainable=False)
    # Trainable relative-position embeddings covering [-L, +L]
    # (2 * sentence_length + 1 slots).
    self.pos_embed_matrix = tf.Variable(initial_value=np.random.normal(
        0, 0.1, (2 * self.args.sentence_length + 1, self.args.pos_dims)),
                                        trainable=True,
                                        dtype=tf.float32)
    self.layers = {}
    self.layers['BiLSTM'] = layers_lib.BiLSTM(self.args.rnn_size)
    # Single attention projection vector: (rnn_size, 1).
    self.layers['att_weights'] = {
        'h1': tf.Variable(
            tf.truncated_normal([self.args.rnn_size, 1], stddev=0.01)),
    }
    self.input_data_embed = tf.nn.embedding_lookup(self.word_embed_matrix,
                                                   self.input_data)
    self.input_sents_embed_un = tf.nn.embedding_lookup(
        self.word_embed_matrix, self.input_sents)
    # NOTE(review): likely copy-paste bug — this looks up the WORD table with
    # the same word-id tensor as input_sents_embed_un (so the two are
    # identical), while pos_embed_matrix above is never used. Probably
    # intended: tf.nn.embedding_lookup(self.pos_embed_matrix, <position ids>).
    # Confirm against the placeholders defined elsewhere before changing.
    self.input_pos_embed = tf.nn.embedding_lookup(self.word_embed_matrix,
                                                  self.input_sents)
    # Word features and (intended) position features concatenated on last dim.
    self.input_sents_embed = tf.concat(
        [self.input_sents_embed_un, self.input_pos_embed], 2)
def _init_embed(self):
    """Set up character/word/label embedding variables and the RNN layers."""
    char_vocab_size = len(self.model_args.characterEmbed.character2id)
    # Trainable character embedding table.
    self.char_embed = tf.Variable(
        initial_value=tf.truncated_normal(
            [char_vocab_size, self.args.char_rnn_size], stddev=0.01),
        trainable=True,
        name="char_embeddings",
        dtype=tf.float32)
    # Pre-trained word embeddings fed via placeholder; kept frozen.
    self.word_embed_matrix = tf.Variable(initial_value=self.word_embed_pl,
                                         trainable=False)
    self.layers = {
        'BiLSTM': layers_lib.BiLSTM(self.args.rnn_size),
        # char_rnn_size for the char LSTM was chosen arbitrarily
        # (translated from the original Chinese comment).
        'LSTM': layers_lib.LSTM(self.args.char_rnn_size),
    }
    # Label embedding table; 500 rows is a hard-coded capacity.
    label_init = tf.truncated_normal([500, self.args.class_size], stddev=0.01)
    self.label_embedding = tf.get_variable('label_embeddings',
                                           initializer=label_init,
                                           trainable=True)
    self.input_data_embed = tf.nn.embedding_lookup(self.word_embed_matrix,
                                                   self.input_data)
def _init_embed(self):
    """Create word, relation-type and image-type embeddings plus attention weights.

    Bug fix: `rel_isa_embedding_diag` was previously created with
    name="img_type_embedding" (copy-paste), colliding with the image-type
    variable above — TF1 silently uniquifies the duplicate to
    "img_type_embedding_1", which breaks name-based checkpoint save/restore.
    It is now named "rel_isa_embedding_diag".
    """
    # Pre-trained word embeddings fed via placeholder; kept frozen.
    self.word_embed_matrix = tf.Variable(initial_value=self.word_embed_pl,
                                         trainable=False)
    self.layers = {}
    self.layers['BiLSTM'] = layers_lib.BiLSTM(self.args.rnn_size)
    self.feature_dims = self.args.attention_hidden_size
    # Two-layer attention MLP: (2*rnn_size -> attention_hidden_size -> 1).
    self.layers['att_weights'] = {
        'h1': tf.Variable(
            tf.truncated_normal(
                [2 * self.args.rnn_size, self.args.attention_hidden_size],
                stddev=0.01)),
        'h2': tf.Variable(
            tf.truncated_normal([self.args.attention_hidden_size, 1],
                                stddev=0.01)),
    }
    self.input_data_embed = tf.nn.embedding_lookup(self.word_embed_matrix,
                                                   self.input_data)
    # time: 2019/1/21
    # One type-embedding row per class, for relations and for images.
    self.rel_type_embedding = tf.Variable(
        initial_value=tf.truncated_normal(
            [self.args.class_size, self.args.type_dim], stddev=0.01),
        trainable=True,
        name="rel_type_embedding",
        dtype=tf.float32)
    self.img_type_embedding = tf.Variable(
        initial_value=tf.truncated_normal(
            [self.args.class_size, self.args.type_dim], stddev=0.01),
        trainable=True,
        name="img_type_embedding",
        dtype=tf.float32)
    # Diagonal "is-a" matrices, parameterized by their diagonals.
    # FIX: was name="img_type_embedding", duplicating the variable above.
    self.rel_isa_embedding_diag = tf.Variable(
        initial_value=tf.truncated_normal([self.args.type_dim], stddev=0.01),
        trainable=True,
        name="rel_isa_embedding_diag",
        dtype=tf.float32)
    self.rel_isa_embedding = tf.diag(self.rel_isa_embedding_diag)
    self.img_isa_embedding_diag = tf.Variable(
        initial_value=tf.truncated_normal([self.args.type_dim], stddev=0.01),
        trainable=True,
        name="img_isa_embedding_diag",
        dtype=tf.float32)
    self.img_isa_embedding = tf.diag(self.img_isa_embedding_diag)
def _init_embed(self):
    """Build word embeddings, attention weights, and normalized type-path embeddings."""
    # Frozen pre-trained word vectors fed via placeholder.
    self.word_embed_matrix = tf.Variable(initial_value=self.word_embed_pl,
                                         trainable=False)
    self.layers = {}
    self.layers['BiLSTM'] = layers_lib.BiLSTM(self.args.rnn_size)
    # This place is very important!
    self.feature_dims = self.args.attention_hidden_size
    hidden = self.feature_dims

    def _weight(shape):
        # Small-variance truncated-normal attention weight.
        return tf.Variable(tf.truncated_normal(shape, stddev=0.01))

    self.layers['att_weights'] = {
        'h_m': _weight([self.args.word_dim, hidden]),
        'h1': _weight([2 * self.args.rnn_size, hidden]),
        'h2': _weight([hidden, 1]),
    }
    # Trainable per-token type embeddings.
    self.type_token_embeddings = tf.Variable(
        initial_value=tf.truncated_normal(
            [self.type_tree.typeToken_num, self.args.type_dim], stddev=0.01),
        trainable=True,
        name="type_token_embeddings",
        dtype=tf.float32)
    # All-zero row appended so padding ids map to a zero vector.
    zero_row = tf.constant([[0.0] * self.args.type_dim])
    self.type_token_embeddings_add_padding = tf.concat(
        [self.type_token_embeddings, zero_row], 0)
    # A type path embeds as the sum of its token embeddings, then L2-normalized.
    path_tokens = tf.nn.embedding_lookup(self.type_token_embeddings_add_padding,
                                         self.type_path)
    self.type_path_embedding = tf.reduce_sum(path_tokens, 1)
    self.type_embeddings_norm = tf.nn.l2_normalize(self.type_path_embedding,
                                                   -1)
    # Padded variant of the normalized table as well.
    self.type_embeddings_norm_add_padding = tf.concat(
        [self.type_embeddings_norm, zero_row], 0)
def _init_embed(self):
    """Initialize the frozen word-embedding table, BiLSTM layer, and attention weights."""
    self.word_embed_matrix = tf.Variable(initial_value=self.word_embed_pl,
                                         trainable=False)
    attn_hidden = self.args.attention_hidden_size
    self.layers = {}
    self.layers['BiLSTM'] = layers_lib.BiLSTM(self.args.rnn_size)
    # Two-layer attention scorer: (2*rnn_size -> attn_hidden -> scalar).
    self.layers['att_weights'] = {
        'h1': tf.Variable(
            tf.truncated_normal([2 * self.args.rnn_size, attn_hidden],
                                stddev=0.01)),
        'h2': tf.Variable(
            tf.truncated_normal([attn_hidden, 1], stddev=0.01)),
    }
    # Look up embeddings for the input token ids.
    self.input_data_embed = tf.nn.embedding_lookup(self.word_embed_matrix,
                                                   self.input_data)