def _Build_GenerativeNetwork(self, g_input):
    generate_weights = _initialize_generate_variables()
    # Each hidden layer: affine transform -> ReLU -> dropout (self.keep is the keep probability).
    layer0 = tf.nn.relu(tf.matmul(g_input, generate_weights['w0']) + generate_weights['b0'])
    drop_layer0 = tf.nn.dropout(layer0, self.keep)
    layer1 = tf.nn.relu(tf.matmul(drop_layer0, generate_weights['w1']) + generate_weights['b1'])
    drop_layer1 = tf.nn.dropout(layer1, self.keep)
    output_layer = tf.nn.relu(tf.matmul(drop_layer1, generate_weights['w2']) + generate_weights['b2'])
    drop_output_layer = tf.nn.dropout(output_layer, self.keep)
    return drop_output_layer
def _Build_AdversarialNetwork(self, a_input):
    adversary_weights = _initialize_adversary_variables()
    layer0 = tf.nn.relu(tf.matmul(a_input, adversary_weights['w0']) + adversary_weights['b0'])
    drop_layer0 = tf.nn.dropout(layer0, self.keep)
    layer1 = tf.nn.relu(tf.matmul(drop_layer0, adversary_weights['w1']) + adversary_weights['b1'])
    prob = tf.nn.softmax(tf.matmul(layer1, adversary_weights['w2']) + adversary_weights['b2'])
    return prob
def fm(self, x):
    with tf.variable_scope("linear_layer"):
        w = tf.get_variable("w", shape=[self.feature_size, 1],
                            initializer=tf.truncated_normal_initializer(mean=0, stddev=1e-2))
        b = tf.get_variable("b", shape=[1], initializer=tf.zeros_initializer())
        self.linear_output = tf.matmul(x, w) + b  # (batch, field_size, 1)
        self.linear_output = tf.nn.dropout(self.linear_output, self.fm_keep_prob)
    with tf.variable_scope("interaction_layer"):
        # Second-order FM term: 0.5 * ((sum of embeddings)^2 - sum of squared embeddings).
        # sum square part
        sum_sq_part = tf.square(tf.reduce_sum(self.embeddings, 1))   # (batch, embedding_size)
        # square sum part
        sq_sum_part = tf.reduce_sum(tf.square(self.embeddings), 1)   # (batch, embedding_size)
        self.interaction_output = 0.5 * tf.subtract(sum_sq_part, sq_sum_part)
        self.interaction_output = tf.nn.dropout(self.interaction_output, self.fm_keep_prob)
    return (self.linear_output, self.interaction_output)
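# The interaction_layer above relies on the factorization-machine identity: the sum of pairwise
# dot products equals 0.5 * ((sum of embeddings)^2 - sum of squared embeddings), taken per
# embedding dimension. A minimal NumPy sketch (toy shapes, not the model's tensors) checking it:
import numpy as np

rng = np.random.default_rng(0)
emb = rng.normal(size=(5, 4))  # 5 active fields, embedding size 4; rows play the role of v_i * x_i

# Sum-square minus square-sum, per embedding dimension, then summed.
fm_term = 0.5 * (np.square(emb.sum(axis=0)) - np.square(emb).sum(axis=0)).sum()

# Brute-force sum of pairwise dot products over i < j.
brute = sum(emb[i] @ emb[j] for i in range(5) for j in range(i + 1, 5))

assert np.allclose(fm_term, brute)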
def call(self, inputs, **kwargs):
    # Uses the Keras backend API; assumes `from keras import backend as K` and `import math`.
    main_input, embedding_matrix = inputs
    input_shape_tensor = K.shape(main_input)
    last_input_dim = K.int_shape(main_input)[-1]
    emb_input_dim, emb_output_dim = K.int_shape(embedding_matrix)
    projected = K.dot(K.reshape(main_input, (-1, last_input_dim)), self.projection)
    if self.add_biases:
        projected = K.bias_add(projected, self.biases, data_format='channels_last')
    if 0 < self.projection_dropout < 1:
        projected = K.in_train_phase(
            lambda: K.dropout(projected, self.projection_dropout),
            projected,
            training=kwargs.get('training'))
    attention = K.dot(projected, K.transpose(embedding_matrix))
    if self.scaled_attention:
        # scaled dot-product attention, described in
        # "Attention is all you need" (https://arxiv.org/abs/1706.03762)
        sqrt_d = K.constant(math.sqrt(emb_output_dim), dtype=K.floatx())
        attention = attention / sqrt_d
    result = K.reshape(
        self.activation(attention),
        (input_shape_tensor[0], input_shape_tensor[1], emb_input_dim))
    return result
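# The sqrt(d) scaling above keeps the attention logits at roughly unit variance: the dot product
# of two d-dimensional vectors with unit-variance components has variance close to d.
# A quick NumPy check with made-up sizes (not part of the layer):
import numpy as np

rng = np.random.default_rng(1)
d = 64
q = rng.normal(size=(10000, d))
k = rng.normal(size=(10000, d))

scores = (q * k).sum(axis=1)       # raw dot products; std grows like sqrt(d)
scaled = scores / np.sqrt(d)       # scaling brings the std back to roughly 1

print(scores.std(), scaled.std())  # ~8 vs ~1 for d = 64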
def neural_net_model(input_size):
    # TFLearn model definition (assumes `import tflearn` and a learning-rate constant LR).
    # Input layer
    net = tflearn.input_data(shape=[None, input_size], name='input')
    # Hidden layers, each followed by dropout with keep probability 0.8
    net = tflearn.fully_connected(net, 128, activation='relu', name='hlayer1')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='relu', name='hlayer2')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 512, activation='relu', name='hlayer3')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='relu', name='hlayer4')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 128, activation='relu', name='hlayer5')
    net = tflearn.dropout(net, 0.8)
    # Output layer
    net = tflearn.fully_connected(net, 2, activation='softmax', name='out')
    net = tflearn.regression(net, learning_rate=LR)
    model = tflearn.DNN(net, tensorboard_dir='log')
    return model
def call(self, x, adj, training=True):
    if self.use_bn:
        x = self.bn1(x, training=training)
    if training:
        x = tf.nn.dropout(x, self.input_droprate)
    x = tf.nn.relu(self.gc1(x, adj))
    if self.use_bn:
        x = self.bn2(x, training=training)
    if training:
        x = tf.nn.dropout(x, self.hidden_droprate)
    x = self.gc2(x, adj)
    return x
def build_model(self):
    with tf.variable_scope("SeqClassificationRNN"):
        # Training inputs
        self.inputs = tf.placeholder(tf.int32, shape=[None, self.max_length])
        self.seqlen = tf.placeholder(tf.int32, shape=[None])
        # Training labels
        self.labels = tf.placeholder(tf.int32, shape=[None])
        onehot = tf.one_hot(self.labels, self.class_num)
        # Dropout keep probability
        self.keep_prob = tf.placeholder(tf.float32)

        embedding_matrix = tf.Variable(
            tf.truncated_normal((self.vocab_size, self.embed_dim), stddev=0.01))
        embedding = tf.nn.embedding_lookup(embedding_matrix, self.inputs)

        cell = tf.nn.rnn_cell.MultiRNNCell([
            self.cell(self.hidden_dim, state_is_tuple=True)
            for _ in range(self.layer_num)
        ])
        outputs, state = tf.nn.dynamic_rnn(cell, embedding, self.seqlen,
                                           dtype=tf.float32, swap_memory=True)

        # Mean-pool over time, dividing by the true sequence length.
        seqlen_float = tf.cast(tf.expand_dims(self.seqlen, 1), tf.float32)
        pooling = tf.reduce_sum(outputs, 1) / seqlen_float
        dropout = tf.nn.dropout(pooling, keep_prob=self.keep_prob)

        lr_W = tf.Variable(
            tf.truncated_normal((self.hidden_dim, self.class_num), stddev=0.1))
        lr_b = tf.Variable(tf.zeros((1, self.class_num)))
        pred = tf.matmul(dropout, lr_W) + lr_b

        # Cross-entropy loss
        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=onehot))
def _build_model(self):
    """Takes no arguments; it picks up whatever it needs from `self`."""
    start_trainable_variables = tf.trainable_variables()
    net = self.inputs
    for layer_dim in layer_dims:
        net = tf.layers.dense(inputs=net, units=layer_dim,
                              activation=tf.nn.leaky_relu)
        # Never apply dropout to the first or last layer.
        if not self.test_phase:
            net = tf.nn.dropout(net, 0.5)
    # Last layer maps back to the original input dimension.
    self.outputs = tf.layers.dense(inputs=net, units=self.output_dim,
                                   activation=tf.identity)
    end_trainable_variables = tf.trainable_variables()
    self.L_params = [
        param for param in end_trainable_variables
        if param not in start_trainable_variables
    ]
def build_model(self, data_sources: Dict[str, BaseDataSource], mode: str):
    """Build model."""
    data_source = next(iter(data_sources.values()))
    input_tensors = data_source.output_tensors
    x = input_tensors['left-eye']
    batch_size = 32

    # Trainable parameters should be specified within a known `tf.variable_scope`.
    # This tag is later used to specify the `learning_schedule` which describes when to train
    # which part of the network and with which learning rate.
    #
    # This network has two scopes, 'conv' and 'fc'. Though in practice it makes little sense to
    # train the two parts separately, this is possible.
    with tf.variable_scope('conv'):
        with tf.variable_scope('conv1'):
            x = tf.pad(x, [[0, 0], [0, 0], [0, 1], [0, 1]], "constant")
            x = tf.layers.conv2d(x, filters=64, kernel_size=3, strides=2,
                                 padding='valid', data_format='channels_first')
            # self.summary.filters('filters', x)
            x = tf.nn.relu(x)
            # self.summary.feature_maps('features', x, data_format='channels_first')
            x = tf.pad(x, [[0, 0], [0, 0], [0, 1], [0, 1]], "constant")
            x = tf.layers.dropout(x, rate=0.1, noise_shape=(batch_size, 128, 1, 1),
                                  training=False)
            x = tf.layers.conv2d(x, filters=128, kernel_size=3, strides=1,
                                 padding='valid', data_format='channels_first')
            x = tf.nn.relu(x)
            x = tf.layers.max_pooling2d(x, pool_size=3, strides=2, padding='valid',
                                        data_format='channels_first')
            # self.summary.feature_maps('features', x, data_format='channels_first')

        with tf.variable_scope('conv2'):
            x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            x = tf.layers.conv2d(x, filters=256, kernel_size=3, strides=1,
                                 padding='valid', data_format='channels_first')
            # self.summary.feature_maps('features', x, data_format='channels_first')
            x = tf.nn.relu(x)
            # x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            # x = tf.layers.dropout(x, rate=0.1, noise_shape=(batch_size, 512, 1, 1), training=False)
            # x = tf.layers.conv2d(x, filters=256, kernel_size=5, strides=2,
            #                      padding='same', data_format='channels_first')
            # self.summary.feature_maps('features', x, data_format='channels_first')
            # x = tf.nn.relu(x)
            x = tf.layers.max_pooling2d(x, pool_size=3, strides=2, padding='valid',
                                        data_format='channels_first')

        with tf.variable_scope('conv3'):
            x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            x = tf.layers.conv2d(x, filters=512, kernel_size=3, strides=1,
                                 padding='valid', data_format='channels_first')
            # self.summary.feature_maps('features', x, data_format='channels_first')
            x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            x = tf.layers.dropout(x, rate=0.1, noise_shape=(batch_size, 512, 1, 1),
                                  training=False)
            x = tf.layers.conv2d(x, filters=512, kernel_size=3, strides=1,
                                 padding='valid', data_format='channels_first')
            # self.summary.feature_maps('features', x, data_format='channels_first')
            x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            x = tf.layers.conv2d(x, filters=512, kernel_size=3, strides=1,
                                 padding='valid', data_format='channels_first')
            # self.summary.feature_maps('features', x, data_format='channels_first')
            x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            x = tf.layers.dropout(x, rate=0.1, noise_shape=(batch_size, 512, 1, 1),
                                  training=False)
            x = tf.layers.conv2d(x, filters=512, kernel_size=3, strides=1,
                                 padding='valid', data_format='channels_first')
            x = tf.layers.max_pooling2d(x, pool_size=3, strides=2, padding='same',
                                        data_format='channels_first')

        with tf.variable_scope('conv4'):
            x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            x = tf.layers.dropout(x, rate=0.1, noise_shape=(batch_size, 512, 1, 1),
                                  training=False)
            x = tf.layers.conv2d(x, filters=512, kernel_size=3, strides=1,
                                 padding='valid', data_format='channels_first')
            x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            # x = tf.layers.dropout(x, rate=0.1, noise_shape=(batch_size, 1024, 1, 1), training=False)
            x = tf.layers.conv2d(x, filters=256, kernel_size=3, strides=2,
                                 padding='same', data_format='channels_first')
            # x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            # x = tf.layers.dropout(x, rate=0.1, noise_shape=(batch_size, 1024, 1, 1), training=False)
            x = tf.layers.conv2d(x, filters=256, kernel_size=3, strides=2,
                                 padding='same', data_format='channels_first')
            x = tf.layers.max_pooling2d(x, pool_size=3, strides=2, padding='same',
                                        data_format='channels_first')

        with tf.variable_scope('conv5'):
            # x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            x = tf.layers.conv2d(x, filters=512, kernel_size=3, strides=2,
                                 padding='same', data_format='channels_first')
            x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            # x = tf.layers.dropout(x, rate=0.1, noise_shape=(batch_size, 1024, 1, 1), training=False)
            x = tf.layers.conv2d(x, filters=512, kernel_size=3, strides=2,
                                 padding='valid', data_format='channels_first')
            x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], "constant")
            x = tf.layers.dropout(x, rate=0.1, noise_shape=(batch_size, 512, 1, 1),
                                  training=False)
            x = tf.layers.conv2d(x, filters=512, kernel_size=3, strides=1,
                                 padding='valid', data_format='channels_first')
            x = tf.layers.max_pooling2d(x, pool_size=3, strides=2, padding='same',
                                        data_format='channels_first')

    with tf.variable_scope('fc'):
        # Flatten the feature maps down to one vector
        x = tf.contrib.layers.flatten(x)
        # FC layers (`tf.layers.dense` expects a callable activation, not a string)
        x = tf.layers.dense(x, units=4096, activation=tf.nn.relu, name='fc5')
        x = tf.layers.dense(x, units=4096, activation=tf.nn.relu, name='fc6')
        x = tf.layers.dense(x, units=1024, activation=tf.nn.softmax, name='fc7')
        self.summary.histogram('fc7/activations', x)

        # Directly regress two polar angles for gaze direction
        x = tf.layers.dense(x, units=2, name='fc8')
        self.summary.histogram('fc8/activations', x)

    # Define outputs
    loss_terms = {}
    metrics = {}
    if 'gaze' in input_tensors:
        y = input_tensors['gaze']
        with tf.variable_scope('mse'):  # To optimize
            loss_terms['gaze_mse'] = tf.reduce_mean(tf.squared_difference(x, y))
        with tf.variable_scope('ang'):  # To evaluate in addition to loss terms
            metrics['gaze_angular'] = util.gaze.tensorflow_angular_error_from_pitchyaw(x, y)
    return {'gaze': x}, loss_terms, metrics
def dropped_softmax():
    return tf.nn.dropout(attention_softmax, self.dropout)
# Load the weights and bias
# Since tf.train.Saver.restore() sets all the TensorFlow Variables, you don't need to call
# tf.global_variables_initializer(); it will restore all the previously saved variables.
saver.restore(sess, save_file)

# When TF loads variables it matches them by name. If no name was assigned when a variable was
# created, it gets an auto-generated name like "Variable_0", which can cause confusing name
# mismatches when the stored variables are loaded back.
weights = tf.Variable(tf.truncated_normal([2, 3]), name='weights_0')

# Returns the index of the max value along an axis: axis=0 compares rows, axis=1 compares columns.
tf.argmax(input, axis=None, name=None, dimension=None)

# Dropout: in order to compensate for dropped units, tf.nn.dropout() multiplies all units that
# are kept (i.e. not dropped) by 1/keep_prob.
tf.nn.dropout(hidden_layer, keep_prob)

# np.pad
a = [1, 2, 3, 4, 5]
np.pad(a, (2, 3), 'constant', constant_values=(4, 6))
# array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])

# ----------------------------------------------------------------------------------------
# Convolutional network
# strides gives the stride for each dimension of the input.
tf.nn.conv2d(X, W1, strides=[1, s, s, 1], padding='SAME')

# Given an input A, this function uses a window of size (f, f) and strides of size (s, s)
# to carry out max pooling over each window.
tf.nn.max_pool(A, ksize=[1, f, f, 1], strides=[1, s, s, 1], padding='SAME')
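# A quick way to see the 1/keep_prob compensation described above: run dropout on a vector of
# ones. Small TF 1.x sketch; which units are zeroed is random, so the printed values vary.
import tensorflow as tf

hidden_layer = tf.ones([1, 8])
keep_prob = 0.5
dropped = tf.nn.dropout(hidden_layer, keep_prob)

with tf.Session() as sess:
    print(sess.run(dropped))
    # Roughly half the units come out as 0.0; the kept units are scaled to 1 / 0.5 = 2.0,
    # so the expected value of each unit stays 1.0.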
def SublayerConnection(self, x, keep_rate):
    # Residual connection followed by dropout and normalization.
    return batch_norm(x + tf.nn.dropout(self.model(x, x, x), keep_prob=keep_rate))
def MultiRNN(x, BATCH_SIZE, seq, NUM_CLASSES, NUM_LSTM, NUM_HIDDEN, OUTPUT_KEEP_PROB,
             NUM_MLP, NUM_NEURON, training=True):
    """Model an LDNN network.

    Args:
        x: feature, shape = [batch_size, time_length, 160]
    Returns:
        original_out: prediction
        update_op: resume state from the previous state
        reset_op: not used in training, only for validation to reset the state to zero
    """
    with tf.variable_scope('lstm', initializer=tf.orthogonal_initializer()):
        """Runs the forward step for the RNN model.

        Args:
            inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`.
            initial_state: a tuple of tensor(s) of shape
                `[num_layers * num_dirs, batch_size, num_units]`. If not provided, use zero
                initial states. The tuple size is 2 for LSTM and 1 for other RNNs.
            training: whether this operation will be used in training or inference.
        Returns:
            output: a tensor of shape `[time_len, batch_size, num_dirs * num_units]`.
                It is a `concat([fwd_output, bak_output], axis=2)`.
            output_states: a tuple of tensor(s) of the same shape and structure as
                `initial_state`.
        """
        mlstm_cell = tf.contrib.cudnn_rnn.CudnnLSTM(NUM_LSTM, NUM_HIDDEN)
        # DropConnect: apply dropout to the recurrent weights themselves.
        for w in mlstm_cell.variables:
            tf.assign(w, tf.nn.dropout(w, OUTPUT_KEEP_PROB))
        states = get_state_variables(NUM_LSTM, BATCH_SIZE, NUM_HIDDEN)
        # get shape, and add inputs input_shape attributes
        # batch_x_shape = tf.shape(x)
        x = tf.reshape(x, [BATCH_SIZE, -1, 160])
        # batch-major -> time-major
        inputs = tf.transpose(x, [1, 0, 2])
        outputs, new_states = mlstm_cell(inputs, states, training=training)
        # time-major -> batch-major
        outputs = tf.transpose(outputs, [1, 0, 2])
        update_op = get_state_update_op(states, new_states, NUM_LSTM)
        # TODO: reset the state to zero or to the final state of training? For now it is zero.
        reset_op = get_state_reset_op(states, BATCH_SIZE, NUM_LSTM, NUM_HIDDEN)
        outputs = tf.reshape(outputs, [-1, NUM_HIDDEN])

    with tf.variable_scope('mlp'):
        weights = {
            'out': tf.get_variable('out', shape=[NUM_HIDDEN, NUM_CLASSES],
                                   initializer=tf.contrib.layers.xavier_initializer()),
            'h1': tf.get_variable('h1', shape=[NUM_HIDDEN, NUM_NEURON],
                                  initializer=tf.contrib.layers.xavier_initializer()),
            'h2': tf.get_variable('h2', shape=[NUM_NEURON, NUM_NEURON],
                                  initializer=tf.contrib.layers.xavier_initializer()),
            'h3': tf.get_variable('h3', shape=[NUM_NEURON, NUM_NEURON],
                                  initializer=tf.contrib.layers.xavier_initializer()),
            'mlpout': tf.get_variable('mlpout', shape=[NUM_NEURON, NUM_CLASSES],
                                      initializer=tf.contrib.layers.xavier_initializer()),
        }

    if NUM_MLP == 0:
        top = tf.matmul(outputs, weights['out'])
        original_out = tf.reshape(top, [BATCH_SIZE, -1, NUM_CLASSES])
        return original_out, update_op, reset_op
    elif NUM_MLP == 1:
        l1 = tf.nn.dropout(tf.matmul(outputs, weights['h1']), keep_prob=OUTPUT_KEEP_PROB)
        l1 = tf.nn.relu(l1)
        top = tf.matmul(l1, weights['mlpout'])
        original_out = tf.reshape(top, [BATCH_SIZE, -1, NUM_CLASSES])
        return original_out, update_op, reset_op
    elif NUM_MLP == 2:
        l1 = tf.nn.dropout(tf.matmul(outputs, weights['h1']), keep_prob=OUTPUT_KEEP_PROB)
        l1 = tf.nn.relu(l1)
        l2 = tf.nn.dropout(tf.matmul(l1, weights['h2']), keep_prob=OUTPUT_KEEP_PROB)
        l2 = tf.nn.relu(l2)
        top = tf.matmul(l2, weights['mlpout'])
        original_out = tf.reshape(top, [BATCH_SIZE, -1, NUM_CLASSES])
        return original_out, update_op, reset_op
    elif NUM_MLP == 3:
        l1 = tf.nn.dropout(tf.matmul(outputs, weights['h1']), keep_prob=OUTPUT_KEEP_PROB)
        l1 = tf.nn.relu(l1)
        l2 = tf.nn.dropout(tf.matmul(l1, weights['h2']), keep_prob=OUTPUT_KEEP_PROB)
        l2 = tf.nn.relu(l2)
        l3 = tf.nn.dropout(tf.matmul(l2, weights['h3']), keep_prob=OUTPUT_KEEP_PROB)
        l3 = tf.nn.relu(l3)
        top = tf.matmul(l3, weights['mlpout'])
        original_out = tf.reshape(top, [BATCH_SIZE, -1, NUM_CLASSES])
        return original_out, update_op, reset_op
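# The DropConnect step above drops entries of the weight matrices rather than of the
# activations. A minimal NumPy illustration of the difference (toy shapes, not the CudnnLSTM
# code path):
import numpy as np

rng = np.random.default_rng(2)
w = rng.normal(size=(4, 3))   # weight matrix
h = rng.normal(size=(1, 4))   # activations
keep_prob = 0.5

# Standard dropout: mask the activations, then apply the full weight matrix.
act_mask = rng.binomial(1, keep_prob, size=h.shape) / keep_prob
out_dropout = (h * act_mask) @ w

# DropConnect: mask individual weights, then apply the masked matrix to intact activations.
w_mask = rng.binomial(1, keep_prob, size=w.shape) / keep_prob
out_dropconnect = h @ (w * w_mask)

print(out_dropout, out_dropconnect)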
def __init__(self, sequence_length, num_classes, vocab_size, embedding_size,
             filter_sizes, num_filters, l2_reg_lambda=0.0):
    """
    :param sequence_length: sentence length, padded/truncated to one fixed length
    :param num_classes: number of neurons in the output layer
    :param vocab_size: vocabulary size, used for the embedding matrix [vocab_size, embedding_size]
    :param embedding_size: dimensionality of the embedding layer
    :param filter_sizes: numbers of words each convolution filter covers, e.g. [2, 3, 4]
    :param num_filters: number of filters per filter size, e.g. 100
    :param l2_reg_lambda: L2 regularization coefficient
    """
    # Placeholders for input, output and dropout
    self.input_x = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_x')
    self.input_y = tf.placeholder(tf.float32, shape=[None, num_classes], name='input_y')
    self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')

    # L2 regularization loss
    l2_loss = tf.constant(0.0)

    # Embedding layer
    with tf.device('/cpu:0'), tf.name_scope('embedding'):
        self.W = tf.Variable(
            tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), name='W')
        # Input: [embedding matrix, indices into it]
        # Output: [None, sequence_length, embedding_size]
        self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
        # Convolution expects a 4-D tensor. Both expand_dims and reshape can change the rank,
        # but reshape fails while building the graph if concrete values are not yet known.
        self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

    # Create a convolution + max-pooling layer for each filter size
    pooled_outputs = []
    for i, filter_size in enumerate(filter_sizes):
        with tf.name_scope('conv-maxpool-%s' % filter_size):
            # Convolution layer
            # Filter shape: [filter height, filter width, input channels, number of filters]
            filter_shape = [filter_size, embedding_size, 1, num_filters]
            W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='W')
            b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name='b')
            # padding: SAME pads with zeros, VALID does not pad
            # strides: [batch, height, width, channels]; batch and channels are 1
            conv = tf.nn.conv2d(self.embedded_chars_expanded, W,
                                strides=[1, 1, 1, 1], padding='VALID', name='conv')
            # Nonlinearity
            h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')
            # Max-pooling over the outputs
            pooled = tf.nn.max_pool(
                h,
                ksize=[1, sequence_length - filter_size + 1, 1, 1],
                strides=[1, 1, 1, 1],
                padding='VALID',
                name='pool')
            pooled_outputs.append(pooled)

    # Combine all the pooled features
    num_filter_total = num_filters * len(filter_sizes)
    # Concatenate along the last dimension of pooled_outputs
    self.h_pool = tf.concat(pooled_outputs, 3)
    self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filter_total])

    # Add dropout
    with tf.name_scope('dropout'):
        self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

    # Final (unnormalized) scores and predictions
    with tf.name_scope('output'):
        W = tf.get_variable(
            'W', shape=[num_filter_total, num_classes],
            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name='b')
        l2_loss += tf.nn.l2_loss(W)
        l2_loss += tf.nn.l2_loss(b)
        self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name='scores')
        self.predictions = tf.argmax(self.scores, 1, name='predictions')

    # Calculate the mean cross-entropy loss
    with tf.name_scope('loss'):
        losses = tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=self.scores, labels=self.input_y)
        # L2 regularization
        self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

    # Accuracy
    with tf.name_scope('accuracy'):
        correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'), name='accuracy')
def __init__(self, config, pretrained_embedding):
    self._input = tf.placeholder(dtype=tf.int32,
                                 shape=[None, config['num_steps']], name='input')
    self._target = tf.placeholder(dtype=tf.int32, shape=[None], name='target')
    self.batch_size = config['batch_size']
    self.num_steps = config['num_steps']
    self.embed_size = config['embed_size']
    self.size = config['hidden_size']
    self._lr = config['lr']
    self.num_classes = config['num_classes']
    self.keep_prob = tf.Variable(config['keep_prob'], trainable=False)
    self.combine_mode = config['combine_mode']
    self.weight_decay = config['weight_decay']

    with tf.device("/cpu:0"):
        embedding = tf.Variable(pretrained_embedding, dtype=tf.float32)
        inputs = tf.nn.embedding_lookup(embedding, self._input)
        # Word-level dropout: the same mask is shared across all time steps.
        inputs = tf.nn.dropout(
            inputs, self.keep_prob,
            noise_shape=[tf.shape(self._input)[0], 1, self.embed_size])

    def lstm_cell(input_size):
        return tf.contrib.rnn.LSTMCell(input_size, forget_bias=0.0,
                                       state_is_tuple=True,
                                       reuse=tf.get_variable_scope().reuse)

    def attn_cell(input_size):
        return tf.contrib.rnn.DropoutWrapper(
            lstm_cell(input_size),
            output_keep_prob=config['keep_prob'],
            variational_recurrent=True,
            dtype=tf.float32)

    cell = tf.contrib.rnn.MultiRNNCell(
        [attn_cell(self.embed_size) for i in range(config['num_layers'])])
    self._initial_state = cell.zero_state(tf.shape(self._input)[0], tf.float32)

    outputs = []
    state = self._initial_state
    with tf.variable_scope("RNN"):
        for time_step in range(self.num_steps):
            if time_step > 0:
                tf.get_variable_scope().reuse_variables()
            (cell_output, state) = cell(inputs[:, time_step, :], state)
            outputs.append(cell_output)

    if self.combine_mode == 'mean':
        outputs = tf.stack(outputs, axis=1)
        outputs = tf.reduce_mean(outputs, axis=1)
        outputs = tf.nn.dropout(outputs, self.keep_prob)
    elif self.combine_mode == 'last':
        outputs = outputs[-1]
        outputs = tf.nn.dropout(outputs, self.keep_prob)
    else:
        outputs = tf.stack(outputs, axis=1)
        outputs = tf.reduce_mean(outputs, axis=1)
        outputs = tf.nn.dropout(outputs, self.keep_prob)

    softmax_w = tf.get_variable("softmax_w", [self.size, self.num_classes], dtype=tf.float32)
    softmax_b = tf.get_variable("softmax_b", [self.num_classes], dtype=tf.float32)
    logits = tf.matmul(outputs, softmax_w) + softmax_b

    # Update the cost variables: cross-entropy plus L2 weight decay
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=self._target, logits=logits)
    self.l2_loss = sum(
        tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
    self._cost = cost = tf.reduce_mean(loss) + self.weight_decay * self.l2_loss

    self._lr = tf.Variable(self._lr, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                      config['max_grad_norm'])
    optimizer = tf.train.AdamOptimizer(self._lr)
    self._train_op = optimizer.apply_gradients(zip(grads, tvars))

    self._new_lr = tf.placeholder(tf.float32, shape=[], name="new_learning_rate")
    self._lr_update = tf.assign(self._lr, self._new_lr)

    self.predicted_class = tf.cast(
        tf.argmax(tf.nn.softmax(logits), axis=-1), tf.int32)
def IDCNN_layer(self, model_inputs, name="IDCNN_layer"):
    """IDCNN layer.

    Shape of input after expand = [batch, in_height, in_width, in_channels]
        in_height: 1 for a sentence, in_width: seq length, in_channels: embedding dim
    Shape of filter = [filter_height, filter_width, in_channels, out_channels]
        in_channels: embedding dim, out_channels: number of filters

    About tf.nn.atrous_conv2d(value, filters, rate, padding, name=None):
        value: a tensor of shape [batch, height, width, channels], i.e.
            [batch_size, sentence height (1), sentence width (length), sentence channels
            (embedding dim)]
        filters: [filter_height, filter_width, channels, out_channels]
            [filter height, filter width, number of input channels, number of filters]
        rate: positive int; the dilated convolution uses stride=1 by default. The rate is the
            sampling interval on the input, equivalent to inserting (rate - 1) zeros between
            the filter taps, so the filter samples the input at a larger interval. With rate=1
            no zeros are inserted and this is an ordinary convolution.
        padding: one of "SAME" or "VALID", selecting the border padding scheme.
            "VALID" returns a tensor of shape
                [batch, height - (filter_height + (filter_height - 1) * (rate - 1)) + 1,
                 width - (filter_width + (filter_width - 1) * (rate - 1)) + 1, out_channels]
            "SAME" returns a tensor of shape [batch, height, width, out_channels]

    :param model_inputs: [batch_size, num_steps, emb_size]
    :return: [batch_size, num_steps, totalChannels]
    """
    # Treat the sentence as a 1-pixel-high image.
    model_inputs = tf.expand_dims(model_inputs, 1)
    with tf.variable_scope(name):
        # init filter weights
        shape = [1, self.filter_width, self.embedding_dim, self.num_filter]
        weights = tf.get_variable("idcnn_filter", shape=shape,
                                  initializer=self.initializer)
        # first cnn layer
        cnn_out = tf.nn.conv2d(model_inputs, weights, strides=[1, 1, 1, 1],
                               padding="SAME", use_cudnn_on_gpu=True,
                               name="first_cnn_layer")
        # dilated cnn layers
        reuseFlag = True if self.dropout == 0.0 else False
        eachModuleOut = []
        totalChannels = 0
        for i in range(self.num_sublayers):          # dilated cnn modules
            for j in range(len(self.dilate_rate)):   # dilated layers of one module
                # Reuse the per-layer variables after the first module, or when the dropout
                # rate is 0.0.
                with tf.variable_scope("atrous-conv-layer-%d" % j,
                                       reuse=True if (reuseFlag or i > 0) else False):
                    w = tf.get_variable(
                        "weights",
                        shape=[1, self.filter_width, self.num_filter, self.num_filter],
                        initializer=tf.contrib.layers.xavier_initializer())
                    b = tf.get_variable("bias", shape=[self.num_filter])
                    atrous_conv_out = tf.nn.atrous_conv2d(cnn_out, w,
                                                          rate=self.dilate_rate[j],
                                                          padding="SAME")
                    conv = tf.nn.bias_add(atrous_conv_out, b)
                    conv = tf.nn.relu(conv)
                    # record every sub-module's output
                    if j == (len(self.dilate_rate) - 1):
                        eachModuleOut.append(conv)
                    # feed this layer's output into the next layer
                    cnn_out = conv
        totalChannels = self.num_filter * self.num_sublayers
        # aggregate outputs
        cnn_outs = tf.concat(values=eachModuleOut, axis=3)
        cnn_outs = tf.nn.dropout(cnn_outs, 1 - self.dropout)
        cnn_outs = tf.squeeze(cnn_outs, [1])  # remove the expanded dim
        cnn_outs = tf.reshape(cnn_outs, [-1, totalChannels])
        self.cnn_out_channels = totalChannels
        return cnn_outs
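# Sanity check on the "SAME" shape behaviour described in the docstring above: a standalone
# TF 1.x sketch with toy sizes (not the model's real filters).
import numpy as np
import tensorflow as tf

# A "sentence" of length 7 with a 5-dimensional embedding, treated as a 1 x 7 image.
value = tf.constant(np.random.randn(1, 1, 7, 5), dtype=tf.float32)
# Filters of width 3 over the 5 input channels, producing 4 output channels.
filters = tf.constant(np.random.randn(1, 3, 5, 4), dtype=tf.float32)

out = tf.nn.atrous_conv2d(value, filters, rate=2, padding="SAME")

with tf.Session() as sess:
    print(sess.run(tf.shape(out)))  # [1 1 7 4]: "SAME" preserves height and width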
def build(self, rgb_input, train=False, num_classes=20, random_init_fc8=False, debug=False):
    """
    :param rgb_input: an image batch tensor, [None x height x width x num_channels],
        in the shape in which images are loaded.
    """
    with tf.name_scope("Processing"):
        # Convert RGB to BGR and subtract the per-channel VGG means.
        ch_r, ch_g, ch_b = tf.split(rgb_input, 3, axis=3)
        bgr = tf.concat(
            [ch_b - VGG_MEAN_B, ch_g - VGG_MEAN_G, ch_r - VGG_MEAN_R], axis=3)
        if debug:
            bgr = tf.Print(bgr, [tf.shape(bgr)],
                           message="Shape of input image: ",
                           summarize=4, first_n=1)

    # VGG convolutional layers
    do_debug = True
    self.conv1_1 = self._conv_layer(bgr, "conv1_1", False)
    self.conv1_2 = self._conv_layer(self.conv1_1, "conv1_2", False)
    self.pool1 = self._max_pool(self.conv1_2, "pool1", do_debug)

    self.conv2_1 = self._conv_layer(self.pool1, "conv2_1", False)
    self.conv2_2 = self._conv_layer(self.conv2_1, "conv2_2", False)
    self.pool2 = self._max_pool(self.conv2_2, "pool2", do_debug)

    self.conv3_1 = self._conv_layer(self.pool2, "conv3_1", False)
    self.conv3_2 = self._conv_layer(self.conv3_1, "conv3_2", False)
    self.conv3_3 = self._conv_layer(self.conv3_2, "conv3_3", False)
    self.pool3 = self._max_pool(self.conv3_3, "pool3", do_debug)

    self.conv4_1 = self._conv_layer(self.pool3, "conv4_1", False)
    self.conv4_2 = self._conv_layer(self.conv4_1, "conv4_2", False)
    self.conv4_3 = self._conv_layer(self.conv4_2, "conv4_3", False)
    self.pool4 = self._max_pool(self.conv4_3, "pool4", do_debug)

    self.conv5_1 = self._conv_layer(self.pool4, "conv5_1", False)
    self.conv5_2 = self._conv_layer(self.conv5_1, "conv5_2", False)
    self.conv5_3 = self._conv_layer(self.conv5_2, "conv5_3", False)
    self.pool5 = self._max_pool(self.conv5_3, "pool5", do_debug)

    # Fully-convolutional layers; dropout is only applied at training time.
    self.fc6 = self._fc_layer(self.pool5, "fc6", do_relu=True, debug=do_debug)
    if train:
        self.fc6 = tf.nn.dropout(self.fc6, 0.5)
    self.fc7 = self._fc_layer(self.fc6, "fc7", do_relu=True, debug=do_debug)
    if train:
        self.fc7 = tf.nn.dropout(self.fc7, 0.5)
    self.fc8 = self._fc_layer(self.fc7, "fc8", do_relu=False,
                              num_classes=num_classes, debug=do_debug)

    # Upsample fc8 and fuse it with the pool4 skip connection.
    pool4_shape = tf.shape(self.pool4)
    self.upscore2 = self._upscore_layer(self.fc8, "upscore2", ksize=4, stride=2,
                                        num_classes=num_classes,
                                        up_w=pool4_shape[2], up_h=pool4_shape[1],
                                        debug=do_debug)
    self.score_pool4 = self._score_layer(self.pool4, "score_pool4",
                                         num_classes=num_classes,
                                         random_weight_stddev=0.001)
    self.fuse_pool4 = tf.add(self.upscore2, self.score_pool4)

    # Upsample the fused score map back to the input resolution.
    input_shape = tf.shape(bgr)
    self.upscore32 = self._upscore_layer(self.fuse_pool4, "upscore32", ksize=32, stride=16,
                                         num_classes=num_classes,
                                         up_w=input_shape[2], up_h=input_shape[1],
                                         debug=do_debug)
    return