def Net(aa, yt, x):
    s = aa.shape[1]
    with tf.sg_context(name='NNReg', stride=1, act='leaky_relu', bn=True, reuse=tf.AUTO_REUSE):
        yt = tf.expand_dims(yt, 2)
        # Per-point feature pyramid (1x1 convs) over x
        v1 = tf.expand_dims(x, 2).sg_conv(dim=16, size=(1, 1), name='gen9', pad="SAME", bn=True)
        v2 = v1.sg_conv(dim=64, size=(1, 1), name='gen1', pad="SAME", bn=True)
        v3 = v2.sg_conv(dim=128, size=(1, 1), name='gen2', pad="SAME", bn=True)
        v4 = v3.sg_conv(dim=256, size=(1, 1), name='gen3', pad="SAME", bn=True)
        v5 = v4.sg_conv(dim=512, size=(1, 1), name='gen4', pad="SAME", bn=True)
        # Global max-pool over the point axis, then broadcast back to every point
        v5 = tf.tile(tf.expand_dims(tf.reduce_max(v5, axis=1), axis=1), [1, s, 1, 1])
        vv5 = v5
        # Same pyramid over yt
        v1 = yt.sg_conv(dim=16, size=(1, 1), name='gen99', pad="SAME", bn=True)
        v2 = v1.sg_conv(dim=64, size=(1, 1), name='gen11', pad="SAME", bn=True)
        v3 = v2.sg_conv(dim=128, size=(1, 1), name='gen22', pad="SAME", bn=True)
        v4 = v3.sg_conv(dim=256, size=(1, 1), name='gen33', pad="SAME", bn=True)
        v5 = v4.sg_conv(dim=512, size=(1, 1), name='gen44', pad="SAME", bn=True)
        v5 = tf.tile(tf.expand_dims(tf.reduce_max(v5, axis=1), axis=1), [1, s, 1, 1])
        # Concatenate per-point coordinates with both global descriptors
        ff = tf.concat([tf.expand_dims(aa, 2), v5], axis=-1)
        ff = tf.concat([ff, vv5], axis=-1)
        # Regression head down to a 2-D linear output
        f1 = ff.sg_conv(dim=256, size=(1, 1), name='f1', pad="SAME", bn=True)
        f2 = f1.sg_conv(dim=128, size=(1, 1), name='f2', pad="SAME", bn=True)
        f3 = f2.sg_conv(dim=2, size=(1, 1), name='f3', pad="SAME", bn=False, act="linear")
        f3 = tf.squeeze(f3, axis=2)
    return f3
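
# --- Usage sketch (not from the original source) ---
# Net depends on sugartensor's monkey-patched `tf` namespace (sg_context,
# sg_conv), so plain TensorFlow is not enough. The placeholder names,
# shapes, and sizes below are assumptions for illustration only.
import sugartensor as tf  # re-exports TensorFlow plus the sg_* sugar

batch, n_points = 8, 1024
aa = tf.placeholder(tf.float32, [batch, n_points, 2])  # points to regress from
yt = tf.placeholder(tf.float32, [batch, n_points, 2])  # target point set
x = tf.placeholder(tf.float32, [batch, n_points, 2])   # source point set

out = Net(aa, yt, x)  # -> [batch, n_points, 2] linear (un-activated) output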
def rnn_classify(x, num_classes, is_test=False):
    with tf.sg_context(name='rnn_classify'):
        fw_cell = tf.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(is_test) for _ in range(num_blocks)], state_is_tuple=True)
        bw_cell = tf.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(is_test) for _ in range(num_blocks)], state_is_tuple=True)
        # A time step is "used" if any feature is non-zero; summing gives true lengths
        words_used_in_sent = tf.sign(tf.reduce_max(tf.abs(x), reduction_indices=2))
        length = tf.cast(tf.reduce_sum(words_used_in_sent, reduction_indices=1), tf.int32)
        outputs, _ = tf.nn.bidirectional_dynamic_rnn(
            fw_cell, bw_cell, x, dtype=tf.float32, sequence_length=length)
        # Concatenate forward/backward outputs and project to class logits
        output = tf.concat(outputs, 2).sg_reshape(shape=[-1, 2 * latent_dim])
        prediction = output.sg_dense(dim=num_classes, name='dense')
        res = tf.reshape(prediction, [x.get_shape().as_list()[0], -1, num_classes])
    return res
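
# --- Hedged sketch of the helpers rnn_classify assumes in scope ---
# `lstm_cell`, `num_blocks`, and `latent_dim` are free names in the function
# above; the definitions below are assumptions (hidden size, layer count,
# and dropout rate are illustrative, not from the original source).
import sugartensor as tf

latent_dim = 256  # assumed per-direction hidden size (output dim is 2 * latent_dim)
num_blocks = 2    # assumed number of stacked LSTM layers per direction

def lstm_cell(is_test):
    # One LSTM layer; dropout applied only while training (assumed convention).
    cell = tf.nn.rnn_cell.LSTMCell(latent_dim, state_is_tuple=True)
    if not is_test:
        cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=0.5)
    return cell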
def make_scaling_matrix_for_conv(input_shape, filter_shape, strides, padding='SAME'):
    # Convolve an all-ones input with an all-ones filter: each output
    # position then holds the number of input taps that contributed to it.
    input_ones = np.ones(input_shape, dtype=np.float32)
    filter_ones = np.ones(filter_shape, dtype=np.float32)
    output = tf.nn.conv2d(input_ones, filter_ones, strides=strides, padding=padding)
    # Normalize by the interior (maximum) count and invert, yielding a
    # per-position factor that compensates for border effects under padding.
    max_output = tf.reduce_max(output)
    norm_output = tf.div(output, max_output)
    inv_norm_output = tf.div(1.0, norm_output)
    return inv_norm_output
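
# --- Worked example (shapes assumed for illustration) ---
# Convolving an all-ones 4x4 input with an all-ones 3x3 filter under SAME
# padding gives 9 contributions in the interior, 6 on edges, 4 at corners,
# so the inverse-normalized matrix is 1.0 inside, 1.5 on edges, 2.25 at corners.
import numpy as np
import tensorflow as tf

scale = make_scaling_matrix_for_conv(
    input_shape=[1, 4, 4, 1],    # NHWC
    filter_shape=[3, 3, 1, 1],   # HWIO
    strides=[1, 1, 1, 1],
    padding='SAME')

with tf.Session() as sess:
    print(sess.run(scale)[0, :, :, 0])
    # [[2.25 1.5  1.5  2.25]
    #  [1.5  1.   1.   1.5 ]
    #  [1.5  1.   1.   1.5 ]
    #  [2.25 1.5  1.5  2.25]]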
def sg_max(tensor, opt):
    r"""Computes the maximum of elements across axes of a tensor.

    See `tf.reduce_max()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    return tf.reduce_max(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
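
# --- Usage sketch (illustrative values, not from the original source) ---
# sugartensor injects sg_* functions as chainable tensor methods, so the
# wrapper is typically called off a tensor; keyword arguments become the
# fields of `opt`.
import sugartensor as tf

t = tf.constant([[1., 5.], [3., 2.]])
m = t.sg_max(axis=1, keep_dims=True)  # same as tf.reduce_max(t, axis=1, keep_dims=True)
# m evaluates to [[5.], [3.]]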
def ner_cost(tensor, opt):
    # Targets are 1-based class ids with 0 marking padded steps, so
    # one_hot(target - 1) yields an all-zero row at padded positions.
    one_hot_labels = tf.one_hot(opt.target - 1, opt.num_classes, dtype=tf.float32)
    cross_entropy = one_hot_labels * tf.log(tensor)
    cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2)
    # Mask out padded steps (their one-hot rows are all zeros)
    mask = tf.sign(tf.reduce_max(tf.abs(one_hot_labels), reduction_indices=2))
    cross_entropy *= tf.cast(mask, tf.float32)
    cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)
    # Average each sentence's loss over its true (unpadded) length
    length = tf.cast(tf.reduce_sum(tf.sign(opt.target), reduction_indices=1), tf.int32)
    cross_entropy /= tf.cast(length, tf.float32)
    out = tf.reduce_mean(cross_entropy, name='ner_cost')

    # add summary
    tf.sg_summary_loss(out, name=opt.name)

    return out
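
# --- Hedged calling sketch (shapes and labels are made up) ---
# `tensor` must hold valid class probabilities (e.g. a softmax output),
# since ner_cost applies tf.log directly; labels are 1-based and 0 marks
# padded time steps, which the one-hot/masking logic above zeroes out.
import sugartensor as tf

probs = tf.nn.softmax(tf.random_normal([2, 4, 5]))  # [batch, time, classes]
targets = tf.constant([[1, 3, 0, 0],
                       [2, 2, 4, 1]])               # 1-based labels, 0 = pad
loss = ner_cost(probs, tf.sg_opt(target=targets, num_classes=5, name='ner'))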
def sg_max(tensor, opt):
    return tf.reduce_max(tensor, reduction_indices=opt.dims, keep_dims=opt.keep_dims, name=opt.name)
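
# --- Note on this variant ---
# This appears to be an earlier revision of sg_max above: `reduction_indices`
# is the pre-1.0 alias that TensorFlow deprecated in favor of `axis`, and the
# option field is `dims` rather than `axis`. The two calls below are
# equivalent in TF 1.x (tensor `t` is illustrative):
import tensorflow as tf

t = tf.constant([[1., 5.], [3., 2.]])
old = tf.reduce_max(t, reduction_indices=[1])  # legacy alias
new = tf.reduce_max(t, axis=[1])               # preferred spelling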