import tensorflow as tf

# Assumption: `ln` is the repo's layer-norm helper. The keyword arguments
# used in layer_norm() below match tf.contrib.layers.layer_norm, so it is
# aliased here. FLAGS is assumed to be the repo's tf.flags.FLAGS object,
# defined elsewhere.
ln = tf.contrib.layers.layer_norm


def composite_function(_input, outSize, width):
    '''Composite function: convolution, then layer norm, then ReLU.'''
    with tf.variable_scope("composite_function"):
        output = _conv1d(_input, outSize, width=width)
        output = ln(output)
        output = tf.nn.relu(output)
        return output

def _conv1d(x, outputsize, width):
    '''1-D convolution with stride 2 and SAME padding, followed by a bias
    add and layer normalization.'''
    inputSize = x.get_shape()[-1]
    filter_ = tf.get_variable("conv_filter", shape=[width, inputSize, outputsize])
    bias = tf.get_variable("conv_bias", shape=[outputsize])
    convolved = tf.nn.conv1d(x, filters=filter_, stride=2, padding="SAME")
    convolved += bias
    convolved = ln(convolved)
    return convolved

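# `avg_pool1d` (used by transition_to_vector below) is a repo-local helper
# that is not shown in this file; a minimal stand-in under that assumption:
def avg_pool1d(x, kernel_size):
    # Average-pool over the temporal axis of a [batch, length, hidden] tensor.
    return tf.layers.average_pooling1d(x, pool_size=kernel_size, strides=kernel_size)


# A minimal, hypothetical shape check for _conv1d (not part of the original
# module): with stride 2 and SAME padding the temporal dimension is halved
# (rounded up) and the channel dimension becomes `outputsize`.
def _demo_conv1d_shapes():
    with tf.variable_scope("demo_conv"):
        x = tf.placeholder(tf.float32, [8, 32, 64])  # [batch, length, hidden]
        y = _conv1d(x, outputsize=128, width=3)
    print(y.get_shape())  # expected: (8, 16, 128)
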
def transition_to_vector(_input):
    '''Collapses the last block into a single vector per example by
    average-pooling over the entire temporal dimension.'''
    output = ln(_input)
    output = tf.nn.relu(output)
    # Pool over the full remaining length so each example becomes one vector.
    last_pool_kernel = int(output.get_shape()[-2])
    output = avg_pool1d(output, last_pool_kernel)
    output = tf.squeeze(output, axis=1)
    return output

def bottleneck(_input, growthRate):
    '''Per the DenseNet paper, each bottleneck layer outputs 4k feature maps,
    where k is the growth rate of the network.'''
    outSize = growthRate * 4
    output = _conv1d(_input, outSize, width=1)
    output = ln(output)
    output = tf.nn.relu(output)
    return output

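# A hypothetical usage sketch (not part of the original module): a bottleneck
# followed by the composite function, in the style of a DenseNet-BC layer.
# The scope names and growth rate here are illustrative only.
def _demo_bottleneck_layer():
    growth_rate = 12
    x = tf.placeholder(tf.float32, [8, 32, 64])  # [batch, length, hidden]
    with tf.variable_scope("demo_layer"):
        with tf.variable_scope("bottleneck"):
            h = bottleneck(x, growthRate=growth_rate)  # -> 4 * 12 = 48 channels
        y = composite_function(h, outSize=growth_rate, width=3)
    # Channel dim equals the growth rate (12); note each _conv1d call also
    # halves the length (32 -> 16 -> 8) because of its fixed stride of 2.
    print(y.get_shape())
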
def conv1d_transpose(x, targetHidden, width, growth_rate):
    '''Transposed 1-D convolution that upsamples the temporal dimension by
    `growth_rate`, implemented via conv2d_transpose on a 4-D tensor.'''
    length, inputHidden = x.get_shape()[-2:]
    outputShape = [FLAGS.batch_size, 1, length.value * growth_rate, targetHidden]
    # conv2d_transpose expects a 4-D tensor, so insert a dummy height axis.
    while len(x.get_shape()) < 4:
        x = tf.expand_dims(x, axis=1)
    filter_ = tf.get_variable("deconv_filter",
                              shape=[1, width, targetHidden, inputHidden])
    bias = tf.get_variable("deconv_bias", shape=[targetHidden])
    conv_trans = tf.nn.conv2d_transpose(x, filter=filter_,
                                        output_shape=outputShape,
                                        strides=[1, 1, growth_rate, 1])
    conv_trans += bias
    conv_trans = ln(conv_trans)
    conv_trans = tf.nn.relu(conv_trans)
    conv_trans = tf.squeeze(conv_trans, 1)
    return conv_trans

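# A hypothetical shape check for conv1d_transpose (not part of the original
# module); assumes FLAGS.batch_size == 8 for this example. The transpose
# convolution multiplies the temporal dimension by `growth_rate`.
def _demo_conv1d_transpose_shapes():
    x = tf.placeholder(tf.float32, [8, 16, 128])  # [batch, length, hidden]
    with tf.variable_scope("demo_deconv"):
        y = conv1d_transpose(x, targetHidden=64, width=3, growth_rate=2)
    print(y.get_shape())  # expected: (8, 32, 64)
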
def nonlin_ln(x):
    '''Softsign nonlinearity with dropout, followed by layer normalization.'''
    return ln(tf.nn.dropout(tf.nn.softsign(x), keep_prob=FLAGS.dropout_keep_prob))

def layer_norm(input_tensor):
    """Run layer normalization on the last dimension of the tensor."""
    return ln(inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1)

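# A hypothetical sanity check (not part of the original module): layer_norm
# normalizes each position independently over the last (hidden) axis, so with
# the default scale/shift initialization the per-position mean is ~0 and the
# variance is ~1.
def _demo_layer_norm_moments():
    x = tf.random_normal([8, 32, 64]) * 5.0 + 3.0
    with tf.variable_scope("demo_ln"):
        y = layer_norm(x)
    mean, variance = tf.nn.moments(y, axes=[-1])
    print(mean.get_shape(), variance.get_shape())  # both (8, 32)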