def lenet5(images, nclass):
    """Build a LeNet-5 style network: two conv/pool stages + two dense layers.

    Args:
        images: 4-D image tensor in NHWC layout (input channels read from
            axis 3).
        nclass: number of output classes (width of the final dense layer).

    Returns:
        Unnormalized class logits of shape (batch, nclass).
    """
    # conv 1: 5x5 kernel, 20 output maps; bias added manually below.
    net = layers.conv_2d_layer(images,
                               [5, 5, images.get_shape()[3], 20],
                               nonlinearity=None, padding='SAME',
                               name='conv_1', with_biases=False)
    biases = layers.variable_on_cpu('biases_1', net.get_shape()[3],
                                    tf.constant_initializer(0.0),
                                    dtype=tf.float32)
    net = tf.nn.bias_add(net, biases)
    net = tf.nn.relu(net)
    # max pool: 2x2, stride 2 (halves the spatial resolution)
    net = tf.nn.max_pool(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                         padding='VALID')
    # conv 2: 5x5 kernel, 50 output maps
    net = layers.conv_2d_layer(net,
                               [5, 5, net.get_shape()[3], 50],
                               nonlinearity=None, padding='SAME',
                               name='conv_2', with_biases=False)
    biases = layers.variable_on_cpu('biases_2', net.get_shape()[3],
                                    tf.constant_initializer(0.0),
                                    dtype=tf.float32)
    net = tf.nn.bias_add(net, biases)
    net = tf.nn.relu(net)
    # max pool
    net = tf.nn.max_pool(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                         padding='VALID')
    # reshape: flatten the spatial grid; .value converts tf.Dimension to int
    net = tf.reshape(net, [-1, (net.get_shape()[1] * net.get_shape()[2] *
                                net.get_shape()[3]).value])
    # dense 1 — use .value (plain int) for the input width, consistent with
    # the other lenet5 variants in this file; a raw tf.Dimension here can
    # break integer shape arithmetic inside dense_layer.
    net = layers.dense_layer(net, net.get_shape()[1].value, 500,
                             nonlinearity=None, name='dense_1')
    net = tf.nn.relu(net)
    # dense 2: final logits (no nonlinearity; loss applies softmax)
    net = layers.dense_layer(net, net.get_shape()[1].value, nclass,
                             nonlinearity=None, name='dense_2')
    return net
def lenet5(images, nclass, wd, reuse, is_training=True):
    """Build a LeNet-5 variant with weight decay and dropout.

    Args:
        images: 4-D image tensor in NHWC layout (input channels read from
            axis 3).
        nclass: number of output classes (width of the final dense layer).
        wd: weight-decay coefficient forwarded to every parameterized layer.
        reuse: variable-reuse flag.  NOTE(review): currently unused in this
            body — confirm whether it should be forwarded to the layers.
        is_training: when True (default, matching the original hard-coded
            behavior) dropout is active; pass False at evaluation time so
            dropout becomes the identity.

    Returns:
        Unnormalized class logits of shape (batch, nclass).
    """
    # conv 1: 5x5 kernel, 20 output maps, ReLU fused into the layer
    net = layers.conv_2d_layer(images, [5, 5], images.get_shape()[3].value,
                               20, nonlinearity=tf.nn.relu, wd=wd,
                               padding='SAME', name='conv_1')
    # max pool: 2x2, stride 2
    net = tf.nn.max_pool(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                         padding='VALID')
    # conv 2: 5x5 kernel, 50 output maps
    net = layers.conv_2d_layer(net, [5, 5], net.get_shape()[3].value, 50,
                               nonlinearity=tf.nn.relu, wd=wd,
                               padding='SAME', name='conv_2')
    # max pool
    net = tf.nn.max_pool(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                         padding='VALID')
    # reshape: flatten the spatial grid to (batch, features)
    net = tf.reshape(net, [-1, (net.get_shape()[1] * net.get_shape()[2] *
                                net.get_shape()[3]).value])
    # dense 1 with dropout before and after (rate 0.5); previously dropout
    # was hard-coded training=True, which kept it active at eval time —
    # is_training generalizes that while defaulting to the old behavior.
    net = tf.layers.dropout(net, 0.5, training=is_training)
    net = layers.dense_layer(net, net.get_shape()[1].value, 500,
                             nonlinearity=tf.nn.relu, wd=wd, name='dense_1')
    net = tf.layers.dropout(net, 0.5, training=is_training)
    # dense 2: final logits (no nonlinearity)
    net = layers.dense_layer(net, net.get_shape()[1].value, nclass,
                             nonlinearity=None, wd=wd, name='dense_2')
    return net
def conv_bn_rectify(net, num_filters, wd, name, is_training, reuse):
    """3x3 convolution -> bias -> batch norm -> ReLU, inside scope `name`.

    Args:
        net: 4-D input tensor, NHWC layout.
        num_filters: number of convolution output channels.
        wd: weight-decay coefficient for the convolution kernel.
        name: variable scope wrapping all ops/variables of this stage.
        is_training: forwarded to batch_norm (controls moving-stat updates).
        reuse: variable-reuse flag forwarded to batch_norm.

    Returns:
        The rectified tensor, same spatial shape, num_filters channels.
    """
    with tf.variable_scope(name):
        # Convolution without its own biases; a CPU-resident bias variable
        # is created and added explicitly instead.
        # NOTE(review): this uses layers._variable_on_cpu while other blocks
        # in this file call layers.variable_on_cpu — confirm which is the
        # real helper name.
        conv_out = layers.conv_2d_layer(net, [3, 3], net.get_shape()[3],
                                        num_filters, nonlinearity=None,
                                        wd=wd, padding='SAME', name='conv',
                                        with_biases=False)
        bias_var = layers._variable_on_cpu('biases', conv_out.get_shape()[3],
                                           tf.constant_initializer(0.0),
                                           dtype=tf.float32)
        biased = tf.nn.bias_add(conv_out, bias_var)
        normed = tf.contrib.layers.batch_norm(biased,
                                              scope=tf.get_variable_scope(),
                                              decay=0.9, reuse=reuse,
                                              is_training=is_training)
        return tf.nn.relu(normed)
def lenet5(images, nclass, wd, reuse):
    """LeNet-5 with weight decay: conv(20) -> pool -> conv(50) -> pool ->
    dense(500) -> dense(nclass).

    Args:
        images: 4-D image tensor, NHWC layout.
        nclass: number of output classes.
        wd: weight-decay coefficient passed to every parameterized layer.
        reuse: variable-reuse flag.  NOTE(review): not referenced in this
            body — confirm intent.

    Returns:
        Unnormalized class logits of shape (batch, nclass).
    """
    pool_window = [1, 2, 2, 1]  # 2x2 pooling, stride 2, shared by both stages

    # stage 1: 5x5 conv to 20 maps (ReLU fused), then halve resolution
    in_channels = images.get_shape()[3].value
    conv1 = layers.conv_2d_layer(images, [5, 5], in_channels, 20,
                                 nonlinearity=tf.nn.relu, wd=wd,
                                 padding='SAME', name='conv_1')
    pooled1 = tf.nn.max_pool(conv1, ksize=pool_window, strides=pool_window,
                             padding='VALID')

    # stage 2: 5x5 conv to 50 maps, then halve resolution again
    conv2 = layers.conv_2d_layer(pooled1, [5, 5],
                                 pooled1.get_shape()[3].value, 50,
                                 nonlinearity=tf.nn.relu, wd=wd,
                                 padding='SAME', name='conv_2')
    pooled2 = tf.nn.max_pool(conv2, ksize=pool_window, strides=pool_window,
                             padding='VALID')

    # flatten spatial grid into a feature vector per example
    shape = pooled2.get_shape()
    flat = tf.reshape(pooled2, [-1, (shape[1] * shape[2] * shape[3]).value])

    # classifier head: hidden dense layer then logits (no nonlinearity)
    hidden = layers.dense_layer(flat, flat.get_shape()[1].value, 500,
                                nonlinearity=tf.nn.relu, wd=wd,
                                name='dense_1')
    logits = layers.dense_layer(hidden, hidden.get_shape()[1].value, nclass,
                                nonlinearity=None, wd=wd, name='dense_2')
    return logits
def conv_bn_rectify(net, num_filters, name, is_training, reuse, kl_weight=1.0):
    """3x3 conv -> SBP dropout -> bias -> batch norm -> ReLU, in scope `name`.

    Args:
        net: 4-D input tensor, NHWC layout.
        num_filters: number of convolution output channels.
        name: variable scope wrapping all ops/variables of this stage.
        is_training: forwarded to sbp_dropout only (see NOTE below).
        reuse: variable-reuse flag forwarded to conv, sbp_dropout and
            batch_norm.
        kl_weight: scaling of the SBP KL term, forwarded to sbp_dropout.

    Returns:
        The rectified tensor, same spatial shape, num_filters channels.
    """
    with tf.variable_scope(name):
        # Convolution without its own biases; structured-Bayesian-pruning
        # dropout is applied on the channel axis (axis 3) right after.
        conv_out = layers.conv_2d_layer(net,
                                        [3, 3, net.get_shape()[3],
                                         num_filters],
                                        nonlinearity=None, padding='SAME',
                                        name='conv', with_biases=False,
                                        reuse=reuse)
        dropped = layers.sbp_dropout(conv_out, 3, is_training, 'sbp', reuse,
                                     kl_weight=kl_weight)
        bias_var = layers.variable_on_cpu('biases', dropped.get_shape()[3],
                                          tf.constant_initializer(0.0),
                                          dtype=tf.float32)
        biased = tf.nn.bias_add(dropped, bias_var)
        # NOTE(review): batch_norm is pinned to is_training=False even though
        # this function receives is_training (only sbp_dropout uses it).
        # Presumably deliberate (frozen BN statistics alongside SBP) — confirm.
        normed = tf.contrib.layers.batch_norm(biased,
                                              scope=tf.get_variable_scope(),
                                              reuse=reuse, is_training=False,
                                              center=True, scale=True)
        return tf.nn.relu(normed)