import tensorflow as tf
import tensorflow.contrib.slim as slim


def build_net(images, n_class=None, is_training=True, reuse=False, alpha=1, scope='mobile_id'):
    """ShuffleNet-style embedding network: a stem conv, three stages of shuffle
    blocks, a 1x1 expansion conv, and a 128-d bottleneck. alpha is accepted for
    API parity with the other variants but is unused here."""
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            net = conv2d_same(images, 58, 3, stride=2, scope='conv1')
            # net = slim.max_pool2d(net, 3, stride=2, padding='SAME', scope='pool1')
            net = shuffle_block(net, 2, 'stage2_block1')
            for i in range(3):
                net = shuffle_block(net, 1, 'stage2_block%d' % (i + 2))
            net = shuffle_block(net, 2, 'stage3_block1')
            for i in range(7):
                net = shuffle_block(net, 1, 'stage3_block%d' % (i + 2))
            net = shuffle_block(net, 2, 'stage4_block1')
            for i in range(3):
                net = shuffle_block(net, 1, 'stage4_block%d' % (i + 2))
            net = conv2d_same(net, 1024, 1, stride=1, scope='conv5')
            # net = slim.avg_pool2d(net, 7, stride=1, padding='VALID', scope='global_pool')
            net = slim.flatten(net)
            net = slim.fully_connected(
                net, 128,
                # weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
                normalizer_fn=slim.batch_norm,
                # normalizer_params={'param_initializers': {'gamma': tf.constant_initializer(0.1)}},
                activation_fn=None, scope='fc1')
            if isinstance(n_class, int):
                net = slim.flatten(net)
                net = slim.fully_connected(net, n_class, activation_fn=None, scope='logits')
    return net
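

# The block helpers used above (conv2d_same, shuffle_block) are not defined in
# this section. The sketches below give one plausible reading against
# tf.contrib.slim: conv2d_same follows the TF-slim resnet_utils convention of
# explicit padding for strided convs, and shuffle_block is a ShuffleNet-v2
# style unit (channel split, 1x1 conv, depthwise 3x3, 1x1 conv, channel
# shuffle) whose stride-2 form doubles the width, which would take the 58-wide
# stem through 116/232/464, the ShuffleNet v2 1x stage widths. Treat these as
# assumptions, not the repository's actual implementations.
def conv2d_same(inputs, num_outputs, kernel_size, stride, scope=None, **kwargs):
    # Stride-1 convs can use implicit SAME padding; strided convs pad
    # explicitly so the output size is ceil(input / stride) for any parity.
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1,
                           padding='SAME', scope=scope, **kwargs)
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
                       padding='VALID', scope=scope, **kwargs)


def channel_shuffle(inputs, groups=2):
    # Interleave channels across groups via the reshape/transpose trick.
    _, h, w, c = inputs.shape.as_list()
    x = tf.reshape(inputs, [-1, h, w, groups, c // groups])
    x = tf.transpose(x, [0, 1, 2, 4, 3])
    return tf.reshape(x, [-1, h, w, c])


def shuffle_block(inputs, stride, scope):
    # Stride 1: split channels in half, transform one branch, width preserved.
    # Stride 2: transform both branches with downsampling, width doubles.
    with tf.variable_scope(scope):
        c = inputs.shape.as_list()[-1]
        if stride == 1:
            shortcut, x = tf.split(inputs, 2, axis=3)
            out_c = c // 2
        else:
            shortcut = x = inputs
            out_c = c
            shortcut = slim.separable_conv2d(shortcut, None, 3, 1, stride=2,
                                             activation_fn=None,
                                             normalizer_fn=slim.batch_norm,
                                             scope='shortcut_dw')
            shortcut = slim.conv2d(shortcut, out_c, 1,
                                   normalizer_fn=slim.batch_norm, scope='shortcut_pw')
        x = slim.conv2d(x, out_c, 1, normalizer_fn=slim.batch_norm, scope='pw1')
        x = slim.separable_conv2d(x, None, 3, 1, stride=stride, activation_fn=None,
                                  normalizer_fn=slim.batch_norm, scope='dw')
        x = slim.conv2d(x, out_c, 1, normalizer_fn=slim.batch_norm, scope='pw2')
        return channel_shuffle(tf.concat([shortcut, x], axis=3))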


def build_net(images, n_class=None, is_training=True, reuse=False, scope='resnet_50_half'):
    """Half-width ResNet-50 embedding network built from pre-activation
    bottleneck blocks; the conv arg_scope strips BN/ReLU off slim.conv2d so
    the blocks can place them explicitly."""
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            # Per-stage [narrow, narrow, wide] channel triples and block counts;
            # [3, 4, 6, 3] is the ResNet-50 layout at half width.
            net_spec = [[[i, i, i * 4] for i in [32, 64, 128, 256]], [3, 4, 6, 3]]
            with slim.arg_scope([slim.conv2d], normalizer_fn=None, activation_fn=None):
                net = conv2d_same(images, 32, 5, stride=2, scope='conv1')
                for i, spec in enumerate(zip(*net_spec)):
                    # Downsample at the first block of every stage except the first.
                    stride = 2 if i != 0 else 1
                    block_spec, n_block = spec
                    for j in range(n_block):
                        net = pre_bottleneck_block(net, block_spec, stride=stride,
                                                   scope='res_%d_%d' % (i + 1, j + 1))
                        stride = 1
                net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
                net = slim.conv2d(net, 256, 1, stride=1, scope='last_conv')
            net = slim.flatten(net)
            net = slim.fully_connected(
                net, 128,
                # weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                activation_fn=None, scope='fc5')
            if isinstance(n_class, int):
                net = slim.flatten(net)
                net = slim.fully_connected(net, n_class, activation_fn=None, scope='logits')
    # add summaries
    slim.summarize_collection(tf.GraphKeys.MODEL_VARIABLES)
    return net
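

# pre_bottleneck_block is likewise undefined here; below is a sketch of a
# pre-activation bottleneck residual unit (He et al., "Identity Mappings in
# Deep Residual Networks") matching the call site above, where block_spec is a
# [narrow, narrow, wide] channel triple. BN/ReLU placement is explicit and
# activation_fn=None is passed per call so the unit also behaves the same
# outside the conv arg_scope. An assumption, not the repository's code.
def pre_bottleneck_block(inputs, block_spec, stride=1, scope=None):
    c1, c2, c3 = block_spec
    in_c = inputs.shape.as_list()[-1]
    with tf.variable_scope(scope):
        preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
        if stride != 1 or in_c != c3:
            # Projection shortcut whenever the spatial size or width changes.
            shortcut = slim.conv2d(preact, c3, 1, stride=stride,
                                   activation_fn=None, scope='shortcut')
        else:
            shortcut = inputs
        x = slim.conv2d(preact, c1, 1, stride=1, activation_fn=None, scope='conv1')
        x = slim.batch_norm(x, activation_fn=tf.nn.relu, scope='bn2')
        x = conv2d_same(x, c2, 3, stride=stride, activation_fn=None, scope='conv2')
        x = slim.batch_norm(x, activation_fn=tf.nn.relu, scope='bn3')
        x = slim.conv2d(x, c3, 1, stride=1, activation_fn=None, scope='conv3')
        return shortcut + x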


def build_net(images, n_class=None, is_training=True, reuse=False, scope='resnet_152_half'):
    """Half-width residual embedding network built from pre-activation basic
    blocks. Note: the [2, 2, 2, 2] stage layout below is the ResNet-18
    configuration, not ResNet-152, despite the default scope name."""
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
            net_spec = [[[i, i] for i in [32, 64, 128, 256]], [2, 2, 2, 2]]
            net = conv2d_same(images, 32, 3, stride=2, scope='conv1')
            for i, spec in enumerate(zip(*net_spec)):
                stride = 2 if i != 0 else 1
                block_spec, n_block = spec
                for j in range(n_block):
                    net = pre_basic_block(net, block_spec, stride=stride,
                                          scope='res_%d_%d' % (i + 1, j + 1))
                    stride = 1
            net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
            net = slim.flatten(net)
            # net = slim.dropout(net, 0.4)
            net = slim.fully_connected(
                net, 128,
                # weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
                normalizer_fn=slim.batch_norm,
                activation_fn=None, scope='fc5')
            if isinstance(n_class, int):
                net = slim.flatten(net)
                net = slim.fully_connected(net, n_class, activation_fn=None, scope='logits')
    return net
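

# pre_basic_block: a sketch of the two-conv (basic) pre-activation residual
# unit this variant assumes, where block_spec is a [c, c] channel pair. This
# build_net has no conv arg_scope stripping activations, so the sketch passes
# activation_fn=None per call to keep the pre-activation ordering. An
# assumption, not the repository's implementation.
def pre_basic_block(inputs, block_spec, stride=1, scope=None):
    c1, c2 = block_spec
    in_c = inputs.shape.as_list()[-1]
    with tf.variable_scope(scope):
        preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
        if stride != 1 or in_c != c2:
            # Projection shortcut whenever the spatial size or width changes.
            shortcut = slim.conv2d(preact, c2, 1, stride=stride,
                                   activation_fn=None, scope='shortcut')
        else:
            shortcut = inputs
        x = conv2d_same(preact, c1, 3, stride=stride, activation_fn=None, scope='conv1')
        x = slim.batch_norm(x, activation_fn=tf.nn.relu, scope='bn2')
        x = conv2d_same(x, c2, 3, stride=1, activation_fn=None, scope='conv2')
        return shortcut + x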


def build_net(images, n_class=None, is_training=True, reuse=False, alpha=1.25, scope='mobile_id'):
    """MobileNetV2-style embedding network; alpha is the width multiplier
    applied to every block's output channels."""
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            net = conv2d_same(images, int(32 * alpha), 3, stride=2, scope='conv1')
            net = mobile_block(net, int(16 * alpha), 3, stride=1, t=1, scope='mblock1_1')
            net = mobile_block(net, int(16 * alpha), 3, stride=1, scope='mblock1_2')
            net = mobile_block(net, int(24 * alpha), 3, stride=2, scope='mblock2_1')
            net = mobile_block(net, int(24 * alpha), 3, stride=1, scope='mblock2_2')
            net = mobile_block(net, int(24 * alpha), 3, stride=1, scope='mblock2_3')
            net = mobile_block(net, int(32 * alpha), 3, stride=2, scope='mblock3_1')
            net = mobile_block(net, int(32 * alpha), 3, stride=1, scope='mblock3_2')
            net = mobile_block(net, int(32 * alpha), 3, stride=1, scope='mblock3_3')
            net = mobile_block(net, int(32 * alpha), 3, stride=1, scope='mblock3_4')
            net = mobile_block(net, int(64 * alpha), 3, stride=2, scope='mblock4_1')
            for i in range(3):
                net = mobile_block(net, int(64 * alpha), 3, stride=1, scope='mblock4_%d' % (i + 2))
            for i in range(4):
                net = mobile_block(net, int(96 * alpha), 3, stride=1, scope='mblock5_%d' % (i + 1))
            net = mobile_block(net, int(160 * alpha), 3, stride=2, scope='mblock6_1')
            for i in range(3):
                net = mobile_block(net, int(160 * alpha), 3, stride=1, scope='mblock6_%d' % (i + 2))
            net = mobile_block(net, int(320 * alpha), 3, stride=1, scope='mblock7_1')
            # net = mobile_block(net, int(320 * alpha), 3, stride=1, scope='mblock7_2')
            net = slim.conv2d(net, 1280, 1, stride=1, scope='final_point_conv')
            net = slim.flatten(net)
            net = slim.fully_connected(
                net, 128,
                weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
                normalizer_fn=slim.batch_norm,
                # normalizer_params={'param_initializers': {'gamma': tf.constant_initializer(0.0001)}},
                activation_fn=None, scope='fc1')
            if isinstance(n_class, int):
                net = slim.flatten(net)
                net = slim.fully_connected(net, n_class, activation_fn=None, scope='logits')
    # add summaries
    slim.summarize_collection(tf.GraphKeys.MODEL_VARIABLES)
    return net
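

# mobile_block is not defined in this section either; below is a sketch of a
# MobileNetV2-style inverted residual unit matching the call sites above:
# expand the input width by factor t with a 1x1 conv, filter with a depthwise
# conv, project back down linearly, and add the residual when shapes allow.
# The default t=6 mirrors the MobileNetV2 paper; the repository's real default
# may differ. An assumption, not the repository's implementation.
def mobile_block(inputs, num_outputs, kernel_size, stride=1, t=6, scope=None):
    in_c = inputs.shape.as_list()[-1]
    with tf.variable_scope(scope):
        x = slim.conv2d(inputs, in_c * t, 1, stride=1,
                        normalizer_fn=slim.batch_norm,
                        activation_fn=tf.nn.relu6, scope='expand')
        x = slim.separable_conv2d(x, None, kernel_size, 1, stride=stride,
                                  normalizer_fn=slim.batch_norm,
                                  activation_fn=tf.nn.relu6, scope='depthwise')
        # Linear bottleneck: no activation after the projection.
        x = slim.conv2d(x, num_outputs, 1, stride=1,
                        normalizer_fn=slim.batch_norm,
                        activation_fn=None, scope='project')
        if stride == 1 and in_c == num_outputs:
            x = x + inputs
        return x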


def build_net(images, n_class=None, is_training=True, reuse=False, alpha=1, scope='mobile_id'):
    """MobileFaceNet-style embedding network: inverted residual blocks followed
    by a global depthwise conv (GDConv) head and a 128-d bottleneck."""
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            net = conv2d_same(images, int(64 * alpha), 3, stride=2, scope='conv1')
            net = slim.separable_conv2d(net, None, 3, 1, stride=1, padding='SAME', scope='depth_conv1')
            net = mobile_block(net, int(64 * alpha), 3, stride=2, t=2, scope='mblock1_1')
            for i in range(4):
                net = mobile_block(net, int(64 * alpha), 3, stride=1, t=2, scope='mblock1_%d' % (i + 2))
            net = mobile_block(net, int(128 * alpha), 3, stride=2, t=4, scope='mblock2_1')
            for i in range(6):
                net = mobile_block(net, int(128 * alpha), 3, stride=1, t=2, scope='mblock2_%d' % (i + 2))
            net = mobile_block(net, int(128 * alpha), 3, stride=2, t=4, scope='mblock3_1')
            for i in range(2):
                net = mobile_block(net, int(128 * alpha), 3, stride=1, t=2, scope='mblock3_%d' % (i + 2))
            net = slim.conv2d(net, 512, 1, stride=1, scope='final_point_conv')
            # Global depthwise conv: a [7, 6] VALID depthwise conv that collapses
            # the spatial grid to 1x1 (assumes a 7x6 feature map at this point).
            net = slim.separable_conv2d(net, None, [7, 6], 1, stride=1, padding='VALID', scope='GDConv')
            # print(net.shape)  # debug: expect [N, 1, 1, 512] after GDConv on a 7x6 map
            net = slim.flatten(net)
            net = slim.fully_connected(
                net, 128,
                # weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
                normalizer_fn=slim.batch_norm,
                weights_regularizer=slim.l2_regularizer(4e-4),
                # normalizer_params={'param_initializers': {'gamma': tf.constant_initializer(0.1)}},
                activation_fn=None, scope='fc1')
            if isinstance(n_class, int):
                net = slim.flatten(net)
                net = slim.fully_connected(net, n_class, activation_fn=None, scope='logits')
    # add summaries
    slim.summarize_collection(tf.GraphKeys.MODEL_VARIABLES)
    return net
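

# Minimal usage sketch for the builders above (names and sizes here are
# illustrative, not from the repository). The 112x96 input matches the [7, 6]
# GDConv kernel in the last variant: four stride-2 reductions give a 7x6 map
# (112/16 = 7, 96/16 = 6). n_class=10572 is just an example class count.
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, [None, 112, 96, 3], name='images')
    # Training graph: 128-d embedding followed by classification logits.
    logits = build_net(images, n_class=10572, is_training=True)
    # Evaluation graph sharing the same weights: embedding only.
    embeddings = build_net(images, n_class=None, is_training=False, reuse=True)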