Example #1
def build_single_net(x, is_training, FLAGS):
    with tf.variable_scope('pre'):
        pre = layers.conv(x, num_outputs=32, kernel_size=[3, 3], scope='conv', b_norm=True, is_training=is_training,
                          weight_decay=FLAGS.weight_decay)
        # pre = layers.max_pool2d(pre, [2, 2], padding='SAME', scope='pool')
    h = pre
    # for i in range(1, n + 1):
    h = block(h, 32, FLAGS.weight_decay, '32_block{}'.format(1), is_training)

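    # NOTE: with stride [1, 1, 1, 1] this max pool keeps the spatial size;
    # a stride of [1, 2, 2, 1] would be needed to actually downsample.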
    h = tf.nn.max_pool(h, [1, 2, 2, 1], [1, 1, 1, 1], 'SAME')
    # h = block(h, 64, FLAGS.weight_decay, '64_block1', is_training, True)
    h = layers.conv(h, num_outputs=64, kernel_size=[3, 3], strides=[1, 1, 1, 1],
                    scope='conv1', b_norm=True, is_training=is_training, weight_decay=FLAGS.weight_decay)
    
    # for i in range(2, n + 1):
    h = block(h, 64, FLAGS.weight_decay, '64_block{}'.format(1), is_training)

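    # As above, this stride-1 max pool leaves the spatial size unchanged.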
    h = tf.nn.max_pool(h, [1, 2, 2, 1], [1, 1, 1, 1], 'SAME')
    h = layers.conv(h, num_outputs=128, kernel_size=[3, 3], strides=[1, 1, 1, 1],
                    scope='conv2', b_norm=True, is_training=is_training, weight_decay=FLAGS.weight_decay)
    
    # for i in range(2, n + 1):
    h = block(h, 128, FLAGS.weight_decay, '128_block{}'.format(1), is_training)

    shape = h.get_shape().as_list()

    h = tf.contrib.layers.avg_pool2d(h, [shape[1], shape[2]], scope='global_pool')

    return h
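
A minimal usage sketch for this feature extractor, assuming TensorFlow 1.x, the project-local layers helper used above, and a FLAGS object that defines weight_decay (the flag definition and input shape below are illustrative assumptions, not from the source):

import tensorflow as tf

tf.app.flags.DEFINE_float('weight_decay', 1e-4, 'L2 regularization strength')
FLAGS = tf.app.flags.FLAGS

x = tf.placeholder(tf.float32, [None, 32, 32, 3], name='input')
is_training = tf.placeholder(tf.bool, name='is_training')
features = build_single_net(x, is_training, FLAGS)  # globally pooled feature map
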
Example #2
def build_single_net(x, is_training, FLAGS, nn=10):
    n = FLAGS.blocks
    # shape = x.get_shape().as_list()
    with tf.variable_scope('pre'):
        pre = layers.conv(x, num_outputs=24, kernel_size=[3, 3], scope='conv', b_norm=True, is_training=is_training,
                          weight_decay=FLAGS.weight_decay)
        # pre = layers.max_pool2d(pre, [2, 2], padding='SAME', scope='pool')
    h = pre
    for i in range(1, n + 1):
        h = block(h, 24, FLAGS.weight_decay, '24_block{}'.format(i), is_training)

    h = block(h, 48, FLAGS.weight_decay, '48_block1', is_training, True)
    for i in range(2, n + 1):
        h = block(h, 48, FLAGS.weight_decay, '48_block{}'.format(i), is_training)

    h = block(h, 96, FLAGS.weight_decay, '96_block1', is_training, True)
    for i in range(2, n + 1):
        h = block(h, 96, FLAGS.weight_decay, '96_block{}'.format(i), is_training)

    shape = h.get_shape().as_list()

    h = tf.contrib.layers.avg_pool2d(h, [shape[1], shape[2]], scope='global_pool')
    res = layers.conv(h, num_outputs=nn, kernel_size=[1, 1], scope='fc1', padding='VALID',
                      b_norm=True, is_training=is_training, weight_decay=FLAGS.weight_decay, activation_fn=None)

    return res
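
The head above leaves the logits in the 4-D layout [batch, 1, 1, nn] produced by the global pool; a caller would typically flatten them before attaching a loss, as Example #3 does (sketch, reusing the names from the previous usage example):

logits = build_single_net(x, is_training, FLAGS, nn=FLAGS.num_classes)
logits = tf.reshape(logits, [-1, FLAGS.num_classes])
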
Example #3
def build_net(x, is_training, FLAGS):
    n = FLAGS.blocks
    # shape = x.get_shape().as_list()
    with tf.variable_scope('pre'):
        pre = layers.conv(x,
                          num_outputs=16,
                          kernel_size=[3, 3],
                          scope='conv1',
                          b_norm=True,
                          is_training=is_training,
                          weight_decay=FLAGS.weight_decay)
        pre = layers.conv(pre,
                          num_outputs=32,
                          kernel_size=[3, 3],
                          scope='conv2',
                          b_norm=True,
                          is_training=is_training,
                          weight_decay=FLAGS.weight_decay)
        # pre = layers.max_pool2d(pre, [2, 2], padding='SAME', scope='pool')
    h = pre
    for i in range(1, n + 1):
        h = block(h, 32, FLAGS.weight_decay, '32_block{}'.format(i),
                  is_training)

    h = block(h, 64, FLAGS.weight_decay, '64_block1', is_training, True)
    for i in range(2, n + 1):
        h = block(h, 64, FLAGS.weight_decay, '64_block{}'.format(i),
                  is_training)

    h = block(h, 128, FLAGS.weight_decay, '128_block1', is_training, True)
    for i in range(2, n + 1):
        h = block(h, 128, FLAGS.weight_decay, '128_block{}'.format(i),
                  is_training)

    shape = h.get_shape().as_list()

    h = tf.contrib.layers.avg_pool2d(h, [shape[1], shape[2]],
                                     scope='global_pool')
    h = layers.conv(h,
                    num_outputs=64,
                    kernel_size=[1, 1],
                    scope='fc1',
                    padding='VALID',
                    b_norm=True,
                    is_training=is_training,
                    weight_decay=FLAGS.weight_decay,
                    activation_fn=None)

    h = layers.conv(h,
                    num_outputs=FLAGS.num_classes,
                    kernel_size=[1, 1],
                    scope='fc2',
                    padding='VALID',
                    b_norm=True,
                    is_training=is_training,
                    weight_decay=FLAGS.weight_decay,
                    activation_fn=None)

    return tf.reshape(h, [-1, FLAGS.num_classes])
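
Because build_net returns raw class logits, a softmax cross-entropy head attaches directly; a sketch assuming integer labels (the placeholder name is an illustration):

labels = tf.placeholder(tf.int64, [None], name='labels')
logits = build_net(x, is_training, FLAGS)
cross_entropy = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
# If layers.conv registers its weight_decay terms in the standard collection,
# they are picked up here; otherwise reg is empty and the loss is unchanged.
reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = cross_entropy + (tf.add_n(reg) if reg else 0.0)
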
Example #4
def build_net(x, is_training, FLAGS):
    n = FLAGS.blocks
    # shape = x.get_shape().as_list()
    with tf.variable_scope('pre'):
        pre = layers.conv(x,
                          num_outputs=64,
                          kernel_size=[7, 7],
                          scope='conv',
                          b_norm=True,
                          is_training=is_training,
                          weight_decay=FLAGS.weight_decay)
        pre = tf.contrib.layers.max_pool2d(pre, [3, 3],
                                           stride=2,
                                           padding='SAME',
                                           scope='pool')  #32
    h = pre
    for i in range(1, 4):
        h = block(h, 64, FLAGS.weight_decay, '64_block{}'.format(i),
                  is_training)

    h = block(h, 128, FLAGS.weight_decay, '128_block_s2', is_training,
              True)  #16
    for i in range(1, 4):
        h = block(h, 128, FLAGS.weight_decay, '128_block{}'.format(i),
                  is_training)

    h = block(h, 256, FLAGS.weight_decay, '256_block_s2', is_training, True)
    for i in range(1, 6):
        h = block(h, 256, FLAGS.weight_decay, '256_block{}'.format(i),
                  is_training)

    h = block(h, 512, FLAGS.weight_decay, '512_block_s2', is_training, True)
    for i in range(1, 3):
        h = block(h, 512, FLAGS.weight_decay, '512_block{}'.format(i),
                  is_training)
    shape = h.get_shape().as_list()
    h = tf.contrib.layers.avg_pool2d(h, [shape[1], shape[2]],
                                     scope='global_pool')
    shape = h.get_shape().as_list()
    h = layers.conv(h,
                    num_outputs=FLAGS.num_classes,
                    kernel_size=[shape[1], shape[2]],
                    scope='fc1',
                    padding='VALID',
                    b_norm=True,
                    is_training=is_training,
                    weight_decay=FLAGS.weight_decay,
                    activation_fn=None)

    return tf.reshape(h, [-1, FLAGS.num_classes])
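
Counting the strided blocks, the stages above run 3/4/6/3 residual blocks at 64/128/256/512 channels, matching the stage layout of ResNet-34. A quick shape check, assuming FLAGS defines blocks, weight_decay, and num_classes, with an illustrative 64x64 RGB input (sketch):

x = tf.placeholder(tf.float32, [None, 64, 64, 3])
is_training = tf.placeholder(tf.bool)
logits = build_net(x, is_training, FLAGS)
print(logits.get_shape().as_list())  # expected: [None, num_classes]
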
Example #5
def build_net(x, is_training, FLAGS):
    n = FLAGS.blocks
    with tf.variable_scope('pre'):
        pre = layers.conv(x, num_outputs=16, kernel_size=[3, 3], scope='conv', b_norm=True, is_training=is_training,
                          weight_decay=FLAGS.weight_decay)

    h1 = 
Example #6
def build_net(x, is_training, FLAGS):
    num_branches = FLAGS.num_branches
    y = []
    for i in range(1, num_branches + 1):
        with tf.variable_scope('branch%d' % i):
            tmp = build_single_net(x, is_training, FLAGS)
            if FLAGS.p_relu:
                tmp = layers.p_relu(tmp)
        y.append(tmp)

    with tf.variable_scope('branch%d' % (num_branches + 1)):
        y4 = build_single_net_w(x, is_training, FLAGS)

    con = tf.concat(y, axis=3)  #192
    con = tf.reshape(con, [-1, 1, num_branches * 224])

    w = layers.conv(y4,
                    num_outputs=FLAGS.num_classes * num_branches * 224,
                    kernel_size=[1, 1],
                    scope='fc2',
                    padding='VALID',
                    b_norm=True,
                    is_training=is_training,
                    weight_decay=FLAGS.weight_decay,
                    activation_fn=None)
    w = tf.reshape(w, [-1, num_branches * 224, FLAGS.num_classes])

    res = tf.matmul(con, w)
    res = layers.batch_norm(res, is_training=is_training, scope='bn')
    return tf.reshape(res, [-1, FLAGS.num_classes])
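
The closing matmul acts as a per-example linear classifier: con has shape [batch, 1, num_branches * 224] and w has shape [batch, num_branches * 224, num_classes], so tf.matmul batches over the leading axis and yields [batch, 1, num_classes]. A standalone shape check with dummy tensors (all dimensions illustrative):

batch, feat, classes = 8, 3 * 224, 10
con = tf.zeros([batch, 1, feat])
w = tf.zeros([batch, feat, classes])
out = tf.matmul(con, w)           # batched matmul over axis 0
print(out.get_shape().as_list())  # [8, 1, 10], reshaped to [8, 10] above
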
def build_net(x, is_training, FLAGS):
    n = FLAGS.blocks
    # shape = x.get_shape().as_list()
    with tf.variable_scope('pre'):
        pre = layers.conv(x, num_outputs=64, kernel_size=[3, 3], scope='conv', b_norm=True, is_training=is_training,
                          weight_decay=FLAGS.weight_decay, activation_fn=tf.nn.elu)
        # pre = layers.max_pool2d(pre, [2, 2], padding='SAME', scope='pool')
    h = pre

    shape = h.get_shape().as_list()

    pool0 = tf.contrib.layers.avg_pool2d(h, [shape[1], shape[2]], scope='global_pool0')
    fc0 = layers.conv(pool0, num_outputs=FLAGS.num_classes, kernel_size=[1, 1], scope='fc0', padding='VALID',
                      b_norm=True, is_training=is_training, weight_decay=FLAGS.weight_decay, activation_fn=None)

    for i in range(1, 4):
        h = block(h, 64, FLAGS.weight_decay, '64_block{}'.format(i), is_training)

    shape = h.get_shape().as_list()

    pool1 = tf.contrib.layers.avg_pool2d(h, [shape[1], shape[2]], scope='global_pool1')
    fc1 = layers.conv(pool1, num_outputs=FLAGS.num_classes, kernel_size=[1, 1], scope='fc1', padding='VALID',
                      b_norm=True, is_training=is_training, weight_decay=FLAGS.weight_decay, activation_fn=None)

    h = block(h, 128, FLAGS.weight_decay, '128_block1', is_training, True)
    for i in range(2, 4):
        h = block(h, 128, FLAGS.weight_decay, '128_block{}'.format(i), is_training)
    
    shape = h.get_shape().as_list()

    pool2 = tf.contrib.layers.avg_pool2d(h, [shape[1], shape[2]], scope='global_pool2')
    fc2 = layers.conv(pool2, num_outputs=FLAGS.num_classes, kernel_size=[1, 1], scope='fc2', padding='VALID',
                      b_norm=True, is_training=is_training, weight_decay=FLAGS.weight_decay, activation_fn=None)

    h = block(h, 256, FLAGS.weight_decay, '256_block1', is_training, True)
    for i in range(2, 6):
        h = block(h, 256, FLAGS.weight_decay, '256_block{}'.format(i), is_training)
    
    shape = h.get_shape().as_list()

    pool3 = tf.contrib.layers.avg_pool2d(h, [shape[1], shape[2]], scope='global_pool3')
    
    fc3 = layers.conv(pool3, num_outputs=FLAGS.num_classes, kernel_size=[1, 1], scope='fc3', padding='VALID',
                      b_norm=True, is_training=is_training, weight_decay=FLAGS.weight_decay, activation_fn=None)

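    # fc0..fc3 each have shape [batch, 1, 1, num_classes], so the element-wise
    # sum below ensembles classifiers attached at four different depths.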
    h = fc0 + fc1 + fc2 + fc3
    return tf.reshape(h, [-1, FLAGS.num_classes])
def block(inputs, num_outputs, weight_decay, scope, is_training, down_sample=False):
    with tf.variable_scope(scope):
        num_inputs = inputs.get_shape().as_list()[3]

        res = layers.conv(inputs, num_outputs=num_outputs, kernel_size=[3, 3], activation_fn=tf.nn.elu,
                          strides=[1, 2, 2, 1] if down_sample else [1, 1, 1, 1],
                          scope='conv1', b_norm=True, is_training=is_training, weight_decay=weight_decay)

        res = layers.conv(res, num_outputs=num_outputs, kernel_size=[3, 3], activation_fn=None,
                          scope='conv2', b_norm=True, is_training=is_training, weight_decay=weight_decay)

        # Project the shortcut whenever the residual path changes shape (channel
        # growth and/or the stride-2 first conv) so the element-wise sum is valid.
        if down_sample or num_inputs != num_outputs:
            inputs = layers.conv(inputs, num_outputs=num_outputs, kernel_size=[1, 1], activation_fn=None,
                                 scope='short_cut',
                                 strides=[1, 2, 2, 1] if down_sample else [1, 1, 1, 1],
                                 b_norm=True, is_training=is_training, weight_decay=weight_decay)
        res = tf.nn.elu(res + inputs)

    return res
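
A smoke test for block, assuming the same layers helper; it checks that the down-sampling path halves the spatial size while the identity path preserves it (sketch):

x = tf.placeholder(tf.float32, [None, 32, 32, 16])
flag = tf.placeholder(tf.bool)
same = block(x, 16, 1e-4, 'same', flag)                    # identity shortcut
down = block(x, 32, 1e-4, 'down', flag, down_sample=True)  # projected shortcut
print(same.get_shape().as_list())  # [None, 32, 32, 16]
print(down.get_shape().as_list())  # [None, 16, 16, 32]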