Example #1
def resnet_101_base(img_batch, is_training):
    '''
        This code is from Light-Head R-CNN.

        It makes it convenient to freeze backbone blocks.
        @author: jemmy li
        @contact: [email protected]
    '''

    bottleneck = resnet_v1.bottleneck
    blocks = [
        modified_resnet_v1_block('block1', bottleneck,
                                 [(256, 64, 1, 1)] * 2 + [(256, 64, 1, 1)]),
        modified_resnet_v1_block('block2', bottleneck,
                                 [(512, 128, 2, 1)] + [(512, 128, 1, 1)] * 3),
        modified_resnet_v1_block('block3', bottleneck,
                                 [(1024, 256, 2, 1)] + [(1024, 256, 1, 1)] * 22)
    ]
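    # The three Blocks above are blocks 1-3 of ResNet-101, with 3, 4 and 23
    # bottleneck units respectively; block4 is built separately in the head
    # (see Example #2). Each 4-tuple presumably encodes
    # (depth, depth_bottleneck, stride, rate) and is interpreted by
    # modified_resnet_v1_block from the original codebase.
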
    # The stem (conv1 + max-pool) below is always frozen; how much of the
    # deeper backbone trains is controlled by cfgs.FIXED_BLOCKS further down.
    with slim.arg_scope(resnet_arg_scope(is_training=False)):
        with tf.variable_scope('resnet_v1_101', 'resnet_v1_101'):
            net = resnet_utils.conv2d_same(img_batch,
                                           64,
                                           7,
                                           stride=2,
                                           scope='conv1')
            net = slim.max_pool2d(net, [3, 3],
                                  stride=2,
                                  padding='SAME',
                                  scope='pool1')

    not_freezed = [False] * cfgs.FIXED_BLOCKS + [True] * (4 - cfgs.FIXED_BLOCKS)
    # cfgs.FIXED_BLOCKS can be 1~3: the first FIXED_BLOCKS blocks stay frozen,
    # the rest are trainable when is_training is True.

    with slim.arg_scope(
            resnet_arg_scope(is_training=is_training and not_freezed[0])):
        C2, _ = resnet_v1.resnet_v1(net,
                                    blocks[0:1],
                                    global_pool=False,
                                    include_root_block=False,
                                    scope='resnet_v1_101')

    with slim.arg_scope(
            resnet_arg_scope(is_training=is_training and not_freezed[1])):
        C3, _ = resnet_v1.resnet_v1(C2,
                                    blocks[1:2],
                                    global_pool=False,
                                    include_root_block=False,
                                    scope='resnet_v1_101')

    with slim.arg_scope(
            resnet_arg_scope(is_training=is_training and not_freezed[2])):
        C4, _ = resnet_v1.resnet_v1(C3,
                                    blocks[2:3],
                                    global_pool=False,
                                    include_root_block=False,
                                    scope='resnet_v1_101')

    # C4, the output of block3, is the shared base feature map; block4 is
    # applied separately in the detection head (see Example #2).
    return C4
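
The freezing logic above hinges on not_freezed: its first cfgs.FIXED_BLOCKS entries are False, so "is_training and not_freezed[i]" is False for those blocks and they are built in frozen (non-trainable) mode. The following stand-alone sketch reproduces just that computation; FIXED_BLOCKS = 1 is a hypothetical example value standing in for cfgs.FIXED_BLOCKS.

# Stand-alone sketch of the block-freezing logic; FIXED_BLOCKS = 1 is an
# assumed example value (the real one comes from the project's cfgs module).
FIXED_BLOCKS = 1
is_training = True

not_freezed = [False] * FIXED_BLOCKS + [True] * (4 - FIXED_BLOCKS)
print(not_freezed)  # [False, True, True, True]

# Blocks 1-3 are each built with "is_training and not_freezed[i]":
print([is_training and flag for flag in not_freezed[:3]])
# [False, True, True] -> block1 stays frozen, block2 and block3 are trainable
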
Example #2
def restnet_head(input, is_training, scope):
    # Build block4 of ResNet-101 on top of the shared C4 feature map and
    # reduce it with global average pooling to a flat per-image feature.
    block4 = [
        modified_resnet_v1_block('block4', resnet_v1.bottleneck,
                                 [(2048, 512, 1, 2)] + [(2048, 512, 1, 2)] * 2)
    ]

    with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
        C5_flatten, _ = resnet_v1.resnet_v1(input,
                                            block4,
                                            global_pool=True,
                                            spatial_squeeze=True,
                                            include_root_block=False,
                                            scope=scope)
    # global_pool=True averages C5 over its spatial dimensions; the pooled
    # feature is what the subsequent fully connected layers consume.
    return C5_flatten
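
The head's block4 expands to three identical bottleneck units. Below is a quick plain-Python check, with the 4-tuple assumed (as in Example #1) to mean (depth, depth_bottleneck, stride, rate) inside modified_resnet_v1_block:

# Hypothetical reading of the block4 unit configs above; the exact meaning of
# the 4-tuple is defined by modified_resnet_v1_block in the original codebase.
units = [(2048, 512, 1, 2)] + [(2048, 512, 1, 2)] * 2
assert units == [(2048, 512, 1, 2)] * 3
for depth, depth_bottleneck, stride, rate in units:
    print(depth, depth_bottleneck, stride, rate)
# 2048 512 1 2 (three times): output depth 2048, bottleneck depth 512,
# stride 1 and, presumably, atrous rate 2, so block4 keeps the spatial
# resolution of the incoming C4 feature map instead of downsampling it.

In the full pipeline, restnet_head would presumably be applied to the C4 tensor returned by resnet_101_base, with scope set to the same 'resnet_v1_101' name so the pretrained block4 weights can be restored.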