Example No. 1
    def __init__(self, layers=16, use_bn=False, num_classes=1000):
        super(VGG, self).__init__()
        self.layers = layers
        self.use_bn = use_bn
        supported_layers = [16, 19]
        assert layers in supported_layers

        if layers == 16:
            depth = [2, 2, 3, 3, 3]
        elif layers == 19:
            depth = [2, 2, 4, 4, 4]

        num_channels = [3, 64, 128, 256, 512]
        num_filters = [64, 128, 256, 512, 512]

        self.layer1 = fluid.dygraph.Sequential(*self.make_layer(num_channels[0], num_filters[0], depth[0], use_bn, name='layer1'))
        self.layer2 = fluid.dygraph.Sequential(*self.make_layer(num_channels[1], num_filters[1], depth[1], use_bn, name='layer2'))
        self.layer3 = fluid.dygraph.Sequential(*self.make_layer(num_channels[2], num_filters[2], depth[2], use_bn, name='layer3'))
        self.layer4 = fluid.dygraph.Sequential(*self.make_layer(num_channels[3], num_filters[3], depth[3], use_bn, name='layer4'))
        self.layer5 = fluid.dygraph.Sequential(*self.make_layer(num_channels[4], num_filters[4], depth[4], use_bn, name='layer5'))

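        # classification head: two dropout-regularized 4096-d FC layers followed by the class logits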
        self.classifier = fluid.dygraph.Sequential(
                Linear(input_dim=512 * 7 * 7, output_dim=4096, act='relu'),
                Dropout(),
                Linear(input_dim=4096, output_dim=4096, act='relu'),
                Dropout(),
                Linear(input_dim=4096, output_dim=num_classes))
                
        self.out_dim = 512 * 7 * 7
Example No. 2
    def __init__(self, num_classes=59, backbone='resnet50'):
        super(PSPNet, self).__init__()

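        # ResNet-50 backbone: stem (conv + max pool) followed by four residual stages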
        res = ResNet50(pretrained=False)
        # stem: res.conv, res.pool2d_max
        self.layer0 = fluid.dygraph.Sequential(
            res.conv,
            res.pool2d_max
        )
        self.layer1 = res.layer1
        self.layer2 = res.layer2
        self.layer3 = res.layer3
        self.layer4 = res.layer4

        num_channels = 2048
        # psp: 2048 -> 2048*2
        self.pspmodule = PSPModule(num_channels, [1, 2, 3, 6])
        num_channels *= 2

        # cls: 2048*2 -> 512 -> num_classes
        self.classifier = fluid.dygraph.Sequential(
            Conv2D(num_channels, num_filters=512, filter_size=3, padding=1),
            BatchNorm(512, act='relu'),
            Dropout(0.1),
            Conv2D(512, num_classes, filter_size=1)
        )
Example No. 3
    def __init__(self, num_classes=1):
        super(CNN_AllTricks, self).__init__()

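        # four 3x3 conv/BN stages with leaky ReLU, then two multi-branch ConcatConv blocks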
        self.conv1 = Conv2D(3, 64, 3, padding=1, stride=1, act='leaky_relu')
        self.bn1 = BatchNorm(64)
        self.conv2 = Conv2D(64, 128, 3, padding=1, stride=1, act='leaky_relu')
        self.bn2 = BatchNorm(128)
        self.conv3 = Conv2D(128, 256, 3, padding=1, stride=1, act='leaky_relu')
        self.bn3 = BatchNorm(256)
        self.conv4 = Conv2D(256, 512, 3, padding=1, stride=1, act='leaky_relu')
        self.bn4 = BatchNorm(512)
        self.block5 = ConcatConv((512, 384), (384, 256), (256, 256),
                                 128,
                                 act_fun='leaky_relu')
        self.bn5 = BatchNorm(1024)
        self.block6 = ConcatConv((1024, 384), (384, 256), (256, 256),
                                 128,
                                 act_fun='leaky_relu')
        self.bn6 = BatchNorm(1024)

        self.pool_global = Pool2D(pool_stride=1,
                                  global_pooling=True,
                                  pool_type='avg')
        self.fc = Linear(input_dim=1024, output_dim=num_classes)

        self.pool_down = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.dropout = Dropout(p=0.5)
Example No. 4
    def __init__(self,
                 block=BasicBlock,
                 layers=(3, 4, 6, 3),  # assumed per-stage block counts; indexed as layers[0..3] below
                 inp=3,
                 num_classes=400,
                 input_size=112,
                 dropout=0.5):
        self.inplanes = 64
        self.inp = inp
        super(ResNet, self).__init__()
        self.conv1 = Conv2D(inp,
                            64,
                            filter_size=7,
                            stride=2,
                            padding=3,
                            bias_attr=False)
        self.bn1 = BatchNorm(64)
        self.relu = fluid.layers.relu  #nn.ReLU(inplace=True)
        self.maxpool = Pool2D(
            pool_size=3, pool_stride=2, pool_padding=1,
            pool_type='max')  #nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.rep_of_rep = repofrep("flowofflow")
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        # probably need to adjust this based on input spatial size
        size = int(math.ceil(input_size / 32))
        self.avgpool = Pool2D(pool_size=size,
                              pool_stride=1,
                              pool_padding=0,
                              pool_type='avg')  #nn.AvgPool2d(size, stride=1)
        self.dropout = Dropout(dropout)  #nn.Dropout(p=dropout)
        self.fc = Linear(512 * block.expansion, num_classes)
Example No. 5
    def __init__(self, num_classes=1):
        super(AlexNet, self).__init__()

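        # AlexNet feature extractor: five conv layers with overlapping 3x3 / stride-2 max pooling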
        self.conv1 = Conv2D(num_channels=3, num_filters=96, filter_size=11, stride=4, padding=2, act='relu')
        self.pool1 = Pool2D(pool_size=3, pool_stride=2, pool_type='max')
        self.conv2 = Conv2D(num_channels=96, num_filters=256, filter_size=5, stride=1, padding=2, act='relu')
        self.pool2 = Pool2D(pool_size=3, pool_stride=2, pool_type='max')
        self.conv3 = Conv2D(num_channels=256, num_filters=384, filter_size=3, stride=1, padding=1, act='relu')
        self.conv4 = Conv2D(num_channels=384, num_filters=384, filter_size=3, stride=1, padding=1, act='relu')
        self.conv5 = Conv2D(num_channels=384, num_filters=256, filter_size=3, stride=1, padding=1, act='relu')
        self.pool5 = Pool2D(pool_size=3, pool_stride=2, pool_type='max')

        self.fc1 = Linear(256 * 6 * 6, 4096, act='relu')
        self.drop_out1 = Dropout(p=0.5)
        self.fc2 = Linear(4096, 4096, act='relu')
        self.drop_out2 = Dropout(p=0.5)
        self.fc3 = Linear(4096, num_classes, act='relu')
Example No. 6
    def _prepare_tsn(self, num_class):
        # swap the backbone's final FC layer for Dropout and attach a new softmax classifier
        feature_dim = getattr(self.base_model,
                              self.base_model.last_layer_name).weight.shape[0]
        setattr(self.base_model, self.base_model.last_layer_name,
                Dropout(p=self.dropout))
        self.new_fc = Linear(feature_dim,
                             num_class,
                             act='softmax',
                             param_attr=fluid.ParamAttr(learning_rate=1.0),
                             bias_attr=fluid.ParamAttr(learning_rate=2.0))
Example No. 7
    def __init__(self, hidden_unit_num, image_size, in_channel, patch_num,
                 dropout):
        super(ResNet24, self).__init__()
        self.patch_size = image_size // patch_num
        self.patch_num = patch_num
        n_patch_size = (image_size // patch_num) * (image_size // patch_num)
        self.position_embedding = fluid.layers.create_parameter(
            (1, n_patch_size + 1, hidden_unit_num),
            dtype='float32',
            is_bias=True)
        self.cls_token = fluid.layers.create_parameter((1, 1, hidden_unit_num),
                                                       is_bias=False,
                                                       dtype='float32')
        self.dropout = Dropout(dropout)

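        # stacked residual BasicBlocks that progressively downsample and widen features up to hidden_unit_num channels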
        self.layer1_1 = BasicBlock(num_channels=in_channel,
                                   num_filters=24,
                                   stride=1,
                                   shortcut=False)
        self.layer1_2 = BasicBlock(num_channels=24,
                                   num_filters=48,
                                   stride=2,
                                   shortcut=False)
        self.layer2_1 = BasicBlock(num_channels=48,
                                   num_filters=48,
                                   stride=1,
                                   shortcut=False)
        self.layer2_2 = BasicBlock(num_channels=48,
                                   num_filters=48,
                                   stride=2,
                                   shortcut=False)
        self.layer3_1 = BasicBlock(num_channels=48,
                                   num_filters=128,
                                   stride=1,
                                   shortcut=False)
        self.layer3_2 = BasicBlock(num_channels=128,
                                   num_filters=256,
                                   stride=2,
                                   shortcut=False)
        self.layer4_1 = BasicBlock(num_channels=256,
                                   num_filters=256,
                                   stride=1,
                                   shortcut=False)
        self.layer4_2 = BasicBlock(num_channels=256,
                                   num_filters=256,
                                   stride=1,
                                   shortcut=False)
        self.layer5_1 = BasicBlock(num_channels=256,
                                   num_filters=512,
                                   stride=1,
                                   shortcut=False)
        self.layer5_2 = BasicBlock(num_channels=512,
                                   num_filters=hidden_unit_num,
                                   stride=2,
                                   shortcut=False)
Example No. 8
    def __init__(self, num_classes=59):
        super(FCN8s, self).__init__()
        backbone = VGG16BN(pretrained=False)

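        # VGG16-BN backbone stages; the first conv is padded by 100 pixels, as in the reference FCN implementation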
        self.layer1 = backbone.layer1
        self.layer1[0].conv._padding = [100, 100]
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        self.layer2 = backbone.layer2
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        self.layer3 = backbone.layer3
        self.pool3 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        self.layer4 = backbone.layer4
        self.pool4 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        self.layer5 = backbone.layer5
        self.pool5 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)

        self.fc6 = Conv2D(512, 4096, 7, act='relu')
        self.fc7 = Conv2D(4096, 4096, 1, act='relu')
        self.drop6 = Dropout()
        self.drop7 = Dropout()

        self.score = Conv2D(4096, num_classes, 1)
        self.score_pool3 = Conv2D(256, num_classes, 1)
        self.score_pool4 = Conv2D(512, num_classes, 1)

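        # transposed convolutions: two 2x upsampling steps and a final 8x upsampling of the score maps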
        self.up_output = Conv2DTranspose(num_channels=num_classes,
                                        num_filters=num_classes,
                                        filter_size=4,
                                        stride=2,
                                        bias_attr=False)

        self.up_pool4 = Conv2DTranspose(num_channels=num_classes,
                                        num_filters=num_classes,
                                        filter_size=4,
                                        stride=2,
                                        bias_attr=False)

        self.up_final = Conv2DTranspose(num_channels=num_classes,
                                        num_filters=num_classes,
                                        filter_size=16,
                                        stride=8,
                                        bias_attr=False)                                
Example No. 9
    def __init__(self, name_scope, out_chs=20, in_chs=1024, inter_chs=512):
        super(DANet, self).__init__(name_scope)
        name_scope = self.full_name()
        self.in_chs = in_chs
        self.out_chs = out_chs
        self.inter_chs = inter_chs if inter_chs else in_chs

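        # dual-attention head: parallel position-attention (PAM) and channel-attention (CAM) branches over ResNet-50 features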
        self.backbone = ResNet(50)
        self.conv5p = Sequential(
            Conv2D(self.in_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'),
        )
        self.conv5c = Sequential(
            Conv2D(self.in_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'),
        )

        self.sp = PAM_module(self.inter_chs)
        self.sc = CAM_module(self.inter_chs)

        self.conv6p = Sequential(
            Conv2D(self.inter_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'),
        )
        self.conv6c = Sequential(
            Conv2D(self.inter_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'),
        )

        self.conv7p = Sequential(
            Dropout(0.1),
            Conv2D(self.inter_chs, self.out_chs, 1),
        )
        self.conv7c = Sequential(
            Dropout(0.1),
            Conv2D(self.inter_chs, self.out_chs, 1),
        )
        self.conv7pc = Sequential(
            Dropout(0.1),
            Conv2D(self.inter_chs, self.out_chs, 1),
        )
Example No. 10
    def __init__(self, num_classes=59):
        super(FCN8s, self).__init__()
        vgg16bn = VGG16BN()
        self.layer1 = vgg16bn.layer1
        self.layer1[0].conv._padding = [100, 100]
        self.layer2 = vgg16bn.layer2
        self.layer3 = vgg16bn.layer3
        self.layer4 = vgg16bn.layer4
        self.layer5 = vgg16bn.layer5

        # self.conv1_1 = Conv2D(3, 64, 3, padding=1)
        # self.conv1_2 = Conv2D(64, 64, 3, padding=1)
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        # self.conv2_1 = Conv2D(64, 128, 3, padding=1)
        # self.conv2_2 = Conv2D(128, 128, 3, padding=1)
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        # self.conv3_1 = Conv2D(128, 256, 3, padding=1)
        # self.conv3_2 = Conv2D(256, 256, 3, padding=1)
        # self.conv3_3 = Conv2D(256, 256, 3, padding=1)
        self.pool3 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        # self.conv4_1 = Conv2D(256, 512, 3, padding=1)
        # self.conv4_2 = Conv2D(512, 512, 3, padding=1)
        # self.conv4_3 = Conv2D(512, 512, 3, padding=1)
        self.pool4 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        # self.conv5_1 = Conv2D(512, 512, 3, padding=1)
        # self.conv5_2 = Conv2D(512, 512, 3, padding=1)
        # self.conv5_3 = Conv2D(512, 512, 3, padding=1)
        self.pool5 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        self.conv6 = Conv2D(512, 4096, 1, act='relu')
        self.conv7 = Conv2D(4096, 4096, 1, act='relu')
        self.drop6 = Dropout()
        self.drop7 = Dropout()

        self.score = Conv2D(4096, num_classes, 1)
        self.score_pool3 = Conv2D(256, num_classes, 1)
        self.score_pool4 = Conv2D(512, num_classes, 1)
        self.upsample1 = Conv2DTranspose(num_classes, num_classes, filter_size=4, stride=2, padding=2, bias_attr=False)
        self.upsample2 = Conv2DTranspose(num_classes, num_classes, filter_size=4, stride=2, padding=2, bias_attr=False)
        self.upsample3 = Conv2DTranspose(num_classes, num_classes, filter_size=16, stride=8, padding=1, bias_attr=False)
Example No. 11
    def __init__(self, num_classes=1):
        super(GoogLeNet_BN, self).__init__()

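        # shared BatchNorm layers, one for each channel width produced by the stages below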
        self.bn64 = BatchNorm(64)
        self.bn192 = BatchNorm(192)
        self.bn256 = BatchNorm(256)
        self.bn480 = BatchNorm(480)
        self.bn512 = BatchNorm(512)
        self.bn528 = BatchNorm(528)
        self.bn832 = BatchNorm(832)
        self.bn1024 = BatchNorm(1024)

        self.conv1 = Conv2D(3, 64, 7, padding=3, stride=2, act='relu')
        self.pool1 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')
        self.conv2_1 = Conv2D(64, 64, 1, act='relu')
        self.conv2_2 = Conv2D(64, 192, 3, padding=1, act='relu')
        self.pool2 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')
        self.block3_a = Inception((192, 64), (96, 128), (16, 32), 32)
        self.block3_b = Inception((256, 128), (128, 192), (32, 96), 64)
        self.pool3 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')
        self.block4_a = Inception((480, 192), (96, 208), (16, 48), 64)
        self.block4_b = Inception((512, 160), (112, 224), (24, 64), 64)
        self.block4_c = Inception((512, 128), (128, 256), (24, 64), 64)
        self.block4_d = Inception((512, 112), (144, 288), (32, 64), 64)
        self.block4_e = Inception((528, 256), (160, 320), (32, 128), 128)
        self.pool4 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')
        self.block5_a = Inception((832, 256), (160, 320), (32, 128), 128)
        self.block5_b = Inception((832, 384), (192, 384), (48, 128), 128)
        self.pool5 = Pool2D(pool_size=7,
                            pool_stride=1,
                            global_pooling=True,
                            pool_type='avg')
        self.drop = Dropout(p=0.4)
        self.fc = Linear(1024, num_classes)
Example No. 12
    def __init__(self, num_classes=1):
        super(CNN_ConcatConv, self).__init__()

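        # sigmoid-activated 5x5 conv/BN stack; block5 is a multi-branch ConcatConv whose branches concatenate to 1024 channels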
        self.conv1 = Conv2D(3, 64, 5, padding=2, stride=1, act='sigmoid')
        self.bn1 = BatchNorm(64)
        self.conv2 = Conv2D(64, 128, 5, padding=2, stride=1, act='sigmoid')
        self.bn2 = BatchNorm(128)
        self.conv3 = Conv2D(128, 256, 5, padding=2, stride=1, act='sigmoid')
        self.bn3 = BatchNorm(256)
        self.conv4 = Conv2D(256, 512, 5, padding=2, stride=1, act='sigmoid')
        self.bn4 = BatchNorm(512)
        self.block5 = ConcatConv(self.full_name(), (512, 384), (384, 256),
                                 (256, 256), 128)
        self.bn5 = BatchNorm(1024)
        self.conv6 = Conv2D(1024, 1024, 5, padding=2, stride=1, act='sigmoid')
        self.bn6 = BatchNorm(1024)

        self.fc1 = Linear(1024 * 7 * 7, 1024, act='sigmoid')
        self.fc2 = Linear(1024, num_classes)

        self.pool_down = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.dropout = Dropout(p=0.5)
Example No. 13
    def __init__(self, num_classes=1):
        super(CNN_DropOut, self).__init__()

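        # six sigmoid-activated 5x5 conv/BN stages feeding a fully connected head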
        self.conv1 = Conv2D(3, 64, 5, padding=2, stride=1, act='sigmoid')
        self.bn1 = BatchNorm(64)
        self.conv2 = Conv2D(64, 128, 5, padding=2, stride=1, act='sigmoid')
        self.bn2 = BatchNorm(128)
        self.conv3 = Conv2D(128, 256, 5, padding=2, stride=1, act='sigmoid')
        self.bn3 = BatchNorm(256)
        self.conv4 = Conv2D(256, 512, 5, padding=2, stride=1, act='sigmoid')
        self.bn4 = BatchNorm(512)
        self.conv5 = Conv2D(512, 1024, 5, padding=2, stride=1, act='sigmoid')
        self.bn5 = BatchNorm(1024)
        self.conv6 = Conv2D(1024, 1024, 5, padding=2, stride=1, act='sigmoid')
        self.bn6 = BatchNorm(1024)

        self.fc1 = Linear(1024 * 7 * 7, 1024, act='sigmoid')
        self.fc2 = Linear(1024, num_classes)

        self.pool_down = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # randomly drop 50% of the neurons
        self.dropout = Dropout(p=0.5)
Example No. 14
    def __init__(self, in_planes):
        super(BasicLiner, self).__init__()
        self.fc = linear(in_planes, in_planes)
        self.bn = norm_layer(in_planes)
        self.act = act_layer
        self.dp = Dropout()
Example No. 15
    def __init__(self, num_classes=1):
        super(VGG13, self).__init__()

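        # VGG-13-style extractor: eight 3x3 conv layers (64/128/256/512 channel stages) with shared 3x3 / stride-2 max pooling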
        self.drop_out = Dropout(p=0.5)
        self.pool = Pool2D(pool_size=3, pool_stride=2, pool_type='max')

        self.conv1 = Conv2D(num_channels=3,
                            num_filters=64,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')

        self.conv2 = Conv2D(num_channels=64,
                            num_filters=128,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')

        self.conv3 = Conv2D(num_channels=128,
                            num_filters=256,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv4 = Conv2D(num_channels=256,
                            num_filters=256,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')

        self.conv5 = Conv2D(num_channels=256,
                            num_filters=512,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv6 = Conv2D(num_channels=512,
                            num_filters=512,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')

        self.conv7 = Conv2D(num_channels=512,
                            num_filters=512,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv8 = Conv2D(num_channels=512,
                            num_filters=512,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')

        self.fc1 = Linear(512 * 6 * 6, 4096, act='relu')
        self.fc2 = Linear(4096, 4096, act='relu')
        self.fc3 = Linear(4096, num_classes, act='relu')
Example No. 16
def build_dropout(attr, channels=None, conv_bias=False):
    return Dropout(p=attr['dropout_ratio']), channels
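
A minimal usage sketch (the attr dict below is hypothetical; build_dropout only reads its 'dropout_ratio' key):

# hypothetical layer config; channels is passed through unchanged
drop_layer, out_channels = build_dropout({'dropout_ratio': 0.5}, channels=256)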
Example No. 17
    def __init__(self,
                 batch_size,
                 block_fn,
                 layers,
                 num_classes,
                 data_format='channels_last',
                 non_local=[],
                 rep_flow=[],
                 size=112,
                 dropout_keep_prob=0.5):
        """Generator for ResNet v1 models.
  Args:
    block_fn: `function` for the block to use within the model. Either
        `residual_block` or `bottleneck_block`.
    layers: list of 4 `int`s denoting the number of blocks to include in each
      of the 4 block groups. Each group consists of blocks that take inputs of
      the same resolution.
    num_classes: `int` number of possible classes for image classification.
    data_format: `str` either "channels_first" for `[batch, channels, height,
        width]` or "channels_last" for `[batch, height, width, channels]`.
  Returns:
    Model `function` that takes in `inputs` and `is_training` and returns the
    output `Tensor` of the ResNet model.
    """
        super(ResNet3D, self).__init__()
        is_training = False

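        # 7x7 / stride-2 stem convolution with MSRA-initialized weights and a non-trainable bias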
        self.stem = Conv2D(
            3,
            64,
            filter_size=7,
            bias_attr=fluid.ParamAttr(trainable=False),
            stride=2,
            padding=3,
            param_attr=fluid.initializer.MSRAInitializer(uniform=False))

        self.bn1 = BatchNorm(64, act='relu')
        self.num_classes = num_classes

        self.rep_flow = rf.FlowLayer(batch_size,
                                     block_fn.expansion * 128,
                                     bn=False)
        self.flow_conv = Conv2D(512, 512, filter_size=3, stride=1, padding=1)
        self.rep_flow2 = rf.FlowLayer(batch_size, 512)
        # res 2
        inputs = 64
        self.res2 = Block3D(block_ind=1,
                            inputs=inputs,
                            filters=64,
                            block_fn=block_fn,
                            blocks=layers[0],
                            strides=1,
                            is_training=is_training,
                            name='block_group1',
                            data_format=data_format,
                            non_local=non_local[0])

        # res 3
        inputs = 64 * block_fn.expansion
        self.res3 = Block3D(block_ind=2,
                            inputs=inputs,
                            filters=128,
                            block_fn=block_fn,
                            blocks=layers[1],
                            strides=2,
                            is_training=is_training,
                            name='block_group2',
                            data_format=data_format,
                            non_local=non_local[1])

        # res 4
        inputs = 128 * block_fn.expansion
        self.res4 = Block3D(block_ind=3,
                            inputs=inputs,
                            filters=256,
                            block_fn=block_fn,
                            blocks=layers[2],
                            strides=2,
                            is_training=is_training,
                            name='block_group3',
                            data_format=data_format,
                            non_local=non_local[2])

        # res 5
        inputs = 256 * block_fn.expansion
        self.res5 = Block3D(block_ind=4,
                            inputs=inputs,
                            filters=512,
                            block_fn=block_fn,
                            blocks=layers[3],
                            strides=2,
                            is_training=is_training,
                            name='block_group4',
                            data_format=data_format,
                            non_local=non_local[3])

        self.dropout = Dropout(0.9)

        self.classify = Linear(
            input_dim=512 * block_fn.expansion,
            output_dim=num_classes,
            act='softmax',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.MSRAInitializer(uniform=True)))