Example #1
    def __init__(self, name_scope, dict_dim, batch_size, seq_len):
        super(CNN, self).__init__(name_scope)
        self.dict_dim = dict_dim
        self.emb_dim = 128
        self.hid_dim = 128
        self.fc_hid_dim = 96
        self.class_dim = 2
        self.win_size = [3, self.hid_dim]
        self.batch_size = batch_size
        self.seq_len = seq_len
        self.embedding = Embedding(
            self.full_name(),
            size=[self.dict_dim + 1, self.emb_dim],
            dtype='float32',
            is_sparse=False)

        self._simple_conv_pool_1 = SimpleConvPool(
            self.full_name(),
            self.hid_dim,
            self.win_size,
            batch_size=self.batch_size)
        self._fc1 = FC(self.full_name(), size=self.fc_hid_dim, act="softmax")
        self._fc_prediction = FC(self.full_name(),
                                 size=self.class_dim,
                                 act="softmax")
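For context, a hedged sketch of how a forward pass for this text-classification CNN might look; it is not part of the scraped example, and it assumes paddle.fluid is imported as fluid and that SimpleConvPool expects a [batch, 1, seq_len, hid_dim] input:

    def forward(self, inputs):
        # look up word embeddings, then reshape to a 4-D tensor for the conv-pool block
        emb = self.embedding(inputs)
        emb = fluid.layers.reshape(emb, shape=[-1, 1, self.seq_len, self.hid_dim])
        conv_pool = self._simple_conv_pool_1(emb)
        hidden = self._fc1(conv_pool)
        prediction = self._fc_prediction(hidden)
        return prediction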
Example #2
 def __init__(self, name_scope, num_classes=1):
     super(LeNet, self).__init__(name_scope)
     name_scope = self.full_name()
     # Build the convolution and pooling blocks: each convolution layer uses a sigmoid activation and is followed by 2x2 pooling
     self.conv1 = Conv2D(name_scope,
                         num_filters=6,
                         filter_size=5,
                         act='sigmoid')
     self.pool1 = Pool2D(name_scope,
                         pool_size=2,
                         pool_stride=2,
                         pool_type='max')
     self.conv2 = Conv2D(name_scope,
                         num_filters=16,
                         filter_size=5,
                         act='sigmoid')
     self.pool2 = Pool2D(name_scope,
                         pool_size=2,
                         pool_stride=2,
                         pool_type='max')
     # Create the third convolution layer
     self.conv3 = Conv2D(name_scope,
                         num_filters=120,
                         filter_size=4,
                         act='sigmoid')
     # Create the fully connected layers: the first outputs 64 neurons, the second outputs one neuron per class label
     self.fc1 = FC(name_scope, size=64, act='sigmoid')
     self.fc2 = FC(name_scope, size=num_classes)
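A plausible forward pass for this LeNet variant (an assumption, not part of the scraped snippet), using only the layers defined above; the FC layers flatten their input automatically since num_flatten_dims defaults to 1:

    def forward(self, x):
        # conv -> pool -> conv -> pool -> conv, then two fully connected layers
        x = self.conv1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.fc1(x)
        x = self.fc2(x)   # logits, one per class
        return x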
Example #3
    def __init__(self, name_scope):
        super(MNIST, self).__init__(name_scope)

        self._simple_img_conv_pool_1 = SimpleImgConvPool(self.full_name(),
                                                         1,
                                                         20,
                                                         5,
                                                         2,
                                                         2,
                                                         act="relu")

        self._simple_img_conv_pool_2 = SimpleImgConvPool(self.full_name(),
                                                         20,
                                                         50,
                                                         5,
                                                         2,
                                                         2,
                                                         act="relu")

        pool_2_shape = 50 * 4 * 4
        SIZE = 10
        scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5

        self._fc1 = FC(self.full_name(), 500)
        self._fc2 = FC(self.full_name(), 10)
Example #4
    def __init__(self, name_scope, embedding_dim, units, vocab_size):
        super(RNNDecoder, self).__init__(name_scope)

        self.units = units

        self.embedding = Embedding(self.full_name(),
                                   (vocab_size, embedding_dim))
        self.gru = DynamicGRU(self.full_name(), units)
        self.fc1 = FC(self.full_name(), size=units, num_flatten_dims=2)
        self.fc2 = FC(self.full_name(), vocab_size)
        self.attention = BahdanauAttention(self.full_name(), units)
Example #5
    def __init__(self,
                 scope_name,
                 rnn_hidden_size=200,
                 is_test=False,
                 use_cudnn=True):
        super(EncoderNet, self).__init__(scope_name)
        self.rnn_hidden_size = rnn_hidden_size
        para_attr = fluid.ParamAttr(initializer=fluid.initializer.Normal(0.0,
                                                                         0.02))
        bias_attr = fluid.ParamAttr(
            initializer=fluid.initializer.Normal(0.0, 0.02), learning_rate=2.0)
        if fluid.framework.in_dygraph_mode():
            h_0 = np.zeros(
                (Config.batch_size, rnn_hidden_size), dtype="float32")
            h_0 = to_variable(h_0)
        else:
            h_0 = fluid.layers.fill_constant(
                shape=[Config.batch_size, rnn_hidden_size],
                dtype='float32',
                value=0)
        self.ocr_convs = OCRConv(
            self.full_name(), is_test=is_test, use_cudnn=use_cudnn)

        self.fc_1_layer = FC(self.full_name(),
                             rnn_hidden_size * 3,
                             param_attr=para_attr,
                             bias_attr=False,
                             num_flatten_dims=2)
        self.fc_2_layer = FC(self.full_name(),
                             rnn_hidden_size * 3,
                             param_attr=para_attr,
                             bias_attr=False,
                             num_flatten_dims=2)
        self.gru_forward_layer = DynamicGRU(
            self.full_name(),
            size=rnn_hidden_size,
            h_0=h_0,
            param_attr=para_attr,
            bias_attr=bias_attr,
            candidate_activation='relu')
        self.gru_backward_layer = DynamicGRU(
            self.full_name(),
            size=rnn_hidden_size,
            h_0=h_0,
            param_attr=para_attr,
            bias_attr=bias_attr,
            candidate_activation='relu',
            is_reverse=True)

        self.encoded_proj_fc = FC(self.full_name(),
                                  Config.decoder_size,
                                  bias_attr=False,
                                  num_flatten_dims=2)
Example #6
    def __init__(self, scope_name, decoder_size):
        super(SimpleAttention, self).__init__(scope_name)

        self.fc_1 = FC(self.full_name(),
                       decoder_size,
                       act=None,
                       bias_attr=False)
        self.fc_2 = FC(self.full_name(),
                       1,
                       num_flatten_dims=2,
                       act=None,
                       bias_attr=False)
Example #7
    def __init__(self, name_scope, dtype="float32"):
        super(MNIST, self).__init__(name_scope)

        self._simple_img_conv_pool_1 = SimpleImgConvPool(
            self.full_name(),
            num_filters=20,
            filter_size=5,
            pool_size=2,
            pool_stride=2,
            act="relu",
            dtype=dtype,
            use_cudnn=True)

        self._simple_img_conv_pool_2 = SimpleImgConvPool(
            self.full_name(),
            num_filters=50,
            filter_size=5,
            pool_size=2,
            pool_stride=2,
            act="relu",
            dtype=dtype,
            use_cudnn=True)

        pool_2_shape = 50 * 4 * 4
        SIZE = 10
        scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
        self._fc = FC(self.full_name(),
                      10,
                      param_attr=fluid.param_attr.ParamAttr(
                          initializer=fluid.initializer.NormalInitializer(
                              loc=0.0, scale=scale)),
                      act="softmax",
                      dtype=dtype)
Example #8
    def __init__(self,
                 name_scope,
                 hidden_size,
                 vocab_size,
                 num_layers=2,
                 num_steps=20,
                 init_scale=0.1,
                 dropout=None):
        super(PtbModel, self).__init__(name_scope)
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.init_scale = init_scale
        self.num_layers = num_layers
        self.num_steps = num_steps
        self.dropout = dropout
        self.simple_lstm_rnn = SimpleLSTMRNN(self.full_name(),
                                             hidden_size,
                                             num_steps,
                                             num_layers=num_layers,
                                             init_scale=init_scale,
                                             dropout=dropout)
        self.embedding = Embedding(
            self.full_name(),
            size=[vocab_size, hidden_size],
            dtype='float32',
            is_sparse=False,
            param_attr=fluid.ParamAttr(
                name='embedding_para',
                initializer=fluid.initializer.UniformInitializer(
                    low=-init_scale, high=init_scale)))

        self.out_project = FC(self.full_name(),
                              self.vocab_size,
                              num_flatten_dims=2)
Example #9
 def __init__(self, name_scope):
     super(MNIST, self).__init__(name_scope)
     name_scope = self.full_name()
     # Define a convolution layer: 20 output channels, kernel size 5, stride 1, padding 2, ReLU activation
     self.conv1 = Conv2D(name_scope,
                         num_filters=20,
                         filter_size=5,
                         stride=1,
                         padding=2,
                         act='relu')
     # Define a pooling layer: kernel size 2, max pooling
     self.pool1 = Pool2D(name_scope,
                         pool_size=2,
                         pool_stride=2,
                         pool_type='max')
     # Define a convolution layer: 20 output channels, kernel size 5, stride 1, padding 2, ReLU activation
     self.conv2 = Conv2D(name_scope,
                         num_filters=20,
                         filter_size=5,
                         stride=1,
                         padding=2,
                         act='relu')
     # Define a pooling layer: kernel size 2, max pooling
     self.pool2 = Pool2D(name_scope,
                         pool_size=2,
                         pool_stride=2,
                         pool_type='max')
     # Define a fully connected layer with 10 output nodes and softmax activation
     self.fc = FC(name_scope, size=10, act='softmax')
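A minimal forward-pass sketch for this network (an assumption, not from the source), chaining only the layers defined above; the final FC flattens the pooled feature map itself:

    def forward(self, inputs):
        x = self.conv1(inputs)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.fc(x)   # flattens to [batch, 10] and applies softmax
        return x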
Example #10
    def __init__(self, name_scope):
        super(MNIST, self).__init__(name_scope)

        self._simple_img_conv_pool_1 = SimpleImgConvPool(self.full_name(),
                                                         1,
                                                         20,
                                                         5,
                                                         2,
                                                         2,
                                                         act="relu")

        self._simple_img_conv_pool_2 = SimpleImgConvPool(self.full_name(),
                                                         20,
                                                         50,
                                                         5,
                                                         2,
                                                         2,
                                                         act="relu")

        pool_2_shape = 50 * 4 * 4
        SIZE = 10
        scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
        self._fc = FC(self.full_name(),
                      10,
                      param_attr=fluid.param_attr.ParamAttr(
                          initializer=fluid.initializer.NormalInitializer(
                              loc=0.0, scale=scale)),
                      act="softmax")
Example #11
    def __init__(self, name_scope, layers=50, class_dim=102):
        super(ResNet, self).__init__(name_scope)

        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(
            self.full_name(),
            num_channels=3,
            num_filters=64,
            filter_size=7,
            stride=2,
            act='relu')
        self.pool2d_max = Pool2D(
            self.full_name(),
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        self.bottleneck_block_list = []
        num_channels = 64
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(
                        self.full_name(),
                        num_channels=num_channels,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        shortcut=shortcut))
                num_channels = bottleneck_block._num_channels_out
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = Pool2D(
            self.full_name(), pool_size=7, pool_type='avg', global_pooling=True)

        import math
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.out = FC(self.full_name(),
                      size=class_dim,
                      act='softmax',
                      param_attr=fluid.param_attr.ParamAttr(
                          initializer=fluid.initializer.Uniform(-stdv, stdv)))
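A hedged sketch of the matching forward pass (not included in the scraped snippet), chaining the stem convolution, the bottleneck blocks registered above, global average pooling, and the softmax FC:

    def forward(self, inputs):
        y = self.conv(inputs)
        y = self.pool2d_max(y)
        for bottleneck_block in self.bottleneck_block_list:
            y = bottleneck_block(y)
        y = self.pool2d_avg(y)
        y = self.out(y)
        return y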
Example #12
    def __init__(self, name_scope, num_channels, reduction_ratio):

        super(SqueezeExcitation, self).__init__(name_scope)
        self._pool = Pool2D(
            self.full_name(), pool_size=0, pool_type='avg', global_pooling=True)
        stdv = 1.0/math.sqrt(num_channels*1.0)
        self._squeeze = FC(
            self.full_name(),
            size=num_channels // reduction_ratio,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv,stdv)),
            act='relu')
        stdv = 1.0/math.sqrt(num_channels/16.0*1.0)
        self._excitation = FC(
            self.full_name(),
            size=num_channels,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv,stdv)),
            act='sigmoid')
Example #13
    def __init__(self, name_scope, num_channels, reduction_ratio):

        super(SqueezeExcitation, self).__init__(name_scope)
        self._pool = Pool2D(self.full_name(),
                            pool_size=0,
                            pool_type='avg',
                            global_pooling=True)
        self._squeeze = FC(
            self.full_name(),
            size=num_channels // reduction_ratio,
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=0.05)),
            act='relu')
        self._excitation = FC(
            self.full_name(),
            size=num_channels,
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=0.05)),
            act='sigmoid')
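For reference, a hedged forward sketch of the squeeze-and-excitation gating this layer implements; it assumes paddle.fluid is imported as fluid and is not taken from the scraped source:

    def forward(self, input):
        y = self._pool(input)    # global average pool -> [N, C, 1, 1]
        y = self._squeeze(y)     # reduce channels by reduction_ratio, ReLU
        y = self._excitation(y)  # restore channel count, sigmoid gates in (0, 1)
        # rescale each input channel by its gate
        return fluid.layers.elementwise_mul(x=input, y=y, axis=0)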
Example #14
 def __init__(self, scope_name):
     super(OCRAttention, self).__init__(scope_name)
     self.encoder_net = EncoderNet(self.full_name())
     self.fc = FC(self.full_name(),
                  size=Config.decoder_size,
                  bias_attr=False,
                  act='relu')
     self.embedding = Embedding(
         self.full_name(), [Config.num_classes + 2, Config.word_vector_dim],
         dtype='float32')
     self.gru_decoder_with_attention = GRUDecoderWithAttention(
         self.full_name(), Config.decoder_size, Config.num_classes)
Example #15
    def __init__(self, scope_name, decoder_size, num_classes):
        super(GRUDecoderWithAttention, self).__init__(scope_name)
        self.simple_attention = SimpleAttention(self.full_name(), decoder_size)

        self.fc_1_layer = FC(self.full_name(),
                             size=decoder_size * 3,
                             bias_attr=False)
        self.fc_2_layer = FC(self.full_name(),
                             size=decoder_size * 3,
                             bias_attr=False)
        self.gru_unit = GRUUnit(
            self.full_name(),
            size=decoder_size * 3,
            param_attr=None,
            bias_attr=None)
        self.out_layer = FC(self.full_name(),
                            size=num_classes + 2,
                            bias_attr=None,
                            act='softmax')

        self.decoder_size = decoder_size
Example #16
 def __init__(self, name_scope):
     super(MODEL, self).__init__(name_scope)
     self.conv1 = Conv2D('conv2d1_',
                         num_filters=40,
                         filter_size=5,
                         stride=1,
                         padding=2,
                         act='relu')
     self.conv2 = Conv2D('conv2d2_',
                         num_filters=50,
                         filter_size=5,
                         stride=1,
                         padding=2,
                         act='relu')
     self.conv3 = Conv2D('conv2d3_',
                         num_filters=70,
                         filter_size=2,
                         stride=1,
                         padding=1,
                         act='relu')
     self.conv4 = Conv2D('conv2d4_',
                         num_filters=100,
                         filter_size=2,
                         stride=1,
                         padding=1,
                         act='relu')
     self.pool2d1 = Pool2D('pool2d1_',
                           pool_size=2,
                           pool_stride=2,
                           pool_type='max')
     self.pool2d2 = Pool2D('pool2d2_',
                           pool_size=2,
                           pool_stride=2,
                           pool_type='max')
     self.pool2d3 = Pool2D('pool2d3_',
                           pool_size=2,
                           pool_stride=2,
                           pool_type='max')
     self.fc1 = FC('fc1_', size=100, act='relu')
     self.fc2 = FC('fc2_', size=10, act='softmax')
Example #17
 def __init__(self,
              name_scope,
              conv_arch=((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))):
     super(VGG, self).__init__(name_scope)
     self.vgg_blocks = []
     iter_id = 0
     # Add the vgg_blocks
     # There are 5 vgg_blocks in total; the number of conv layers and output channels of each block is specified by conv_arch
     for (num_convs, num_channels) in conv_arch:
         block = self.add_sublayer(
             'block_' + str(iter_id),
             vgg_block(self.full_name(), num_convs, num_channels))
         self.vgg_blocks.append(block)
         iter_id += 1
     self.fc1 = FC(self.full_name(), size=4096, act='relu')
     self.drop1_ratio = 0.5
     self.fc2 = FC(self.full_name(), size=4096, act='relu')
     self.drop2_ratio = 0.5
     self.fc3 = FC(
         self.full_name(),
         size=1,
     )
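A plausible forward pass for this VGG (an assumption, not part of the snippet), applying the registered blocks in order and using the stored dropout ratios; it assumes paddle.fluid is imported as fluid:

    def forward(self, x):
        for block in self.vgg_blocks:
            x = block(x)
        x = fluid.layers.dropout(self.fc1(x), dropout_prob=self.drop1_ratio)
        x = fluid.layers.dropout(self.fc2(x), dropout_prob=self.drop2_ratio)
        x = self.fc3(x)
        return x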
Example #18
 def __init__(self,
              name_scope,
              conv_arch=((32, 5, 2), (64, 3, 1), (128, 3, 1), (256, 3, 1))):
     super(VGG, self).__init__(name_scope)
     self.vgg_blocks = []
     iter_id = 0
     # Add the vgg_blocks
     # There are 4 vgg_blocks here; the filter count, kernel size, and padding of each block are specified by conv_arch
     for (num_filters, filter_size, padding) in conv_arch:
         block = self.add_sublayer(
             'block_' + str(iter_id),
             vgg_block(self.full_name(),
                       num_filters=num_filters,
                       filter_size=filter_size,
                       padding=padding))
         self.vgg_blocks.append(block)
         iter_id += 1
     self.fc0 = FC(self.full_name(), size=512, act='relu')
     self.fc1 = FC(self.full_name(), size=10, act='softmax')
     self.fc2 = FC(self.full_name(), size=10, act='softmax')
     self.fc3 = FC(self.full_name(), size=10, act='softmax')
     self.fc4 = FC(self.full_name(), size=10, act='softmax')
Example #19
 def __init__(self, name_scope, num_classes):
     super(LeNet5, self).__init__(name_scope)
     self.conv1 = Conv2D(self.full_name(),
                         num_filters=50,
                         filter_size=5,
                         stride=1)
     self.pool1 = Pool2D(self.full_name(),
                         pool_size=2,
                         pool_stride=1,
                         pool_type='max')
     self.conv2 = Conv2D(self.full_name(),
                         num_filters=32,
                         filter_size=3,
                         stride=1)
     self.pool2 = Pool2D(self.full_name(),
                         pool_size=2,
                         pool_stride=1,
                         pool_type='max')
     self.fc1 = FC(self.full_name(), size=num_classes, act='softmax')
Example #20
    def __init__(self, name_scope):
        super(generator, self).__init__(name_scope)

        self.fc1 = FC(self.full_name(),
                      size=7 * 7 * 64,
                      param_attr=fluid.initializer.Xavier(),
                      bias_attr=False)

        self.bn1 = BatchNorm(self.full_name(),
                             momentum=0.99,
                             num_channels=7 * 7 * 64,
                             param_attr=fluid.initializer.Xavier(),
                             bias_attr=fluid.initializer.Xavier(),
                             trainable_statistics=True)

        self.deconv1 = DeConv2D(self.full_name(),
                                num_filters=64,
                                filter_size=5,
                                stride=1,
                                padding=[2, 2],
                                relu=True,
                                norm=True,
                                use_bias=False)

        self.deconv2 = DeConv2D(self.full_name(),
                                num_filters=32,
                                filter_size=5,
                                stride=2,
                                padding=[1, 1],
                                relu=True,
                                norm=True,
                                use_bias=False)

        self.deconv3 = DeConv2D(self.full_name(),
                                num_filters=1,
                                filter_size=5,
                                stride=2,
                                padding=[2, 2],
                                relu=False,
                                norm=False,
                                use_bias=False)
Example #21
 def __init__(self, name_scope, num_classes=1):
     super(GoogLeNet, self).__init__(name_scope)
     # GoogLeNet consists of five modules, each followed by a pooling layer
     # The first module contains one convolution layer
     self.conv1 = Conv2D(self.full_name(), num_filters=64, filter_size=7, 
                         padding=3, act='relu')
     # 3x3 max pooling
     self.pool1 = Pool2D(self.full_name(), pool_size=3, pool_stride=2,  
                         pool_padding=1, pool_type='max')
     # The second module contains two convolution layers
     self.conv2_1 = Conv2D(self.full_name(), num_filters=64, 
                           filter_size=1, act='relu')
     self.conv2_2 = Conv2D(self.full_name(), num_filters=192, 
                           filter_size=3, padding=1, act='relu')
     # 3x3 max pooling
     self.pool2 = Pool2D(self.full_name(), pool_size=3, pool_stride=2,  
                         pool_padding=1, pool_type='max')
     # The third module contains two Inception blocks
     self.block3_1 = Inception(self.full_name(), 64, (96, 128), (16, 32), 32)
     self.block3_2 = Inception(self.full_name(), 128, (128, 192), (32, 96), 64)
     # 3x3 max pooling
     self.pool3 = Pool2D(self.full_name(), pool_size=3, pool_stride=2,  
                            pool_padding=1, pool_type='max')
     # The fourth module contains five Inception blocks
     self.block4_1 = Inception(self.full_name(), 192, (96, 208), (16, 48), 64)
     self.block4_2 = Inception(self.full_name(), 160, (112, 224), (24, 64), 64)
     self.block4_3 = Inception(self.full_name(), 128, (128, 256), (24, 64), 64)
     self.block4_4 = Inception(self.full_name(), 112, (144, 288), (32, 64), 64)
     self.block4_5 = Inception(self.full_name(), 256, (160, 320), (32, 128), 128)
     # 3x3 max pooling
     self.pool4 = Pool2D(self.full_name(), pool_size=3, pool_stride=2,  
                            pool_padding=1, pool_type='max')
     # The fifth module contains two Inception blocks
     self.block5_1 = Inception(self.full_name(), 256, (160, 320), (32, 128), 128)
     self.block5_2 = Inception(self.full_name(), 384, (192, 384), (48, 128), 128)
     # Global pooling: with global_pooling enabled, pool_stride has no effect
     self.pool5 = Pool2D(self.full_name(), pool_stride=1, 
                            global_pooling=True, pool_type='avg')
     self.fc = FC(self.full_name(),  size=num_classes)
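A hedged forward-pass sketch consistent with the module comments above (not part of the scraped code): each group of convolution or Inception blocks is followed by its pooling layer, then global average pooling and the final FC:

    def forward(self, x):
        x = self.pool1(self.conv1(x))
        x = self.pool2(self.conv2_2(self.conv2_1(x)))
        x = self.pool3(self.block3_2(self.block3_1(x)))
        x = self.block4_3(self.block4_2(self.block4_1(x)))
        x = self.pool4(self.block4_5(self.block4_4(x)))
        x = self.pool5(self.block5_2(self.block5_1(x)))
        x = self.fc(x)
        return x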
Example #22
    def __init__(self, name_scope):
        super(discriminator, self).__init__(name_scope)

        # conv2d + leaky_relu + dropout
        self.conv1 = conv2d(self.full_name(),
                            num_filters=64,
                            filter_size=5,
                            stride=2,
                            padding=2,
                            relu=True,
                            dropout=0.3)

        # conv2d + leaky_relu
        self.conv2 = conv2d(self.full_name(),
                            num_filters=128,
                            filter_size=5,
                            stride=2,
                            relu=True,
                            padding=2)

        self.fc1 = FC(self.full_name(),
                      size=2,
                      param_attr=fluid.initializer.Xavier(),
                      bias_attr=fluid.initializer.Xavier())
Example #23
    def __init__(self, name_scope, layers=50, class_dim=10):
        super(SeResNeXt, self).__init__(name_scope)

        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)

        if layers == 50:
            cardinality = 32
            reduction_ratio = 16
            depth = [3, 4, 6, 3]
            num_filters = [128, 256, 512, 1024]
            self.conv0 = ConvBNLayer(name_scope='convbn',
                                     num_channels=3,
                                     num_filters=64,
                                     filter_size=7,
                                     stride=2,
                                     act='relu')
            self.pool = Pool2D(name_scope='pool2d',
                               pool_size=3,
                               pool_stride=2,
                               pool_padding=1,
                               pool_type='max')
        elif layers == 101:
            cardinality = 32
            reduction_ratio = 16
            depth = [3, 4, 23, 3]
            num_filters = [128, 256, 512, 1024]
            self.conv0 = ConvBNLayer(name_scope='convbn',
                                     num_channels=3,
                                     num_filters=64,
                                     filter_size=7,
                                     stride=2,
                                     act='relu')
            self.pool = Pool2D(name_scope='pool2d',
                               pool_size=3,
                               pool_stride=2,
                               pool_padding=1,
                               pool_type='max')
        elif layers == 152:
            cardinality = 64
            reduction_ratio = 16
            depth = [3, 8, 36, 3]
            num_filters = [128, 256, 512, 1024]
            self.conv0 = ConvBNLayer(name_scope='convbn',
                                     num_channels=3,
                                     num_filters=64,
                                     filter_size=3,
                                     stride=2,
                                     act='relu')
            self.conv1 = ConvBNLayer(name_scope='convbn',
                                     num_channels=64,
                                     num_filters=64,
                                     filter_size=3,
                                     stride=1,
                                     act='relu')
            self.conv2 = ConvBNLayer(name_scope='convbn',
                                     num_channels=64,
                                     num_filters=128,
                                     filter_size=3,
                                     stride=1,
                                     act='relu')
            self.pool = Pool2D(name_scope='pool2d',
                               pool_size=3,
                               pool_stride=2,
                               pool_padding=1,
                               pool_type='max')

        self.bottleneck_block_list = []
        num_channels = 64
        if layers == 152:
            num_channels = 128
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(name_scope='bnb',
                                    num_channels=num_channels,
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    cardinality=cardinality,
                                    reduction_ratio=reduction_ratio,
                                    shortcut=shortcut))
                num_channels = bottleneck_block._num_channels_out
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = Pool2D(name_scope='pool2d',
                                 pool_size=7,
                                 pool_type='avg',
                                 global_pooling=True)
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 2 * 1 * 1

        self.out = FC('fc',
                      self.pool2d_avg_output,
                      class_dim,
                      param_attr=fluid.param_attr.ParamAttr(
                          initializer=fluid.initializer.Uniform(-stdv, stdv)))
Example #24
    def __init__(self, name_scope, param_attr=None, bias_attr=None):
        super(MLP, self).__init__(name_scope)

        self._fc1 = FC(self.full_name(), 10)
        self._fc2 = FC(self.full_name(), 10)
Example #25
 def __init__(self, name_scope):
     super(MNIST, self).__init__(name_scope)
     name_scope = self.full_name()
     # Define a single fully connected layer with output dimension 1 and act=None, i.e. no activation
     self.fc = FC(name_scope, size=1, act=None)
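A short, hedged usage sketch (not from the source) showing how a single-FC model like this is typically driven under Fluid 1.x dygraph; the 13-feature input shape and the "regressor" scope name are only illustrative assumptions:

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable

with fluid.dygraph.guard():
    model = MNIST("regressor")                                   # the class defined above
    x = to_variable(np.random.randn(8, 13).astype('float32'))   # illustrative [batch, features] input
    y = model.fc(x)                                              # one linear output per sample, shape [8, 1]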
Example #26
 def __init__(self, name_scope):
     super(MNIST, self).__init__(name_scope)
     name_scope = self.full_name()
     self.fc = FC(name_scope, size=1, act=None)
Example #27
    def __init__(self, name_scope):
        super(MLP, self).__init__(name_scope)

        self._fc1 = FC(self.full_name(), 10)
        self._fc2 = FC(self.full_name(), 10)
Example #28
    def __init__(self, name_scope, embedding_dim):
        super(CNNEncoder, self).__init__(name_scope)

        self.fc = FC(self.full_name(), size=embedding_dim, num_flatten_dims=2)
Example #29
    def __init__(self, name_scope, units):
        super(BahdanauAttention, self).__init__(name_scope)

        self.W1 = FC(self.full_name(), size=units, num_flatten_dims=2)
        self.W2 = FC(self.full_name(), size=units, num_flatten_dims=2)
        self.V = FC(self.full_name(), size=1, num_flatten_dims=2)