Example #1
    def __init__(self):
        super(MNIST, self).__init__()

        self._simple_img_conv_pool_1 = SimpleImgConvPool(1,
                                                         20,
                                                         5,
                                                         2,
                                                         2,
                                                         act="relu")

        self._simple_img_conv_pool_2 = SimpleImgConvPool(20,
                                                         50,
                                                         5,
                                                         2,
                                                         2,
                                                         act="relu")

        self.pool_2_shape = 50 * 4 * 4
        SIZE = 10
        scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5
        self._fc = Linear(self.pool_2_shape,
                          10,
                          param_attr=fluid.param_attr.ParamAttr(
                              initializer=fluid.initializer.NormalInitializer(
                                  loc=0.0, scale=scale)),
                          act="softmax")
Example #2
    def __init__(self):
        super(GoogLeNet, self).__init__()
        # 1
        self.conv1 = Conv2D(num_channels=3, num_filters=64, filter_size=7, padding=3, act='relu')
        self.pool1 = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

        # 2
        self.conv2_1 = Conv2D(num_channels=64, num_filters=64, filter_size=1, act='relu')
        self.conv2_2 = Conv2D(num_channels=64, num_filters=192, filter_size=3, padding=1, act='relu')
        self.pool2 = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

        # 3
        self.block3_1 = Inception(192, 64, (96, 128), (16, 32), 32)
        self.block3_2 = Inception(256, 128, (128, 192), (32, 96), 64)
        self.pool3 = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

        # 4
        self.block4_1 = Inception(480, 192, (96, 208), (16, 48), 64)
        self.block4_2 = Inception(512, 160, (112, 224), (24, 64), 64)
        self.block4_3 = Inception(512, 128, (128, 256), (24, 64), 64)
        self.block4_4 = Inception(512, 112, (144, 288), (32, 64), 64)
        self.block4_5 = Inception(528, 256, (160, 320), (32, 128), 128)
        self.pool4 = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

        # 5
        self.block5_1 = Inception(832, 256, (160, 320), (32, 128), 128)
        self.block5_2 = Inception(832, 384, (192, 384), (48, 128), 128)
        self.pool5 = Pool2D(pool_stride=1, global_pooling=True, pool_type='avg')

        self.fc = Linear(input_dim=1024, output_dim=1, act=None)
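
The first argument of each Inception call above is the channel count produced by the previous block. Assuming Inception concatenates its four branch outputs along the channel axis (the standard GoogLeNet design), the bookkeeping can be checked with a small sketch:

def inception_out_channels(c1, c2, c3, c4):
    # The four branches emit c1, c2[1], c3[1] and c4 channels,
    # which the block concatenates.
    return c1 + c2[1] + c3[1] + c4

assert inception_out_channels(64, (96, 128), (16, 32), 32) == 256       # block3_1 -> block3_2
assert inception_out_channels(128, (128, 192), (32, 96), 64) == 480     # block3_2 -> block4_1
assert inception_out_channels(256, (160, 320), (32, 128), 128) == 832   # block4_5 -> block5_1
assert inception_out_channels(384, (192, 384), (48, 128), 128) == 1024  # block5_2 -> fc
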
Example #3
    def __init__(self, bottleneck_params, in_channels=3, class_dim=1024):
        super(Xception, self).__init__()

        self.convbn1 = ConvBN(in_channels, 32, 3, 2, act='relu')
        self.convbn2 = ConvBN(32, 64, 3, 1, act='relu')

        in_channel = 64
        self.entry_flow, in_channel = self.block_flow(
            block_num=bottleneck_params['entry_flow'][0],
            strides=bottleneck_params['entry_flow'][1],
            chns=bottleneck_params['entry_flow'][2],
            in_channel=in_channel)
            
        self.middle_flow, in_channel = self.block_flow(
            block_num=bottleneck_params['middle_flow'][0],
            strides=bottleneck_params['middle_flow'][1],
            chns=bottleneck_params['middle_flow'][2],
            in_channel=in_channel)
            
        self.exit_flow, in_channel = self.exit_block_flow(
            block_num=bottleneck_params['exit_flow'][0],
            strides=bottleneck_params['exit_flow'][1],
            chns=bottleneck_params['exit_flow'][2],
            in_channel=in_channel)

        self.pool = Pool2D(pool_size=7, pool_type='avg', global_pooling=True)

        self.feature_dim = 2048

        import math 
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.linear = Linear(self.feature_dim, class_dim, act='softmax',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)))
Example #4
    def __init__(self,
                 scale=1.0,
                 num_classes=1000,
                 with_pool=True,
                 classifier_activation='softmax'):
        super(MobileNetV2, self).__init__()
        self.scale = scale
        self.num_classes = num_classes
        self.with_pool = with_pool

        bottleneck_params_list = [
            (1, 16, 1, 1),
            (6, 24, 2, 2),
            (6, 32, 3, 2),
            (6, 64, 4, 2),
            (6, 96, 3, 1),
            (6, 160, 3, 2),
            (6, 320, 1, 1),
        ]

        self._conv1 = ConvBNLayer(
            num_channels=3,
            num_filters=int(32 * scale),
            filter_size=3,
            stride=2,
            padding=1)

        self._invl = []
        i = 1
        in_c = int(32 * scale)
        for layer_setting in bottleneck_params_list:
            t, c, n, s = layer_setting
            i += 1
            tmp = self.add_sublayer(
                sublayer=InvresiBlocks(
                    in_c=in_c, t=t, c=int(c * scale), n=n, s=s),
                name='conv' + str(i))
            self._invl.append(tmp)
            in_c = int(c * scale)

        self._out_c = int(1280 * scale) if scale > 1.0 else 1280
        self._conv9 = ConvBNLayer(
            num_channels=in_c,
            num_filters=self._out_c,
            filter_size=1,
            stride=1,
            padding=0)

        if with_pool:
            self._pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)

        if num_classes > 0:
            tmp_param = ParamAttr(name=self.full_name() + "fc10_weights")
            self._fc = Linear(
                self._out_c,
                num_classes,
                act=classifier_activation,
                param_attr=tmp_param,
                bias_attr=ParamAttr(name="fc10_offset"))
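
Each tuple in bottleneck_params_list reads (expansion factor t, output channels c, repeat count n, first stride s). A short sketch, under the same scale handling as above, of how the channel widths chain from one InvresiBlocks stage to the next:

scale = 1.0
in_c = int(32 * scale)  # output of _conv1
for t, c, n, s in [(1, 16, 1, 1), (6, 24, 2, 2), (6, 32, 3, 2),
                   (6, 64, 4, 2), (6, 96, 3, 1), (6, 160, 3, 2),
                   (6, 320, 1, 1)]:
    # Each stage maps in_c -> int(c * scale), repeated n times.
    print('%d -> %d  (t=%d, n=%d, s=%d)' % (in_c, int(c * scale), t, n, s))
    in_c = int(c * scale)
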
Example #5
    def __init__(self, num_classes=1):
        super(AlexNet, self).__init__()

        # Like LeNet, AlexNet uses convolution and pooling layers to extract image features.
        # Unlike LeNet, the activation is 'relu': a nonlinear combination of the weighted
        # inputs that lets the network form nonlinear decision boundaries.
        self.conv1 = Conv2D(num_channels=3,
                            num_filters=96,
                            filter_size=11,
                            stride=4,
                            padding=5,
                            act='relu')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2,
                            pool_type='max')  # number of output neurons
        self.conv2 = Conv2D(num_channels=96,
                            num_filters=256,
                            filter_size=5,
                            stride=1,
                            padding=2,
                            act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv3 = Conv2D(num_channels=256,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv4 = Conv2D(num_channels=384,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv5 = Conv2D(num_channels=384,
                            num_filters=256,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.pool5 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')

        self.fc1 = Linear(input_dim=12544, output_dim=4096,
                          act='relu')  # 7*7*256
        self.drop_ratio1 = 0.5
        self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=4096, output_dim=num_classes)
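
fc1's input_dim of 12544 (= 7*7*256) assumes a 224x224 input; a quick check of the spatial arithmetic, using out = (in + 2*padding - kernel) // stride + 1:

def conv_out(size, kernel, stride, padding):
    return (size + 2 * padding - kernel) // stride + 1

s = 224
s = conv_out(s, 11, 4, 5) // 2  # conv1 -> 56, pool1 -> 28
s = conv_out(s, 5, 1, 2) // 2   # conv2 -> 28, pool2 -> 14
s = conv_out(s, 3, 1, 1)        # conv3 -> 14
s = conv_out(s, 3, 1, 1)        # conv4 -> 14
s = conv_out(s, 3, 1, 1) // 2   # conv5 -> 14, pool5 -> 7
assert s * s * 256 == 12544     # fc1's input_dim
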
Example #6
    def __init__(self, num_channels, reduction_ratio):

        super(SqueezeExcitation, self).__init__()
        self._num_channels = num_channels
        self._pool = Pool2D(pool_size=0, pool_type='avg', global_pooling=True)
        self._squeeze = Linear(
            num_channels,
            num_channels // reduction_ratio,
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=0.05)),
            act='relu')
        self._excitation = Linear(
            num_channels // reduction_ratio,
            num_channels,
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=0.05)),
            act='sigmoid')
Example #7
 def __init__(self, conv_arch=((2, 64), (2, 128), (3, 256), (3, 512), (3, 512)), num_classes=1000):
     super(VGG, self).__init__()
     self.vgg_blocks = []
     iter_id = 0
     # Add the vgg_blocks: five in total; the number of conv layers and the
     # output channels of each block are given by conv_arch.
     in_channels = [3, 64, 128, 256, 512, 512]
     for (num_convs, num_channels) in conv_arch:
         block = self.add_sublayer('block'+str(iter_id),
                                   vgg_block(num_convs, in_channels=in_channels[iter_id], out_channels=num_channels))
         self.vgg_blocks.append(block)
         iter_id += 1
     self.fc1 = Linear(input_dim=512*7*7, output_dim=4096, act='relu')
     self.drop_ratio1 = 0.5
     self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
     self.drop_ratio2 = 0.5
     self.fc3 = Linear(input_dim=4096, output_dim=num_classes, act='softmax')
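
fc1's input_dim of 512*7*7 likewise assumes a 224x224 input: each of the five vgg_blocks ends in a 2x2 max-pool that halves the spatial size.

size = 224
for _ in range(5):  # five vgg_blocks, each halving H and W
    size //= 2
assert size == 7 and 512 * size * size == 25088  # fc1's input_dim
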
Example #8
    def __init__(self, name_scope, config, mode):
        super(TSM_ResNet, self).__init__(name_scope)

        self.layers = config.MODEL.num_layers
        self.seg_num = config.MODEL.seg_num
        self.class_dim = config.MODEL.num_classes
        self.reshape_list = [
            config.MODEL.seglen * 3, config[mode.upper()]['target_size'],
            config[mode.upper()]['target_size']
        ]

        if self.layers == 50:
            depth = [3, 4, 6, 3]
        else:
            raise NotImplementedError
        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(num_channels=3,
                                num_filters=64,
                                filter_size=7,
                                stride=2,
                                act='relu')
        self.pool2d_max = Pool2D(pool_size=3,
                                 pool_stride=2,
                                 pool_padding=1,
                                 pool_type='max')

        self.bottleneck_block_list = []
        num_channels = 64

        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(num_channels=num_channels,
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    shortcut=shortcut,
                                    seg_num=self.seg_num))
                num_channels = int(bottleneck_block._num_channels_out)
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True
        self.pool2d_avg = Pool2D(pool_size=7,
                                 pool_type='avg',
                                 global_pooling=True)

        import math
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.out = Linear(
            2048,
            self.class_dim,
            act="softmax",
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)),
            bias_attr=fluid.param_attr.ParamAttr(
                learning_rate=2.0, regularizer=fluid.regularizer.L2Decay(0.)))
Example #9
    def __init__(self, layers=50, class_dim=100):
        super(ResNet, self).__init__()

        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but the input layer is {}".format(supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_channels = [64, 256, 512, 1024]
        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(
            num_channels=3,
            num_filters=64,
            filter_size=7,
            stride=1,  # stride=2 in standard ResNet; 1 here to test without downsampling
            act='relu')
        self.pool2d_max = Pool2D(pool_size=3,
                                 pool_stride=2,
                                 pool_padding=1,
                                 pool_type='max')

        self.bottleneck_block_list = []
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(num_channels=num_channels[block]
                                    if i == 0 else num_filters[block] * 4,
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    shortcut=shortcut))
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = Pool2D(pool_size=7,
                                 pool_type='avg',
                                 global_pooling=True)

        self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1

        import math
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.out = Linear(
            self.pool2d_avg_output,
            class_dim,
            act='softmax',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)))
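
A sketch of the channel bookkeeping in the loop above: the first block of each stage takes num_channels[block] input channels, every later block takes the previous block's expanded output (num_filters[block] * 4), and the final stage ends at 2048, matching pool2d_avg_output:

depth = [3, 4, 6, 3]                 # ResNet-50
num_channels = [64, 256, 512, 1024]
num_filters = [64, 128, 256, 512]
for block in range(len(depth)):
    for i in range(depth[block]):
        in_c = num_channels[block] if i == 0 else num_filters[block] * 4
        out_c = num_filters[block] * 4
assert out_c == 2048  # pool2d_avg_output = 512 * 4 * 1 * 1
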
Example #10
    def __init__(self,
                 rnn_hidden_size=Config.encoder_size,
                 is_test=False,
                 use_cudnn=True):
        super(EncoderNet, self).__init__()
        self.rnn_hidden_size = rnn_hidden_size
        para_attr = fluid.ParamAttr(
            initializer=fluid.initializer.Normal(0.0, 0.02))
        bias_attr = fluid.ParamAttr(initializer=fluid.initializer.Normal(
            0.0, 0.02),
                                    learning_rate=2.0)
        if fluid.framework.in_dygraph_mode():
            h_0 = np.zeros((Config.batch_size, rnn_hidden_size),
                           dtype="float32")
            h_0 = to_variable(h_0)
        else:
            h_0 = fluid.layers.fill_constant(
                shape=[Config.batch_size, rnn_hidden_size],
                dtype='float32',
                value=0)
        self.ocr_convs = OCRConv(is_test=is_test, use_cudnn=use_cudnn)

        self.fc_1_layer = Linear(768,
                                 rnn_hidden_size * 3,
                                 param_attr=para_attr,
                                 bias_attr=False)
        self.fc_2_layer = Linear(768,
                                 rnn_hidden_size * 3,
                                 param_attr=para_attr,
                                 bias_attr=False)
        self.gru_forward_layer = DynamicGRU(size=rnn_hidden_size,
                                            h_0=h_0,
                                            param_attr=para_attr,
                                            bias_attr=bias_attr,
                                            candidate_activation='relu')
        self.gru_backward_layer = DynamicGRU(size=rnn_hidden_size,
                                             h_0=h_0,
                                             param_attr=para_attr,
                                             bias_attr=bias_attr,
                                             candidate_activation='relu',
                                             is_reverse=True)

        self.encoded_proj_fc = Linear(rnn_hidden_size * 2,
                                      Config.decoder_size,
                                      bias_attr=False)
Example #11
    def __init__(self, class_dim=1000, scale=1.0):
        super(MobileNetV2, self).__init__()
        self.scale = scale
        self.class_dim = class_dim

        bottleneck_params_list = [
            (1, 16, 1, 1),
            (6, 24, 2, 2),
            (6, 32, 3, 2),
            (6, 64, 4, 2),
            (6, 96, 3, 1),
            (6, 160, 3, 2),
            (6, 320, 1, 1),
        ]

        #1. conv1
        self._conv1 = ConvBNLayer(num_channels=3,
                                  num_filters=int(32 * scale),
                                  filter_size=3,
                                  stride=2,
                                  act=None,
                                  padding=1)

        #2. bottleneck sequences
        self._invl = []
        i = 1
        in_c = int(32 * scale)
        for layer_setting in bottleneck_params_list:
            t, c, n, s = layer_setting
            i += 1
            tmp = self.add_sublayer(sublayer=InvresiBlocks(in_c=in_c,
                                                           t=t,
                                                           c=int(c * scale),
                                                           n=n,
                                                           s=s),
                                    name='conv' + str(i))
            self._invl.append(tmp)
            in_c = int(c * scale)

        #3. last_conv
        self._out_c = int(1280 * scale) if scale > 1.0 else 1280
        self._conv9 = ConvBNLayer(num_channels=in_c,
                                  num_filters=self._out_c,
                                  filter_size=1,
                                  stride=1,
                                  act=None,
                                  padding=0)

        #4. pool
        self._pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)

        #5. fc
        tmp_param = ParamAttr(name=self.full_name() + "fc10_weights")
        self._fc = Linear(self._out_c,
                          class_dim,
                          param_attr=tmp_param,
                          bias_attr=ParamAttr(name="fc10_offset"))
Example #12
 def __init__(self, dict_dim, batch_size, seq_len):
     super(BOW, self).__init__()
     self.dict_dim = dict_dim
     self.emb_dim = 128
     self.hid_dim = 128
     self.fc_hid_dim = 96
     self.class_dim = 2
     self.batch_size = batch_size
     self.seq_len = seq_len
     self.embedding = Embedding(
         size=[self.dict_dim + 1, self.emb_dim],
         dtype='float32',
         is_sparse=False)
     self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.hid_dim, act="tanh")
     self._fc2 = Linear(input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
     self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
                                  output_dim=self.class_dim,
                                  act="softmax")
Example #13
    def __init__(self,
                 nc,
                 ndf=64,
                 n_layers=3,
                 stride=2,
                 get_intern_feat=False,
                 adaptive_layers=1):
        super(AdaptiveDiscriminator, self).__init__()
        self.get_intern_feat = get_intern_feat
        self.n_layers = n_layers
        self.adaptive_layers = adaptive_layers
        self.nc = nc
        self.ndf = ndf

        self.sw = cfg.FINESIZE // 8
        self.sh = int(self.sw / cfg.ASPECTRATIO)
        self.ch = self.sh * self.sw

        nf = ndf
        fc_0 = self.add_sublayer('fc_0', Linear(self.ch, nc * 4**2))
        encoder_0 = self.add_sublayer(
            'encoder_0', Conv4x4(nc, nf, stride=2, act='leaky_relu'))
        self.fcs = [fc_0]
        self.encoders = [encoder_0]
        for n in range(1, self.adaptive_layers):
            nf_prev = nf
            nf = min(nf * 2, 512)
            fc = self.add_sublayer('fc_%d' % n, Linear(self.ch,
                                                       nf_prev * 4**2))
            encoder = self.add_sublayer(
                'encoder_%d' % n, Conv4x4(nf_prev,
                                          nf,
                                          stride=2,
                                          act='leaky_relu'))
            self.fcs.append(fc)
            self.encoders.append(encoder)

        self.determined_convs = []
        for n in range(self.adaptive_layers, self.n_layers + 1):
            nf_prev = nf
            nf = min(nf * 2, 512)
            conv = self.add_sublayer('conv_%d' % n,
                                     Conv4x4BNReLU(nf_prev, nf, stride=stride))
            self.determined_convs.append(conv)
Example #14
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(num_channels=1,
                   num_filters=6,
                   filter_size=3,
                   stride=1,
                   padding=1,
                   param_attr=conv2d_w1_attr,
                   bias_attr=conv2d_b1_attr),
            Pool2D(pool_size=2, pool_type='max', pool_stride=2),
            Conv2D(num_channels=6,
                   num_filters=16,
                   filter_size=5,
                   stride=1,
                   padding=0,
                   param_attr=conv2d_w2_attr,
                   bias_attr=conv2d_b2_attr),
            Pool2D(pool_size=2, pool_type='max', pool_stride=2))

        self.fc = Sequential(
            Linear(input_dim=400,
                   output_dim=120,
                   param_attr=fc_w1_attr,
                   bias_attr=fc_b1_attr),
            Linear(input_dim=120,
                   output_dim=84,
                   param_attr=fc_w2_attr,
                   bias_attr=fc_b2_attr),
            Linear(input_dim=84,
                   output_dim=num_classes,
                   act=classifier_activation,
                   param_attr=fc_w3_attr,
                   bias_attr=fc_b3_attr))
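
The first Linear's input_dim of 400 corresponds to a 1x28x28 (MNIST-sized) input: conv1 (3x3, padding 1) preserves 28, the pool halves it to 14, conv2 (5x5, no padding) gives 10, and the second pool gives 5, so the flattened feature is 16 * 5 * 5 = 400:

s = 28              # assumed MNIST-sized input
s = s + 2 - 3 + 1   # conv1: 3x3, stride 1, padding 1 -> 28
s //= 2             # pool -> 14
s = s - 5 + 1       # conv2: 5x5, stride 1, padding 0 -> 10
s //= 2             # pool -> 5
assert 16 * s * s == 400  # input_dim of the first Linear
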
Example #15
 def __init__(self,
              conv_arch=((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))):
     super(VGG, self).__init__()
     self.vgg_blocks = []
     iter_id = 0
     in_channels = [3, 64, 128, 256, 512, 512]
     for (num_convs, num_channels) in conv_arch:
         block = self.add_sublayer(
             'block_' + str(iter_id),
             vgg_block(num_convs,
                       in_channels=in_channels[iter_id],
                       out_channels=num_channels))
         self.vgg_blocks.append(block)
         iter_id += 1
     self.fc1 = Linear(input_dim=512 * 7 * 7, output_dim=4096, act='relu')
     self.drop1_ratio = 0.5
     self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
     self.drop2_ratio = 0.5
     self.fc3 = Linear(input_dim=4096, output_dim=1)
Example #16
    def __init__(self, num_classes=1):
        super(LeNet, self).__init__()
        self.conv1 = Conv2D(num_channels=1,
                            num_filters=6,
                            filter_size=5,
                            act='sigmoid')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = Conv2D(num_channels=6,
                            num_filters=16,
                            filter_size=5,
                            act='sigmoid')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv3 = Conv2D(num_channels=16,
                            num_filters=120,
                            filter_size=4,
                            act='sigmoid')

        self.fc1 = Linear(input_dim=120, output_dim=64, act='sigmoid')
        self.fc2 = Linear(input_dim=64, output_dim=num_classes)
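
With a 28x28 input the feature map shrinks to 1x1 with 120 channels, which is why fc1 takes input_dim=120:

s = 28
s = s - 5 + 1  # conv1 (5x5) -> 24
s //= 2        # pool1 -> 12
s = s - 5 + 1  # conv2 (5x5) -> 8
s //= 2        # pool2 -> 4
s = s - 4 + 1  # conv3 (4x4) -> 1
assert s == 1  # 120 channels * 1 * 1 = fc1's input_dim
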
Example #17
 def __init__(self, vocab_size):
     super(LogisticModel, self).__init__()
     self.logit = Linear(
         input_dim=4096,
         output_dim=vocab_size,
         act=None,
         param_attr=fluid.ParamAttr(
             initializer=fluid.initializer.MSRAInitializer(uniform=False)),
         bias_attr=fluid.ParamAttr(
             initializer=fluid.initializer.MSRAInitializer(uniform=False)))
Example #18
 def __init__(self, input_dim, channels, sn=False, epsilon=1e-5):
     super(AdaIN, self).__init__()
     self.channels = channels
     self.epsilon = epsilon
     self.input_dim = input_dim
     self.norm = Nop_InstanceNorm(self.input_dim)
     self.fc = Linear(64,
                      self.input_dim * 2,
                      param_attr=weight_initializer,
                      act=None)
Example #19
 def __init__(self):
     super(SimpleNet, self).__init__()
     self.net_a = paddle.nn.Sequential(
         paddle.nn.Linear(10, 20),
         paddle.nn.Linear(20, 20), paddle.nn.Linear(20, 5))
     self.net_b = paddle.nn.Sequential(
         paddle.nn.Linear(10, 20),
         paddle.nn.Linear(20, 20), paddle.nn.Linear(20, 5))
     self.net_unused = Linear(10, 20)
     self.step = 0
Example #20
    def __init__(self, encoder_size, decoder_size, num_classes):
        super(GRUDecoderWithAttention, self).__init__()
        self.simple_attention = SimpleAttention(decoder_size)

        self.fc_1_layer = Linear(input_dim=encoder_size * 2,
                                 output_dim=decoder_size * 3,
                                 bias_attr=False)
        self.fc_2_layer = Linear(input_dim=decoder_size,
                                 output_dim=decoder_size * 3,
                                 bias_attr=False)
        self.gru_unit = GRUUnit(size=decoder_size * 3,
                                param_attr=None,
                                bias_attr=None)
        self.out_layer = Linear(input_dim=decoder_size,
                                output_dim=num_classes + 2,
                                bias_attr=None,
                                act='softmax')

        self.decoder_size = decoder_size
Example #21
    def __init__(self, num_channels, reduction_ratio):

        super(SqueezeExcitation, self).__init__()
        self._num_channels = num_channels
        self._pool = Pool2D(pool_size=0, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self._fc = Linear(
            num_channels,
            num_channels // reduction_ratio,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)),
            act='relu')
        stdv = 1.0 / math.sqrt(num_channels / reduction_ratio * 1.0)
        self._excitation = Linear(
            num_channels // reduction_ratio,
            num_channels,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)),
            act='sigmoid')
Example #22
    def __init__(self, name_scope, num_classes=1):
        super(LeNet, self).__init__(name_scope)

        self.conv1 = Conv2D(num_channels=1,
                            num_filters=6,
                            filter_size=5,
                            act='relu')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = Conv2D(num_channels=6,
                            num_filters=16,
                            filter_size=5,
                            act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')

        # self.conv3 = Conv2D(num_channels=16, num_filters=120, filter_size=4, act='relu')
        # Create the fully connected layers; the last layer's output size equals
        # the number of classification labels.
        self.fc1 = Linear(input_dim=16 * 5 * 5, output_dim=120, act='relu')
        self.fc2 = Linear(input_dim=120, output_dim=84, act='relu')
        self.fc3 = Linear(input_dim=84, output_dim=num_classes)
Example #23
    def __init__(self, num_classes=1):
        super(AlexNet, self).__init__()

        self.conv1 = Conv2D(num_channels=3,
                            num_filters=96,
                            filter_size=11,
                            stride=4,
                            padding=5,
                            act='relu')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = Conv2D(num_channels=96,
                            num_filters=256,
                            filter_size=5,
                            stride=1,
                            padding=2,
                            act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv3 = Conv2D(num_channels=256,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv4 = Conv2D(num_channels=384,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv5 = Conv2D(num_channels=384,
                            num_filters=256,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.pool5 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')

        self.fc1 = Linear(input_dim=12544, output_dim=4096, act='relu')
        self.drop_ratio1 = 0.5
        self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=4096, output_dim=num_classes)
Example #24
    def __init__(self, depth=50, num_classes=1000):
        super(ResNet, self).__init__()

        layer_config = {
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
        }
        assert depth in layer_config.keys(), \
            "supported depths are {} but the input depth is {}".format(
                layer_config.keys(), depth)

        layers = layer_config[depth]
        num_in = [64, 256, 512, 1024]
        num_out = [64, 128, 256, 512]

        self.conv = ConvBNLayer(num_channels=3,
                                num_filters=64,
                                filter_size=7,
                                stride=2,
                                act='relu')
        self.pool = Pool2D(pool_size=3,
                           pool_stride=2,
                           pool_padding=1,
                           pool_type='max')

        self.layers = []
        for idx, num_blocks in enumerate(layers):
            blocks = []
            shortcut = False
            for b in range(num_blocks):
                block = BottleneckBlock(
                    num_channels=num_in[idx] if b == 0 else num_out[idx] * 4,
                    num_filters=num_out[idx],
                    stride=2 if b == 0 and idx != 0 else 1,
                    shortcut=shortcut)
                blocks.append(block)
                shortcut = True
            layer = self.add_sublayer("layer_{}".format(idx),
                                      Sequential(*blocks))
            self.layers.append(layer)

        self.global_pool = Pool2D(pool_size=7,
                                  pool_type='avg',
                                  global_pooling=True)

        import math
        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self.fc_input_dim = num_out[-1] * 4 * 1 * 1
        self.fc = Linear(
            self.fc_input_dim,
            num_classes,
            act='softmax',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)))
Example #25
    def __init__(self,
                 c_in,
                 num_classes,
                 layers,
                 method,
                 steps=4,
                 multiplier=4,
                 stem_multiplier=3):
        super(Network, self).__init__()
        self._c_in = c_in
        self._num_classes = num_classes
        self._layers = layers
        self._steps = steps
        self._multiplier = multiplier
        self._primitives = PRIMITIVES
        self._method = method

        c_cur = stem_multiplier * c_in
        self.stem = fluid.dygraph.Sequential(
            Conv2D(num_channels=3,
                   num_filters=c_cur,
                   filter_size=3,
                   padding=1,
                   param_attr=fluid.ParamAttr(initializer=MSRAInitializer()),
                   bias_attr=False),
            BatchNorm(num_channels=c_cur,
                      param_attr=fluid.ParamAttr(
                          initializer=ConstantInitializer(value=1)),
                      bias_attr=fluid.ParamAttr(
                          initializer=ConstantInitializer(value=0))))

        c_prev_prev, c_prev, c_cur = c_cur, c_cur, c_in
        cells = []
        reduction_prev = False
        for i in range(layers):
            if i in [layers // 3, 2 * layers // 3]:
                c_cur *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(steps, multiplier, c_prev_prev, c_prev, c_cur,
                        reduction, reduction_prev, method)
            reduction_prev = reduction
            cells.append(cell)
            c_prev_prev, c_prev = c_prev, multiplier * c_cur
        self.cells = fluid.dygraph.LayerList(cells)
        self.global_pooling = Pool2D(pool_type='avg', global_pooling=True)
        self.classifier = Linear(
            input_dim=c_prev,
            output_dim=num_classes,
            param_attr=ParamAttr(initializer=MSRAInitializer()),
            bias_attr=ParamAttr(initializer=MSRAInitializer()))

        self._initialize_alphas()
Example #26
 def __init__(self):
     super(OCRAttention, self).__init__()
     self.encoder_net = EncoderNet()
     self.fc = Linear(Config.encoder_size,
                      Config.decoder_size,
                      bias_attr=False,
                      act='relu')
     self.embedding = Embedding(
         [Config.num_classes + 2, Config.word_vector_dim], dtype='float32')
     self.gru_decoder_with_attention = GRUDecoderWithAttention(
         Config.decoder_size, Config.num_classes)
Example #27
    def forward(self, inputs):
        # Flatten each input image into a 784-dim vector, then apply the
        # fully connected layer defined in __init__.
        inputs = paddle.reshape(inputs, (-1, 784))
        outputs = self.fc(inputs)
        return outputs
Example #28
 def __init__(self, dict_dim, seq_len):
     super(GRU, self).__init__()
     self.dict_dim = dict_dim
     self.emb_dim = 128
     self.hid_dim = 128
     self.fc_hid_dim = 96
     self.class_dim = 3
     self.seq_len = seq_len
     self._fc1 = Linear(input_dim=self.hid_dim,
                        output_dim=self.fc_hid_dim,
                        act="tanh")
     self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
                                  output_dim=self.class_dim,
                                  act="softmax")
     self._encoder = GRUEncoder(dict_size=self.dict_dim + 1,
                                emb_dim=self.emb_dim,
                                gru_dim=self.hid_dim,
                                hidden_dim=self.hid_dim,
                                padding_idx=None,
                                seq_len=self.seq_len)
Example #29
    def __init__(self, input_dim, grnn_hidden_dim, init_bound, h_0=None):
        super(BiGRU, self).__init__()

        self.pre_gru = Linear(
            input_dim=input_dim,
            output_dim=grnn_hidden_dim * 3,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(low=-init_bound,
                                                      high=init_bound),
                regularizer=fluid.regularizer.L2DecayRegularizer(
                    regularization_coeff=1e-4)))  # num_flatten_dims=2 omitted

        self.gru = DynamicGRU(
            size=grnn_hidden_dim,
            h_0=h_0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(low=-init_bound,
                                                      high=init_bound),
                regularizer=fluid.regularizer.L2DecayRegularizer(
                    regularization_coeff=1e-4)))

        self.pre_gru_r = Linear(
            input_dim=input_dim,
            output_dim=grnn_hidden_dim * 3,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(low=-init_bound,
                                                      high=init_bound),
                regularizer=fluid.regularizer.L2DecayRegularizer(
                    regularization_coeff=1e-4)))  # num_flatten_dims=2 omitted

        self.gru_r = DynamicGRU(
            size=grnn_hidden_dim,
            is_reverse=True,
            h_0=h_0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(low=-init_bound,
                                                      high=init_bound),
                regularizer=fluid.regularizer.L2DecayRegularizer(
                    regularization_coeff=1e-4)))
Example #30
 def __init__(self, features, num_classes=1000):
     super(VGG, self).__init__()
     self.features = features
     self.avgpool = Pool2D(pool_size=7, pool_stride=1, pool_type='avg')
     import math
     stdv = 1.0 / math.sqrt(2048 * 1.0)
     self.classifier = Linear(
         512,
         num_classes,
         act='softmax',
         param_attr=fluid.param_attr.ParamAttr(
             initializer=fluid.initializer.Uniform(-stdv, stdv)))