Example #1
    def __init__(self, num_classes=1000):
        super(MobileNetV3_Large, self).__init__()
        self.conv1 = Conv2D(3, 16, filter_size=3, stride=2, padding=1)
        self.bn1 = BatchNorm(16)
        self.hs1 = hswish()

        self.bneck = fluid.dygraph.Sequential(
            Block(3, 16, 16, 16, relu(), None, 1),
            Block(3, 16, 64, 24, relu(), None, 2),
            Block(3, 24, 72, 24, relu(), None, 1),
            Block(5, 24, 72, 40, relu(), SeModule(40), 2),
            Block(5, 40, 120, 40, relu(), SeModule(40), 1),
            Block(5, 40, 120, 40, relu(), SeModule(40), 1),
            Block(3, 40, 240, 80, hswish(), None, 2),
            Block(3, 80, 200, 80, hswish(), None, 1),
            Block(3, 80, 184, 80, hswish(), None, 1),
            Block(3, 80, 184, 80, hswish(), None, 1),
            Block(3, 80, 480, 112, hswish(), SeModule(112), 1),
            Block(3, 112, 672, 112, hswish(), SeModule(112), 1),
            Block(5, 112, 672, 160, hswish(), SeModule(160), 1),
            Block(5, 160, 672, 160, hswish(), SeModule(160), 2),
            Block(5, 160, 960, 160, hswish(), SeModule(160), 1),
        )


        self.conv2 = Conv2D(160, 960, filter_size=1, stride=1, padding=0)
        self.bn2 = BatchNorm(960)
        self.hs2 = hswish()
        self.linear3 = Linear(960, 1280)
        self.bn3 = BatchNorm(1280)
        self.hs3 = hswish()
        self.linear4 = Linear(1280, num_classes, act='softmax')
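The snippet ends with the layer definitions; the matching forward pass is not shown. A minimal sketch of how these layers are presumably chained, with an assumed global average pool before the classifier head:

    def forward(self, x):
        out = self.hs1(self.bn1(self.conv1(x)))
        out = self.bneck(out)
        out = self.hs2(self.bn2(self.conv2(out)))
        # collapse the spatial dims so linear3 receives a [N, 960] tensor
        out = fluid.layers.pool2d(out, pool_type='avg', global_pooling=True)
        out = fluid.layers.reshape(out, [out.shape[0], 960])
        out = self.hs3(self.bn3(self.linear3(out)))
        return self.linear4(out)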
Example #2
 def __init__(self,
              dict_dim,
              emb_dim=128,
              hid_dim=128,
              fc_hid_dim=96,
              class_dim=2,
              channels=1,
              win_size=(3, 128)):
     super(CNN, self).__init__()
     self.dict_dim = dict_dim
     self.emb_dim = emb_dim
     self.hid_dim = hid_dim
     self.fc_hid_dim = fc_hid_dim
     self.class_dim = class_dim
     self.channels = channels
     self.win_size = win_size
     self.embedding = Embedding(size=[self.dict_dim + 1, self.emb_dim],
                                dtype='float64',
                                is_sparse=False,
                                padding_idx=0)
     self._conv2d = Conv2D(num_channels=self.channels,
                           num_filters=self.hid_dim,
                           filter_size=win_size,
                           padding=[1, 0],
                           use_cudnn=True,
                           act=None,
                           dtype="float64")
     self._fc_1 = Linear(input_dim=self.hid_dim,
                         output_dim=self.fc_hid_dim,
                         dtype="float64")
     self._fc_2 = Linear(input_dim=self.fc_hid_dim,
                         output_dim=self.class_dim,
                         act="softmax",
                         dtype="float64")
Example #3
    def __init__(self, num_classes=1000):
        super(MobileNetV3_Small, self).__init__()
        self.conv1 = Conv2D(3, 16, filter_size=3, stride=2, padding=1)
        self.bn1 = BatchNorm(16)
        self.hs1 = hswish()

        self.bneck = fluid.dygraph.Sequential(
            Block(3, 16, 16, 16, relu(), SeModule(16), 2),
            Block(3, 16, 72, 24, relu(), None, 2),
            Block(3, 24, 88, 24, relu(), None, 1),
            Block(5, 24, 96, 40, hswish(), SeModule(40), 2),
            Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
            Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
            Block(5, 40, 120, 48, hswish(), SeModule(48), 1),
            Block(5, 48, 144, 48, hswish(), SeModule(48), 1),
            Block(5, 48, 288, 96, hswish(), SeModule(96), 2),
            Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
            Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
        )


        self.conv2 = Conv2D(96, 576, filter_size=1, stride=1, padding=0)
        self.bn2 = BatchNorm(576)
        self.hs2 = hswish()
        self.linear3 = Linear(576, 1280)
        self.bn3 = BatchNorm(1280)
        self.hs3 = hswish()
        self.linear4 = Linear(1280, num_classes, act='softmax')
Example #4
    def __init__(self):
        super(DNN, self).__init__()
        self.name = 'DNN_1024_512_256_dygraph'

        self.embeddings = [
            Embedding(
                size=[cfg.sparse_feature_dim, cfg.embedding_size],
                dtype='float32',
                padding_idx=0,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.TruncatedNormalInitializer(
                        loc=0.0, scale=0.1 / math.sqrt(float(cfg.embedding_size)))))
            for _ in range(26)
        ]
       
        feature_size = 13 + 26 * cfg.embedding_size
        self.block_1 = Linear(
            feature_size, 1024, act='relu',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1 / math.sqrt(float(10)))),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1)))
        
        self.block_2_1 = Linear(
            1024, 1024, act='relu',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1 / math.sqrt(float(10)))),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1)))
        
        self.block_2_2 = Linear(
            1024, 512, act='relu',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1 / math.sqrt(float(10)))),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1)))
        
        self.block_2_3 = Linear(
            512, 256, act='relu',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1 / math.sqrt(float(10)))),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1)))
        
        self.block_3 = Linear(
            256, 2, act='softmax',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1 / math.sqrt(float(10)))),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1)))
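One caveat: in fluid dygraph, sublayers held only in a plain Python list (like self.embeddings above) are not registered automatically, so their parameters can be missed by parameters() and state-dict saving. A sketch of an explicit registration loop, following the add_sublayer pattern that Example #14 uses:

        # register the list-held embeddings so their parameters are tracked
        for i, emb in enumerate(self.embeddings):
            self.add_sublayer('emb_%d' % i, emb)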
Example #5
    def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
        super(SELayer, self).__init__()

        self.pool2d_gap = Pool2D(pool_type='avg', global_pooling=True)

        self._num_channels = num_channels

        med_ch = int(num_channels / reduction_ratio)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self.squeeze = Linear(num_channels,
                              med_ch,
                              act="relu",
                              param_attr=ParamAttr(
                                  initializer=fluid.initializer.Uniform(
                                      -stdv, stdv),
                                  name=name + "_sqz_weights"),
                              bias_attr=ParamAttr(name=name + '_sqz_offset'))

        stdv = 1.0 / math.sqrt(med_ch * 1.0)
        self.excitation = Linear(
            med_ch,
            num_filters,
            act="sigmoid",
            param_attr=ParamAttr(initializer=fluid.initializer.Uniform(
                -stdv, stdv),
                                 name=name + "_exc_weights"),
            bias_attr=ParamAttr(name=name + '_exc_offset'))
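The forward pass is omitted here; a minimal sketch of the squeeze-and-excitation computation these layers imply (assuming num_filters equals num_channels, so the gates can rescale the input):

    def forward(self, input):
        pool = self.pool2d_gap(input)                      # [N, C, 1, 1]
        pool = fluid.layers.reshape(pool, [-1, self._num_channels])
        squeeze = self.squeeze(pool)                       # bottleneck + relu
        excitation = self.excitation(squeeze)              # sigmoid gates, [N, C]
        # broadcast the per-channel gates over H and W
        return fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)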
Example #6
 def __init__(self, dict_dim, batch_size, seq_len):
     super(BiGRU, self).__init__()
     self.dict_dim = dict_dim
     self.emb_dim = 128
     self.hid_dim = 128
     self.fc_hid_dim = 96
     self.class_dim = 2
     self.batch_size = batch_size
     self.seq_len = seq_len
     self.embedding = Embedding(
         size=[self.dict_dim + 1, self.emb_dim],
         dtype='float32',
         param_attr=fluid.ParamAttr(learning_rate=30),
         is_sparse=False)
     h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32")
     h_0 = to_variable(h_0)
     self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.hid_dim * 3)
     self._fc2 = Linear(input_dim=self.hid_dim * 2,
                        output_dim=self.fc_hid_dim,
                        act="tanh")
     self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
                                  output_dim=self.class_dim,
                                  act="softmax")
     self._gru_forward = DynamicGRU(size=self.hid_dim,
                                    h_0=h_0,
                                    is_reverse=False)
     self._gru_backward = DynamicGRU(size=self.hid_dim,
                                     h_0=h_0,
                                     is_reverse=True)
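The snippet again stops before forward. A hedged sketch of how these pieces typically fit together (the max-pool over time is an assumption; other reductions are equally plausible):

 def forward(self, inputs):
     emb = self.embedding(inputs)               # [N, T, 128]
     fc_1 = self._fc1(emb)                      # gate pre-activations, 3 * hid_dim
     gru_fwd = self._gru_forward(fc_1)
     gru_bwd = self._gru_backward(fc_1)
     encoded = fluid.layers.concat([gru_fwd, gru_bwd], axis=2)
     encoded = fluid.layers.reduce_max(encoded, dim=1)     # [N, 2 * hid_dim]
     return self._fc_prediction(self._fc2(encoded))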
Example #7
 def __init__(self, dict_dim, batch_size, seq_len):
     super(CNN, self).__init__()
     self.dict_dim = dict_dim
     self.emb_dim = 128
     self.hid_dim = 128
     self.fc_hid_dim = 96
     self.class_dim = 2
     self.channels = 1
     self.win_size = [3, self.hid_dim]
     self.batch_size = batch_size
     self.seq_len = seq_len
     self.embedding = Embedding(
         size=[self.dict_dim + 1, self.emb_dim],
         dtype='float32',
         is_sparse=False)
     self._simple_conv_pool_1 = SimpleConvPool(
         self.channels,
         self.hid_dim,
         self.win_size,
         batch_size=self.batch_size)
     self._fc1 = Linear(
         input_dim=self.hid_dim * self.seq_len,
         output_dim=self.fc_hid_dim,
         act="softmax")
     self._fc_prediction = Linear(
         input_dim=self.fc_hid_dim, output_dim=self.class_dim, act="softmax")
Example #8
 def __init__(self, name_scope):
     super(MNIST, self).__init__(name_scope)
     # two hidden layers
     self.fc1 = Linear(input_dim=784, output_dim=10, act='sigmoid')
     self.fc2 = Linear(input_dim=10, output_dim=10, act='sigmoid')
     # output layer, no activation function
     self.fc3 = Linear(input_dim=10, output_dim=1, act=None)
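A sketch of the presumed forward pass: MNIST images are flattened to the 784 features fc1 expects before the three Linear layers run in sequence:

 def forward(self, inputs):
     x = fluid.layers.reshape(inputs, [inputs.shape[0], 784])
     x = self.fc1(x)
     x = self.fc2(x)
     return self.fc3(x)   # regression-style output, no activation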
Example #9
    def __init__(self, name, cfg, mode='train'):
        super(AttentionCluster, self).__init__()
        self.name = name
        self.cfg = cfg
        self.mode = mode
        self.is_training = (mode == 'train')
        self.get_config()

        self.fc1 = Linear(
            input_dim=36864,
            output_dim=1024,
            act='tanh',
            param_attr=ParamAttr(
                name="fc1.weights",
                initializer=fluid.initializer.MSRA(uniform=False)),
            bias_attr=ParamAttr(name="fc1.bias",
                                initializer=fluid.initializer.MSRA()))
        self.fc2 = Linear(
            input_dim=1024,
            output_dim=4096,
            act='tanh',
            param_attr=ParamAttr(
                name="fc2.weights",
                initializer=fluid.initializer.MSRA(uniform=False)),
            bias_attr=ParamAttr(name="fc2.bias",
                                initializer=fluid.initializer.MSRA()))
Example #10
 def __init__(self, dict_dim, batch_size, seq_len):
     super(BiGRU, self).__init__()
     self.dict_dim = dict_dim
     self.emb_dim = 128
     self.hid_dim = 128
     self.fc_hid_dim = 96
     self.class_dim = 2
     self.batch_size = batch_size
     self.seq_len = seq_len
     self.embedding = Embedding(
         size=[self.dict_dim + 1, self.emb_dim],
         dtype='float32',
         param_attr=fluid.ParamAttr(learning_rate=30),
         is_sparse=False)
     h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32")
     h_0 = to_variable(h_0)
     self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.hid_dim * 3)
     self._fc2 = Linear(input_dim=self.hid_dim * 2,
                        output_dim=self.fc_hid_dim,
                        act="tanh")
     self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
                                  output_dim=self.class_dim,
                                  act="softmax")
     self._encoder = BiGRUEncoder(grnn_hidden_dim=self.hid_dim,
                                  input_dim=self.hid_dim * 3,
                                  h_0=h_0,
                                  init_bound=0.1,
                                  is_bidirection=True)
Example #11
 def __init__(self, dict_dim, batch_size, seq_len):
     super(CNN, self).__init__()
     self.dict_dim = dict_dim
     self.emb_dim = 128
     self.hid_dim = 128
     self.fc_hid_dim = 96
     self.class_dim = 2
     self.channels = 1
     self.win_size = [3, self.hid_dim]
     self.batch_size = batch_size
     self.seq_len = seq_len
     self._encoder = CNNEncoder(dict_size=self.dict_dim + 1,
                                emb_dim=self.emb_dim,
                                seq_len=self.seq_len,
                                filter_size=self.win_size,
                                num_filters=self.hid_dim,
                                hidden_dim=self.hid_dim,
                                padding_idx=None,
                                act='tanh')
     self._fc1 = Linear(input_dim=self.hid_dim * self.seq_len,
                        output_dim=self.fc_hid_dim,
                        act="softmax")
     self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
                                  output_dim=self.class_dim,
                                  act="softmax")
Example #12
    def __init__(self):
        super(SimpleFCNet, self).__init__()

        param_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(
            value=0.8))
        bias_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(
            value=0.5))
        self._fcs = []
        in_channel = IMAGE_SIZE
        for hidden_size in [10, 20, 30]:
            self._fcs.append(
                Linear(
                    in_channel,
                    hidden_size,
                    act='tanh',
                    param_attr=param_attr,
                    bias_attr=bias_attr))
            in_channel = hidden_size
        self._fcs.append(
            Linear(
                in_channel,
                CLASS_NUM,
                act='softmax',
                param_attr=param_attr,
                bias_attr=bias_attr))
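IMAGE_SIZE and CLASS_NUM are module-level constants in the original source. Note that the list-registration caveat from Example #4 applies to self._fcs as well. A minimal sketch of the implied forward pass:

    def forward(self, image):
        x = fluid.layers.reshape(image, [image.shape[0], IMAGE_SIZE])
        for fc in self._fcs:
            x = fc(x)
        return x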
Example #13
    def __init__(self):
        super(SimpleNetUnusedParam, self).__init__()
        self.net_a = Linear(input_dim=10, output_dim=20)
        self.net_b = Linear(input_dim=20, output_dim=5)
        self.net_c = Linear(input_dim=5, output_dim=10)

        self.net_d = Linear(input_dim=20, output_dim=10)
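Given the class name, net_d is presumably defined but never called, which is how tests exercise unused-parameter handling; a sketch of such a forward:

    def forward(self, x):
        # net_d is deliberately left out of the computation graph
        return self.net_c(self.net_b(self.net_a(x)))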
Example #14
    def __init__(self):
        super(DNNPlus, self).__init__()
        self.name = 'DNNPlus_' + str(cfg.embedding_size) + '_' + str(
            cfg.dnn_hidden_dims[0])
        self.init_value_ = 0.1
        self.embedding_w = Embedding(
            size=[cfg.num_feat + 1, 1],
            dtype='float32',
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=self.init_value_),
                regularizer=fluid.regularizer.L1DecayRegularizer(cfg.reg)))
        self.embedding = Embedding(
            size=[cfg.num_feat + 1, cfg.embedding_size],
            dtype='float32',
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0,
                    scale=self.init_value_ /
                    math.sqrt(float(cfg.embedding_size)))))

        self.first_order_act = fluid.layers.sigmoid

        sizes = [cfg.num_field * cfg.embedding_size] + cfg.deepfm_layer_sizes + [1]
        acts = ['relu' for _ in range(len(cfg.deepfm_layer_sizes))] + [None]
        w_scales = [
            self.init_value_ / math.sqrt(float(10))
            for _ in range(len(cfg.deepfm_layer_sizes))
        ] + [self.init_value_]

        self.second_order_fc = Linear(
            cfg.deepfm_layer_sizes[0],
            1,
            act='sigmoid',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=w_scales[0])),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=self.init_value_)))
        self.add_sublayer('second_order', self.second_order_fc)

        self.linears = []
        for i in range(len(cfg.deepfm_layer_sizes) + 1):
            linear = Linear(
                sizes[i],
                sizes[i + 1],
                act=acts[i],
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.TruncatedNormalInitializer(
                        loc=0.0, scale=w_scales[i])),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.TruncatedNormalInitializer(
                        loc=0.0, scale=self.init_value_)))
            self.add_sublayer('linear_%d' % i, linear)
            self.linears.append(linear)
Example #15
 def __init__(self):
     super(MNIST, self).__init__()
     # Define two fully connected hidden layers, each with output dimension 10 and sigmoid activation
     self.fc1 = Linear(input_dim=784, output_dim=10,
                       act='sigmoid')  # 10 hidden units; adjust to the task
     self.fc2 = Linear(input_dim=10, output_dim=10, act='sigmoid')
     # Define one fully connected output layer with output dimension 1 and no activation
     self.fc3 = Linear(input_dim=10, output_dim=1, act=None)
Example #16
    def __init__(self, decoder_size):
        super(SimpleAttention, self).__init__()

        self.fc_1 = Linear(decoder_size,
                           decoder_size,
                           act=None,
                           bias_attr=False)
        self.fc_2 = Linear(decoder_size, 1, act=None, bias_attr=False)
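Only the two projections are defined here; a condensed sketch of the additive-attention forward they support (shapes assume encoder_proj is [N, T, decoder_size] and encoder_vec is [N, T, D]):

    def forward(self, encoder_vec, encoder_proj, decoder_state):
        # project the decoder state and broadcast it across the T time steps
        state_proj = fluid.layers.unsqueeze(self.fc_1(decoder_state), [1])
        state_proj = fluid.layers.expand(state_proj, [1, encoder_proj.shape[1], 1])
        mixed = fluid.layers.tanh(encoder_proj + state_proj)
        scores = fluid.layers.squeeze(self.fc_2(mixed), [2])      # [N, T]
        weights = fluid.layers.softmax(scores)
        context = fluid.layers.elementwise_mul(encoder_vec, weights, axis=0)
        return fluid.layers.reduce_sum(context, dim=1)            # [N, D]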
Example #17
 def __init__(self):
     super(MLP, self).__init__()
     SIZE = 10
     self._fc1 = Linear(784, 200, act="relu")
     self._fc2 = Linear(200, 200, act="relu")
     self._fc3 = Linear(200, 200, act="relu")
     self._fc4 = Linear(200, 10, act="softmax")
     self._fc5 = Linear(200, 10, act="softmax")
Example #18
 def __init__(self, num_classes=8):
     super(Model, self).__init__()
     self.fc1 = Linear(input_dim=10, output_dim=32,
                       act='relu')  # input-to-first-hidden connection; ReLU activation
     self.fc2 = Linear(input_dim=32, output_dim=16,
                       act='relu')  # connection between the first and second hidden layers
     self.fc3 = Linear(input_dim=16, output_dim=num_classes,
                       act='sigmoid')  # hidden-to-output connection; sigmoid on the output layer
Example #19
    def __init__(self, name_scope, num_classes=1):
        super(VAE_Linear, self).__init__(name_scope)

        # Encoder: fc1 compresses the 784-dim input to 400; fc21 and fc22
        # presumably produce the 20-dim latent mean and log-variance
        self.fc1 = Linear(784, 400)
        self.fc21 = Linear(400, 20)
        self.fc22 = Linear(400, 20)

        # Decoder: map the 20-dim latent code back to a 784-dim reconstruction
        self.fc3 = Linear(20, 400)
        self.fc4 = Linear(400, 784)
Example #20
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(LeNet, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(Conv2D(1, 6, 3, stride=1, padding=1),
                                   Pool2D(2, 'max', 2),
                                   Conv2D(6, 16, 5, stride=1, padding=0),
                                   Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, num_classes, act=classifier_activation))
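For 28x28 input, self.features produces a 16-channel 5x5 map, and flattening it yields the 400 features the first Linear expects. A sketch of the presumed forward:

    def forward(self, inputs):
        x = self.features(inputs)
        if self.num_classes > 0:
            x = fluid.layers.flatten(x, axis=1)   # [N, 16*5*5] = [N, 400]
            x = self.fc(x)
        return x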
Example #21
    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()

        # Like LeNet, AlexNet uses convolution and pooling layers to extract image features
        # Unlike LeNet, the activation function is switched to 'relu'
        # conv1's input channel count is set to 1; the input is (batch size m, 1, 28, 28)
        self.conv1 = Conv2D(num_channels=1,
                            num_filters=10,
                            filter_size=11,
                            stride=1,
                            padding=3,
                            act='relu')
        # output: (m, 10, 24, 24)
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # output: (m, 10, 12, 12)
        self.conv2 = Conv2D(num_channels=10,
                            num_filters=100,
                            filter_size=5,
                            stride=1,
                            padding=2,
                            act='relu')
        # (m,100,12,12)
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # (m,100,6,6)
        self.conv3 = Conv2D(num_channels=100,
                            num_filters=200,
                            filter_size=3,
                            stride=1,
                            padding=0,
                            act='relu')
        # (m,200,4,4)
        self.conv4 = Conv2D(num_channels=200,
                            num_filters=200,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        # (m,200,4,4)
        self.conv5 = Conv2D(num_channels=200,
                            num_filters=100,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        # (m,100,4,4)
        self.pool5 = Pool2D(pool_size=2, pool_stride=2,
                            pool_type='max')  # halves the 4x4 maps to 2x2
        # (m,100,2,2)
        self.fc1 = Linear(input_dim=400, output_dim=64, act='relu')
        self.drop_ratio1 = 0.5
        self.fc2 = Linear(input_dim=64, output_dim=64, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=64, output_dim=num_classes)
Example #22
 def __init__(self, args):
     super(Relation_module, self).__init__()
     self.args = args
     if 'imagenet' in self.args.dataset or 'cub' in self.args.dataset:
         linear_dim = 3*3*self.args.num_filters
     else:
         linear_dim = self.args.num_filters
     inp_channels = (self.args.num_filters * 2 if self.args.backbone == 'Conv4'
                     else self.args.resnet12_num_filters[-1] * 2)
     padding = 1 if self.args.dataset=='omniglot' else 0
     self.conv0 = Conv_block(num_channels=inp_channels, num_filters=64, padding=padding, pooltype=self.args.pooling_type, args=args)
     self.conv1 = Conv_block(num_channels=64, num_filters=64, padding=padding, pooltype=self.args.pooling_type, args=args)
     self.fc0 = Linear(linear_dim, 8)
     self.fc1 = Linear(8, 1)
Example #23
    def __init__(self):
        super(DRNN, self).__init__()
        self.name = 'DRNN_' + str(cfg.drnn_hidden_dim) + '_' + str(
            cfg.drnn_hidden_layer)

        self.embeddings = [
            Embedding(
                size=[cfg.sparse_feature_dim, cfg.embedding_size],
                dtype='float32',
                padding_idx=0,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.TruncatedNormalInitializer(
                        loc=0.0,
                        scale=0.1 / math.sqrt(float(cfg.embedding_size)))))
            for _ in range(26)
        ]

        feature_size = 13 + 26 * cfg.embedding_size
        self.block_1 = Linear(
            feature_size,
            cfg.drnn_hidden_dim,
            act='relu',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1 / math.sqrt(float(10)))),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1)))

        self.block_2 = Linear(
            cfg.drnn_hidden_dim,
            cfg.drnn_hidden_dim,
            act='relu',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1 / math.sqrt(float(10)))),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1)))

        self.block_3 = Linear(
            cfg.drnn_hidden_dim,
            2,
            act='softmax',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1 / math.sqrt(float(10)))),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=0.1)))
Example #24
    def __init__(self, num_classes=10):
        super(LeNet, self).__init__()

        # Build the conv/pool blocks: each conv layer uses a sigmoid activation, followed by 2x2 max pooling
        self.conv1 = Conv2D(num_channels=1, num_filters=6, filter_size=5,  act='sigmoid')
        self.pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)
        self.conv2 = Conv2D(num_channels=6, num_filters=16, filter_size=5,  act='sigmoid')
        self.pool2 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)
        # Create the third conv layer; it is not followed by a pooling layer
        self.conv3 = Conv2D(num_channels=16, num_filters=120, filter_size=5, act='sigmoid')
        # Create the fully connected layers. The conv output format is [N, C, H, W]; when fed into a fully connected
        # layer, each sample is flattened to a vector of length K = C*H*W, so a mini-batch becomes an N-by-K tensor
        self.fc1 = Linear(input_dim=120, output_dim=64, act='sigmoid')
        self.fc2 = Linear(input_dim=64, output_dim=num_classes, act='softmax')
Example #25
    def __init__(self, decoder_size, num_classes):
        super(GRUDecoderWithAttention, self).__init__()
        self.simple_attention = SimpleAttention(decoder_size)

        self.fc_1_layer = Linear(
            Config.encoder_size * 2, decoder_size * 3, bias_attr=False)
        self.fc_2_layer = Linear(
            decoder_size, decoder_size * 3, bias_attr=False)
        self.gru_unit = GRUUnit(
            size=decoder_size * 3, param_attr=None, bias_attr=None)
        self.out_layer = Linear(
            decoder_size, num_classes + 2, bias_attr=None, act='softmax')

        self.decoder_size = decoder_size
Example #26
    def __init__(self):
        super(DRNN, self).__init__()
        self.name = 'DRNN_' + str(cfg.embedding_size) + '_' + str(
            cfg.drnn_hidden_dim) + '_' + str(cfg.drnn_hidden_layer)
        self.init_value_ = 0.1
        self.embedding = Embedding(
            size=[cfg.num_feat + 1, cfg.embedding_size],
            dtype='float32',
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0,
                    scale=self.init_value_ /
                    math.sqrt(float(cfg.embedding_size)))))

        self.hidden_1 = Linear(
            cfg.num_field * cfg.embedding_size,
            cfg.drnn_hidden_dim,
            act='relu',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=self.init_value_ / math.sqrt(float(10)))),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=self.init_value_)))
        self.add_sublayer('hidden_1', self.hidden_1)

        self.hidden_2 = Linear(
            cfg.drnn_hidden_dim,
            cfg.drnn_hidden_dim,
            act='relu',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=self.init_value_ / math.sqrt(float(10)))),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=self.init_value_)))
        self.add_sublayer('hidden_2', self.hidden_2)

        self.hidden_3 = Linear(
            cfg.drnn_hidden_dim,
            1,
            act='sigmoid',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=self.init_value_ / math.sqrt(float(10)))),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.TruncatedNormalInitializer(
                    loc=0.0, scale=self.init_value_)))
        self.add_sublayer('hidden_3', self.hidden_3)
Example #27
    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()

        # Like LeNet, AlexNet uses convolution and pooling layers to extract image features
        # The difference is that AlexNet switches the activation function to relu
        self.conv1 = Conv2D(num_channels=3,
                            num_filters=96,
                            filter_size=11,
                            stride=4,
                            padding=5,
                            act='relu')
        # for each conv layer, the activation is applied inside the layer itself
        self.pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.conv2 = Conv2D(num_channels=96,
                            num_filters=256,
                            filter_size=5,
                            stride=1,
                            padding=2,
                            act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.conv3 = Conv2D(num_channels=256,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv4 = Conv2D(num_channels=384,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv5 = Conv2D(num_channels=384,
                            num_filters=256,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.pool5 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.fc1 = Linear(input_dim=12544, output_dim=4096, act='relu')
        self.drop_ratio1 = 0.5
        # One of AlexNet's improvements was the introduction of dropout:
        # applying dropout after the fully connected layers suppresses overfitting
        self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=4096,
                          output_dim=num_classes,
                          act='softmax')
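drop_ratio1 and drop_ratio2 are stored as plain floats because dropout is a functional op in fluid. A sketch of the forward pass they imply (for 3x224x224 input, the flattened conv output is 256*7*7 = 12544, matching fc1):

    def forward(self, x):
        x = self.pool1(self.conv1(x))
        x = self.pool2(self.conv2(x))
        x = self.conv4(self.conv3(x))
        x = self.pool5(self.conv5(x))
        x = fluid.layers.reshape(x, [x.shape[0], 12544])
        x = fluid.layers.dropout(self.fc1(x), self.drop_ratio1)
        x = fluid.layers.dropout(self.fc2(x), self.drop_ratio2)
        return self.fc3(x)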
Example #28
    def __init__(self,
                 rnn_hidden_size=Config.encoder_size,
                 is_test=False,
                 use_cudnn=True):
        super(EncoderNet, self).__init__()
        self.rnn_hidden_size = rnn_hidden_size
        para_attr = fluid.ParamAttr(initializer=fluid.initializer.Normal(0.0,
                                                                         0.02))
        bias_attr = fluid.ParamAttr(
            initializer=fluid.initializer.Normal(0.0, 0.02), learning_rate=2.0)
        if fluid.framework.in_dygraph_mode():
            h_0 = np.zeros(
                (Config.batch_size, rnn_hidden_size), dtype="float32")
            h_0 = to_variable(h_0)
        else:
            h_0 = fluid.layers.fill_constant(
                shape=[Config.batch_size, rnn_hidden_size],
                dtype='float32',
                value=0)
        self.ocr_convs = OCRConv(
            is_test=is_test, use_cudnn=use_cudnn)

        self.fc_1_layer = Linear(768,
                                 rnn_hidden_size * 3,
                                 param_attr=para_attr,
                                 bias_attr=False)
        self.fc_2_layer = Linear(768,
                                 rnn_hidden_size * 3,
                                 param_attr=para_attr,
                                 bias_attr=False)
        self.gru_forward_layer = DynamicGRU(
            size=rnn_hidden_size,
            h_0=h_0,
            param_attr=para_attr,
            bias_attr=bias_attr,
            candidate_activation='relu')
        self.gru_backward_layer = DynamicGRU(
            size=rnn_hidden_size,
            h_0=h_0,
            param_attr=para_attr,
            bias_attr=bias_attr,
            candidate_activation='relu',
            is_reverse=True)

        self.encoded_proj_fc = Linear(rnn_hidden_size * 2,
                                      Config.decoder_size,
                                      bias_attr=False)
Example #29
    def __init__(self):
        super(MNIST, self).__init__()

        self._simple_img_conv_pool_1 = SimpleImgConvPool(1,
                                                         20,
                                                         5,
                                                         2,
                                                         2,
                                                         act="relu")

        self._simple_img_conv_pool_2 = SimpleImgConvPool(20,
                                                         50,
                                                         5,
                                                         2,
                                                         2,
                                                         act="relu")

        self.pool_2_shape = 50 * 4 * 4
        SIZE = 10
        scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5
        self._fc = Linear(self.pool_2_shape,
                          10,
                          param_attr=fluid.param_attr.ParamAttr(
                              initializer=fluid.initializer.NormalInitializer(
                                  loc=0.0, scale=scale)),
                          act="softmax")
Example #30
 def __init__(self, layers=50, class_dim=1000):
     """
     layers, 网络层数,可以是50, 101或者152
     class_dim,分类标签的类别数
     """
     super(ResNet, self).__init__()
     self.layers = layers
     supported_layers = [50, 101, 152]
     assert layers in supported_layers, 'supported layers are {} but input layer is {}'.format(
         supported_layers, layers)
     if layers == 50:
         # ResNet50: stages 2 through 5 contain 3, 4, 6, and 3 residual blocks respectively
         depth = [3, 4, 6, 3]
     elif layers == 101:
         # ResNet101: stages 2 through 5 contain 3, 4, 23, and 3 residual blocks respectively
         depth = [3, 4, 23, 3]
     elif layers == 152:
         # ResNet152: stages 2 through 5 contain 3, 8, 36, and 3 residual blocks respectively
         depth = [3, 8, 36, 3]
     # output channel counts of the convolutions used in the residual blocks
     num_filters = [64, 128, 256, 512]
     # ResNet's first stage: one 7x7 convolution followed by a max pooling layer
     self.conv = ConvBNLayer(num_channels=3,
                             num_filters=64,
                             filter_size=7,
                             stride=2,
                             act='relu')
     self.pool2d_max = Pool2D(pool_size=3,
                              pool_stride=2,
                              pool_padding=1,
                              pool_type='max')
     # ResNet stages two through five, c2 through c5, built from BottleneckBlock residual units
     self.bottleneck_block_list = []
     num_channels = 64
     for block in range(len(depth)):
         shortcut = False
         for i in range(depth[block]):
             bottleneck_block = self.add_sublayer(
                 'bb_%d_%d' % (block, i),
                 BottleneckBlock(
                     num_channels=num_channels,
                     num_filters=num_filters[block],
                      stride=2 if i == 0 and block != 0 else
                      1,  # c3, c4, c5 use stride=2 in their first residual block; all other blocks use stride=1
                     shortcut=shortcut))
             num_channels = bottleneck_block._num_channels_out
             self.bottleneck_block_list.append(bottleneck_block)
             shortcut = True
     # apply global pooling on the output feature map of c5
     self.pool2d_avg = Pool2D(pool_size=7,
                              pool_type='avg',
                              global_pooling=True)
     # stdv sets the scale of the uniform random initialization of the fully connected layer
     stdv = 1.0 / math.sqrt(2048 * 1.0)
     # create the fully connected output layer; its output size is the number of classes
     self.out = Linear(
         input_dim=2048,
         output_dim=class_dim,
         param_attr=fluid.param_attr.ParamAttr(
             initializer=fluid.initializer.Uniform(-stdv, stdv)))
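A sketch of the forward pass these layers imply, mirroring the structure of the official Paddle ResNet tutorial:

 def forward(self, inputs):
     y = self.conv(inputs)
     y = self.pool2d_max(y)
     for bottleneck_block in self.bottleneck_block_list:
         y = bottleneck_block(y)
     y = self.pool2d_avg(y)
     y = fluid.layers.reshape(y, [y.shape[0], 2048])   # flatten [N, 2048, 1, 1]
     return self.out(y)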