Example #1
 def __init__(self, name_scope, num_classes=1):
     super(LeNet, self).__init__(name_scope)
     name_scope = self.full_name()
     # Create the convolution and pooling blocks: each conv layer uses a Sigmoid activation and is followed by a 2x2 pooling
     self.conv1 = Conv2D(name_scope,
                         num_filters=6,
                         filter_size=5,
                         act='sigmoid')
     self.pool1 = Pool2D(name_scope,
                         pool_size=2,
                         pool_stride=2,
                         pool_type='max')
     self.conv2 = Conv2D(name_scope,
                         num_filters=16,
                         filter_size=5,
                         act='sigmoid')
     self.pool2 = Pool2D(name_scope,
                         pool_size=2,
                         pool_stride=2,
                         pool_type='max')
     # create the third convolution layer
     self.conv3 = Conv2D(name_scope,
                         num_filters=120,
                         filter_size=4,
                         act='sigmoid')
     # Create the fully connected layers: the first outputs 64 neurons, the second outputs one neuron per classification label
     self.fc1 = FC(name_scope, size=64, act='sigmoid')
     self.fc2 = FC(name_scope, size=num_classes)
Example #2
File: ops.py Project: ruyijidan/Stargan-v2
    def __init__(self, channels_in, channels_out, normalize=False, downsample=False, use_bias=True, sn=False,
                 act=None):
        super(ResBlock, self).__init__()
        self.channels_in = channels_in
        self.channels_out = channels_out
        self.normalize = normalize
        self.downsample = downsample
        self.use_bias = use_bias
        self.sn = sn
        self.skip_flag = channels_in != channels_out
        self.act = act
        if self.downsample:
            self.avg_pooling0 = Pool2D(pool_size=2, pool_type="avg", pool_stride=2,
                                       pool_padding=0, global_pooling=False)
            self.avg_pooling1 = Pool2D(pool_size=2, pool_type="avg", pool_stride=2,
                                       pool_padding=0, global_pooling=False)

        self.conv1 = Conv2D(num_channels=self.channels_in, num_filters=self.channels_in,
                            filter_size=3, padding=1, param_attr=weight_initializer,
                            bias_attr=bias_initializer, stride=1, act=self.act)
        self.conv2 = Conv2D(num_channels=self.channels_in, num_filters=self.channels_out,
                            filter_size=3, padding=1, param_attr=weight_initializer,
                            bias_attr=bias_initializer, stride=1, act=self.act)
        if self.normalize:
            self.norm1 = InstanceNorm(self.channels_in)
            self.norm2 = InstanceNorm(self.channels_in)

        if self.skip_flag:
            self.conv1x1 = Conv2D(num_channels=self.channels_in, num_filters=self.channels_out,
                                  filter_size=1, padding=0, param_attr=weight_initializer,
                                  bias_attr=bias_initializer_1x1,
                                  stride=1, act=self.act)
Example #3
    def __init__(self, name_scope):
        super(MNIST, self).__init__(name_scope)
        name_scope = self.full_name()
        # Define a conv layer: num_filters=20 output channels, filter_size=5, stride=1, padding=2
        # with relu activation
        self.conv1 = Conv2D(num_channels=1,
                            num_filters=20,
                            filter_size=5,
                            stride=1,
                            padding=2,
                            act='relu')
        # Define a pooling layer: pool_size=2, pool_stride=2, max pooling
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # Define a second conv layer: num_filters=20 output channels, filter_size=5, stride=1, padding=2
        self.conv2 = Conv2D(num_channels=20,
                            num_filters=20,
                            filter_size=5,
                            stride=1,
                            padding=2,
                            act='relu')
        # Define a pooling layer: pool_size=2, pool_stride=2, max pooling
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')

        # self.linear = Linear(20*7*7, 1, act=None)
        self.linear = Linear(20 * 7 * 7, 10, act='softmax')
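
The constructor above only declares the layers. A minimal matching forward pass (a sketch, not part of the original snippet; it assumes 28x28 single-channel inputs, so the map entering the Linear layer is [N, 20, 7, 7]) could look like:

    def forward(self, inputs):
        x = self.conv1(inputs)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        # flatten [N, 20, 7, 7] to [N, 980] before the softmax Linear head
        x = fluid.layers.reshape(x, shape=[-1, 20 * 7 * 7])
        return self.linear(x)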
Example #4
 def __init__(self, layers=50, class_dim=1000):
     """
     layers, 网络层数,可以是50, 101或者152
     class_dim,分类标签的类别数
     """
     super(ResNet, self).__init__()
     self.layers = layers
     supported_layers = [50, 101, 152]
     assert layers in supported_layers, 'supported layers are {} but input layer is {}'.format(
         supported_layers, layers)
     if layers == 50:
         # ResNet50 consists of several modules; modules 2 to 5 contain 3, 4, 6 and 3 residual blocks respectively
         depth = [3, 4, 6, 3]
     elif layers == 101:
         # ResNet101 consists of several modules; modules 2 to 5 contain 3, 4, 23 and 3 residual blocks respectively
         depth = [3, 4, 23, 3]
     elif layers == 152:
         # ResNet152 consists of several modules; modules 2 to 5 contain 3, 8, 36 and 3 residual blocks respectively
         depth = [3, 8, 36, 3]
     # output channel counts of the convolutions used in the residual blocks
     num_filters = [64, 128, 256, 512]
     # The first ResNet module: a 7x7 convolution followed by a max pooling layer
     self.conv = ConvBNLayer(num_channels=3,
                             num_filters=64,
                             filter_size=7,
                             stride=2,
                             act='relu')
     self.pool2d_max = Pool2D(pool_size=3,
                              pool_stride=2,
                              pool_padding=1,
                              pool_type='max')
     # ResNet residual modules two to five: c2, c3, c4, c5 (BottleneckBlock is the residual block)
     self.bottleneck_block_list = []
     num_channels = 64
     for block in range(len(depth)):
         shortcut = False
         for i in range(depth[block]):
             bottleneck_block = self.add_sublayer(
                 'bb_%d_%d' % (block, i),
                 BottleneckBlock(
                     num_channels=num_channels,
                     num_filters=num_filters[block],
                      stride=2 if i == 0 and block != 0 else 1,
                      # c3, c4 and c5 use stride=2 in their first residual block; all other residual blocks use stride=1
                     shortcut=shortcut))
             num_channels = bottleneck_block._num_channels_out
             self.bottleneck_block_list.append(bottleneck_block)
             shortcut = True
     # apply global pooling on the output feature map of c5
     self.pool2d_avg = Pool2D(pool_size=7,
                              pool_type='avg',
                              global_pooling=True)
     # stdv bounds the uniform random initialization of the fully connected layer's weights
     stdv = 1.0 / math.sqrt(2048 * 1.0)
     # Create the fully connected output layer, sized to the number of classes
     self.out = Linear(
         input_dim=2048,
         output_dim=class_dim,
         param_attr=fluid.param_attr.ParamAttr(
             initializer=fluid.initializer.Uniform(-stdv, stdv)))
Example #5
 def __init__(self, in_channels, num_classes, name=None):
     super(InceptionAux, self).__init__()
     self.num_classes = num_classes
     self.pool0 = Pool2D(pool_size=5, pool_stride=3, pool_type='avg')
     self.conv0 = ConvBNLayer(in_channels, 128, 1, name=name + '.conv0')
     self.conv1 = ConvBNLayer(128, 768, 5, name=name + '.conv1')
     self.pool1 = Pool2D(global_pooling=True, pool_type='avg')
Example #6
    def __init__(self):
        super(GoogLeNet, self).__init__()
        # 1
        self.conv1 = Conv2D(num_channels=3, num_filters=64, filter_size=7, padding=3, act='relu')
        self.pool1 = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

        # 2
        self.conv2_1 = Conv2D(num_channels=64, num_filters=64, filter_size=1, act='relu')
        self.conv2_2 = Conv2D(num_channels=64, num_filters=192, filter_size=3, padding=1, act='relu')
        self.pool2 = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

        # 3
        self.block3_1 = Inception(192, 64, (96, 128), (16, 32), 32)
        self.block3_2 = Inception(256, 128, (128, 192), (32, 96), 64)
        self.pool3 = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

        # 4
        self.block4_1 = Inception(480, 192, (96, 208), (16, 48), 64)
        self.block4_2 = Inception(512, 160, (112, 224), (24, 64), 64)
        self.block4_3 = Inception(512, 128, (128, 256), (24, 64), 64)
        self.block4_4 = Inception(512, 112, (144, 288), (32, 64), 64)
        self.block4_5 = Inception(528, 256, (160, 320), (32, 128), 128)
        self.pool4 = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

        # 5
        self.block5_1 = Inception(832, 256, (160, 320), (32, 128), 128)
        self.block5_2 = Inception(832, 384, (192, 384), (48, 128), 128)
        self.pool5 = Pool2D(pool_stride=1, global_pooling=True, pool_type='avg')

        self.fc = Linear(input_dim=1024, output_dim=1, act=None)
Example #7
 def __init__(self, name_scope):
     super(MNIST, self).__init__(name_scope)
     name_scope = self.full_name()
     # Define a conv layer: 20 output channels, filter size 5, stride 1, padding 2, relu activation
     self.conv1 = Conv2D(name_scope,
                         num_filters=20,
                         filter_size=5,
                         stride=1,
                         padding=2,
                         act='relu')
     # Define a pooling layer: pool size 2, max pooling
     self.pool1 = Pool2D(name_scope,
                         pool_size=2,
                         pool_stride=2,
                         pool_type='max')
     # Define a conv layer: 20 output channels, filter size 5, stride 1, padding 2, relu activation
     self.conv2 = Conv2D(name_scope,
                         num_filters=20,
                         filter_size=5,
                         stride=1,
                         padding=2,
                         act='relu')
     # Define a pooling layer: pool size 2, max pooling
     self.pool2 = Pool2D(name_scope,
                         pool_size=2,
                         pool_stride=2,
                         pool_type='max')
     # Define a fully connected layer with 10 output nodes and softmax activation
     self.fc = FC(name_scope, size=10, act='softmax')
Example #8
    def __init__(self, name_scope, config, mode):
        super(TSM_ResNet, self).__init__(name_scope)

        self.layers = config.MODEL.num_layers
        self.seg_num = config.MODEL.seg_num
        self.class_dim = config.MODEL.num_classes
        self.reshape_list = [
            config.MODEL.seglen * 3, config[mode.upper()]['target_size'],
            config[mode.upper()]['target_size']
        ]

        if self.layers == 50:
            depth = [3, 4, 6, 3]
        else:
            raise NotImplementedError
        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(num_channels=3,
                                num_filters=64,
                                filter_size=7,
                                stride=2,
                                act='relu')
        self.pool2d_max = Pool2D(pool_size=3,
                                 pool_stride=2,
                                 pool_padding=1,
                                 pool_type='max')

        self.bottleneck_block_list = []
        num_channels = 64

        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(num_channels=num_channels,
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    shortcut=shortcut,
                                    seg_num=self.seg_num))
                num_channels = int(bottleneck_block._num_channels_out)
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True
        self.pool2d_avg = Pool2D(pool_size=7,
                                 pool_type='avg',
                                 global_pooling=True)

        import math
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.out = Linear(
            2048,
            self.class_dim,
            act="softmax",
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)),
            bias_attr=fluid.param_attr.ParamAttr(
                learning_rate=2.0, regularizer=fluid.regularizer.L2Decay(0.)))
Example #9
    def __init__(self, name_scope, layers=50, class_dim=102):
        super(ResNet, self).__init__(name_scope)

        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(
            self.full_name(),
            num_channels=3,
            num_filters=64,
            filter_size=7,
            stride=2,
            act='relu')
        self.pool2d_max = Pool2D(
            self.full_name(),
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        self.bottleneck_block_list = []
        num_channels = 64
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(
                        self.full_name(),
                        num_channels=num_channels,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        shortcut=shortcut))
                num_channels = bottleneck_block._num_channels_out
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = Pool2D(
            self.full_name(), pool_size=7, pool_type='avg', global_pooling=True)

        import math
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.out = FC(self.full_name(),
                      size=class_dim,
                      act='softmax',
                      param_attr=fluid.param_attr.ParamAttr(
                          initializer=fluid.initializer.Uniform(-stdv, stdv)))
Example #10
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(LeNet, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(Conv2D(1, 6, 3, stride=1, padding=1),
                                   Pool2D(2, 'max', 2),
                                   Conv2D(6, 16, 5, stride=1, padding=0),
                                   Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, num_classes, act=classifier_activation))
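
A short driver for this LeNet (hypothetical usage code, not from the original project; fluid.dygraph.guard and to_variable are the standard fluid dygraph entry points, and the 400 matches the flattened [N, 16, 5, 5] feature map for 28x28 inputs):

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        model = LeNet()
        x = fluid.dygraph.to_variable(
            np.random.randn(4, 1, 28, 28).astype('float32'))
        feats = model.features(x)                             # [4, 16, 5, 5]
        feats = fluid.layers.reshape(feats, shape=[-1, 400])  # flatten for the first Linear
        probs = model.fc(feats)                               # [4, 10]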
Example #11
File: resnet.py Project: xyzhou-puck/hapi
    def __init__(self, depth=50, num_classes=1000):
        super(ResNet, self).__init__()

        layer_config = {
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
        }
        assert depth in layer_config.keys(), \
            "supported depth are {} but input layer is {}".format(
                layer_config.keys(), depth)

        layers = layer_config[depth]
        num_in = [64, 256, 512, 1024]
        num_out = [64, 128, 256, 512]

        self.conv = ConvBNLayer(num_channels=3,
                                num_filters=64,
                                filter_size=7,
                                stride=2,
                                act='relu')
        self.pool = Pool2D(pool_size=3,
                           pool_stride=2,
                           pool_padding=1,
                           pool_type='max')

        self.layers = []
        for idx, num_blocks in enumerate(layers):
            blocks = []
            shortcut = False
            for b in range(num_blocks):
                block = BottleneckBlock(
                    num_channels=num_in[idx] if b == 0 else num_out[idx] * 4,
                    num_filters=num_out[idx],
                    stride=2 if b == 0 and idx != 0 else 1,
                    shortcut=shortcut)
                blocks.append(block)
                shortcut = True
            layer = self.add_sublayer("layer_{}".format(idx),
                                      Sequential(*blocks))
            self.layers.append(layer)

        self.global_pool = Pool2D(pool_size=7,
                                  pool_type='avg',
                                  global_pooling=True)

        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self.fc_input_dim = num_out[-1] * 4 * 1 * 1
        self.fc = Linear(
            self.fc_input_dim,
            num_classes,
            act='softmax',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)))
Example #12
    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()

        # Like LeNet, AlexNet uses convolution and pooling layers to extract image features
        # unlike LeNet, the activation function is changed to 'relu'
        # conv1's input channel count is set to 1 here; the input is (batch size m, 1, 28, 28)
        self.conv1 = Conv2D(num_channels=1,
                            num_filters=10,
                            filter_size=11,
                            stride=1,
                            padding=3,
                            act='relu')
        # output: (m, 10, 24, 24)
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # output: (m, 10, 12, 12)
        self.conv2 = Conv2D(num_channels=10,
                            num_filters=100,
                            filter_size=5,
                            stride=1,
                            padding=2,
                            act='relu')
        # (m, 100, 12, 12)
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # (m, 100, 6, 6)
        self.conv3 = Conv2D(num_channels=100,
                            num_filters=200,
                            filter_size=3,
                            stride=1,
                            padding=0,
                            act='relu')
        # (m, 200, 4, 4)
        self.conv4 = Conv2D(num_channels=200,
                            num_filters=200,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        # (m, 200, 4, 4)
        self.conv5 = Conv2D(num_channels=200,
                            num_filters=100,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        # (m, 100, 4, 4)
        self.pool5 = Pool2D(pool_size=2, pool_stride=2,
                            pool_type='max')  # halves the 4x4 map to 2x2
        # (m, 100, 2, 2)
        self.fc1 = Linear(input_dim=400, output_dim=64, act='relu')
        self.drop_ratio1 = 0.5
        self.fc2 = Linear(input_dim=64, output_dim=64, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=64, output_dim=num_classes)
Example #13
 def __init__(self, num_classes=10):
     super(GoogLeNet, self).__init__()
     # GoogLeNet consists of five modules, each followed by a pooling layer
     # The first module contains one conv layer and a 3x3 max pooling
     self.conv1 = Conv2D(num_channels=3,
                         num_filters=64,
                         filter_size=7,
                         padding=3,
                         act='relu')
     self.pool1 = Pool2D(pool_size=3,
                         pool_stride=2,
                         pool_padding=1,
                         pool_type='max')
     # The second module contains two conv layers and a 3x3 max pooling
     self.conv2_1 = Conv2D(num_channels=64,
                           num_filters=64,
                           filter_size=1,
                           act='relu')
     self.conv2_2 = Conv2D(num_channels=64,
                           num_filters=192,
                           filter_size=3,
                           padding=1,
                           act='relu')
     self.pool2 = Pool2D(pool_size=3,
                         pool_stride=2,
                         pool_padding=1,
                         pool_type='max')
     # The third module contains two Inception blocks and a 3x3 max pooling
     self.block3_1 = Inception(192, 64, (96, 128), (16, 32), 32)
     self.block3_2 = Inception(256, 128, (128, 192), (32, 96), 64)
     self.pool3 = Pool2D(pool_size=3,
                         pool_stride=2,
                         pool_padding=1,
                         pool_type='max')
     # The fourth module contains five Inception blocks and a 3x3 max pooling
     self.block4_1 = Inception(480, 192, (96, 208), (16, 48), 64)
     self.block4_2 = Inception(512, 160, (112, 224), (24, 64), 64)
     self.block4_3 = Inception(512, 128, (128, 256), (24, 64), 64)
     self.block4_4 = Inception(512, 112, (144, 288), (32, 64), 64)
     self.block4_5 = Inception(528, 256, (160, 320), (32, 128), 128)
     self.pool4 = Pool2D(pool_size=3,
                         pool_stride=2,
                         pool_padding=1,
                         pool_type='max')
     # The fifth module contains two Inception blocks
     self.block5_1 = Inception(832, 256, (160, 320), (32, 128), 128)
     self.block5_2 = Inception(832, 384, (192, 384), (48, 128), 128)
     # Global average pooling: with global_pooling=True the whole feature map is pooled, so pool_stride has no effect
     self.pool5 = Pool2D(pool_stride=1,
                         global_pooling=True,
                         pool_type='avg')
     self.fc = Linear(input_dim=1024, output_dim=num_classes, act='softmax')
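
A forward pass consistent with these definitions (a sketch; the original snippet shows only the constructor, and no auxiliary classifiers are assumed):

 def forward(self, x):
     x = self.pool1(self.conv1(x))
     x = self.pool2(self.conv2_2(self.conv2_1(x)))
     x = self.pool3(self.block3_2(self.block3_1(x)))
     x = self.block4_3(self.block4_2(self.block4_1(x)))
     x = self.pool4(self.block4_5(self.block4_4(x)))
     x = self.pool5(self.block5_2(self.block5_1(x)))
     # global average pooling leaves [N, 1024, 1, 1]
     x = fluid.layers.reshape(x, shape=[-1, 1024])
     return self.fc(x)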
Example #14
 def __init__(self):
     super(MNIST, self).__init__()
     
     # Define a conv layer with relu activation
     self.conv1 = Conv2D(num_channels=1, num_filters=20, filter_size=5, stride=1, padding=2, act='relu')
     # Define a pooling layer: pool size 2, stride 2, max pooling
     self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
     # Define a conv layer with relu activation
     self.conv2 = Conv2D(num_channels=20, num_filters=20, filter_size=5, stride=1, padding=2, act='relu')
     # Define a pooling layer: pool size 2, stride 2, max pooling
     self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
     # Define a fully connected layer with 10 output nodes
     self.fc = Linear(input_dim=980, output_dim=10, act='softmax')
Example #15
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                weight_attr=conv2d_w1_attr,
                bias_attr=conv2d_b1_attr),
            Pool2D(
                pool_size=2, pool_type='max', pool_stride=2),
            Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0,
                weight_attr=conv2d_w2_attr,
                bias_attr=conv2d_b2_attr),
            Pool2D(
                pool_size=2, pool_type='max', pool_stride=2))

        self.fc = Sequential(
            Linear(
                in_features=400,
                out_features=120,
                weight_attr=fc_w1_attr,
                bias_attr=fc_b1_attr),
            Linear(
                in_features=120,
                out_features=84,
                weight_attr=fc_w2_attr,
                bias_attr=fc_b2_attr),
            Linear(
                in_features=84,
                out_features=num_classes,
                weight_attr=fc_w3_attr,
                bias_attr=fc_b3_attr),
            Softmax())
Example #16
    def __init__(self, name_scope, layers, dropout_prob, class_dim=5):
        super(DenseNet, self).__init__(name_scope)

        self.layers = layers
        self.dropout_prob = dropout_prob

        layer_count_dict = {
            121: (32, [6, 12, 24, 16]),
            169: (32, [6, 12, 32, 32]),
            201: (32, [6, 12, 48, 32]),
            161: (48, [6, 12, 36, 24])
        }
        layer_conf = layer_count_dict[self.layers]

        self.conv1 = Conv2D(num_channels=3,
                            num_filters=layer_conf[0] * 2,
                            filter_size=7,
                            stride=2,
                            padding=3,
                            groups=1,
                            act=None,
                            bias_attr=False)

        self.pool1 = Pool2D(pool_size=3,
                            pool_padding=1,
                            pool_stride=2,
                            pool_type='max')
        channels = layer_conf[0] * 2

        self.convs = []
        for i in range(len(layer_conf[1]) - 1):
            conv_block = self.add_sublayer(
                'bb_%d_%d' % (i, i),
                LoopLayer(self.full_name(),
                          num_filters=layer_conf[0],
                          num_channels=channels,
                          block_num=layer_conf[1][i],
                          drop_out_prob=self.dropout_prob))
            channels = layer_conf[0]
            self.convs.append(conv_block)

        self.conv3 = DenseBlock(self.full_name(),
                                num_filters=layer_conf[1][-1],
                                num_channels=layer_conf[0],
                                block_num=layer_conf[0],
                                drop_out_prob=self.dropout_prob)

        self.pool2 = Pool2D(global_pooling=True, pool_type='avg')

        self.fc = Linear(input_dim=544, output_dim=class_dim, act='softmax')
Example #17
    def __init__(self, num_classes=10):
        super(LeNet, self).__init__()

        # Create the convolution and pooling blocks: each conv layer uses a sigmoid activation followed by a 2x2 pooling
        self.conv1 = Conv2D(num_channels=1, num_filters=6, filter_size=5,  act='sigmoid')
        self.pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)
        self.conv2 = Conv2D(num_channels=6, num_filters=16, filter_size=5,  act='sigmoid')
        self.pool2 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)
        # Create the third conv layer; this one is not followed by a pooling layer
        self.conv3 = Conv2D(num_channels=16, num_filters=120, filter_size=5, act='sigmoid')
        # Create the fully connected layers. The conv output has layout [N, C, H, W]; when fed into a fully
        # connected layer it is flattened automatically: each sample becomes a vector of length K = C*H*W,
        # so a mini-batch becomes an N x K matrix
        self.fc1 = Linear(input_dim=120, output_dim=64, act='sigmoid')
        self.fc2 = Linear(input_dim=64, output_dim=num_classes, act='softmax')
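
Note the spatial arithmetic this network implies: with 32x32 inputs (as in the classic LeNet), conv1/pool1/conv2/pool2 shrink the map to 5x5 and conv3 (filter_size=5) then yields [N, 120, 1, 1], which matches fc1's input_dim of 120. A sketch of the corresponding forward pass (not part of the original snippet):

    def forward(self, x):
        x = self.pool1(self.conv1(x))
        x = self.pool2(self.conv2(x))
        x = self.conv3(x)                             # [N, 120, 1, 1] for 32x32 inputs
        x = fluid.layers.reshape(x, shape=[-1, 120])  # the flattening described above
        x = self.fc1(x)
        return self.fc2(x)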
Example #18
    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()

        # Like LeNet, AlexNet uses convolution and pooling layers to extract image features
        # the difference is that AlexNet switches the activation function to relu
        self.conv1 = Conv2D(num_channels=3,
                            num_filters=96,
                            filter_size=11,
                            stride=4,
                            padding=5,
                            act='relu')
        # for each conv layer, the activation is applied inside the conv layer itself
        self.pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.conv2 = Conv2D(num_channels=96,
                            num_filters=256,
                            filter_size=5,
                            stride=1,
                            padding=2,
                            act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.conv3 = Conv2D(num_channels=256,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv4 = Conv2D(num_channels=384,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv5 = Conv2D(num_channels=384,
                            num_filters=256,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.pool5 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.fc1 = Linear(input_dim=12544, output_dim=4096, act='relu')
        self.drop_ratio1 = 0.5
        # One of AlexNet's improvements is the introduction of dropout:
        # dropout is applied after the fully connected layers to curb overfitting
        self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=4096,
                          output_dim=num_classes,
                          act='softmax')
Example #19
File: 527228.py Project: zxy3/data_enhance
 def __init__(self, name_scope, c1, c2, c3, c4, **kwargs):
     '''
     Implementation of the Inception module.
     name_scope: module name, a string
     c1: output channel count of the 1x1 convolution on the first branch, an integer
     c2: output channel counts on the second branch, a tuple or list;
            c2[0] is the 1x1 convolution's output channels, c2[1] the 3x3 convolution's
     c3: output channel counts on the third branch, a tuple or list;
            c3[0] is the 1x1 convolution's output channels, c3[1] the 5x5 convolution's
     c4: output channel count of the 1x1 convolution on the fourth branch, an integer
     '''
     super(Inception, self).__init__(name_scope)
     # create the operations used on each branch of the Inception block
     self.p1_1 = Conv2D(self.full_name(), num_filters=c1, 
                        filter_size=1, act='relu')
     self.p2_1 = Conv2D(self.full_name(), num_filters=c2[0], 
                        filter_size=1, act='relu')
     self.p2_2 = Conv2D(self.full_name(), num_filters=c2[1], 
                        filter_size=3, padding=1, act='relu')
     self.p3_1 = Conv2D(self.full_name(), num_filters=c3[0], 
                        filter_size=1, act='relu')
     self.p3_2 = Conv2D(self.full_name(), num_filters=c3[1], 
                        filter_size=5, padding=2, act='relu')
     self.p4_1 = Pool2D(self.full_name(), pool_size=3, 
                        pool_stride=1,  pool_padding=1, 
                        pool_type='max')
     self.p4_2 = Conv2D(self.full_name(), num_filters=c4, 
                        filter_size=1, act='relu')
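
The four branches are combined by concatenating along the channel axis; a forward pass consistent with this constructor (a sketch using fluid.layers.concat, not quoted from the source):

 def forward(self, x):
     p1 = self.p1_1(x)
     p2 = self.p2_2(self.p2_1(x))
     p3 = self.p3_2(self.p3_1(x))
     p4 = self.p4_2(self.p4_1(x))
     # output channels: c1 + c2[1] + c3[1] + c4
     return fluid.layers.concat([p1, p2, p3, p4], axis=1)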
Example #20
    def __init__(self, args):

        super(ResNet12, self).__init__()
        self.args = args
        if self.args.dataset == 'omniglot':
            input_channels = 1
        else:
            input_channels = 3
        if self.args.method == 'relationnet':
            pooltype = None
        else:
            pooltype = self.args.pooling_type
        self.res_block0 = Residual_Block(
            num_channels=input_channels,
            num_filters=self.args.resnet12_num_filters[0],
            pooltype=self.args.pooling_type)
        self.res_block1 = Residual_Block(
            num_channels=self.args.resnet12_num_filters[0],
            num_filters=self.args.resnet12_num_filters[1],
            pooltype=self.args.pooling_type)
        self.res_block2 = Residual_Block(
            num_channels=self.args.resnet12_num_filters[1],
            num_filters=self.args.resnet12_num_filters[2],
            pooltype=pooltype)
        self.res_block3 = Residual_Block(
            num_channels=self.args.resnet12_num_filters[2],
            num_filters=self.args.resnet12_num_filters[3],
            pooltype=pooltype)
        self.gap = Pool2D(pool_type='avg', global_pooling=True)
        if self.args.if_dropout:
            self.dropout = Dropout(p=0.5)
Example #21
    def __init__(self,
                 name_scope,
                 input_channels,
                 filter_list,
                 pool_mode="avg"):
        super(InceptionBasic, self).__init__(name_scope)
        #1*1
        self.branch_1 = ConvBNLayer(self.full_name(),
                                    num_channels=input_channels,
                                    num_filters=filter_list[0],
                                    filter_size=1,
                                    stride=1,
                                    act='relu')

        #1*1 + 3*3
        self.branch_2_a = ConvBNLayer(self.full_name(),
                                      num_channels=input_channels,
                                      num_filters=filter_list[1],
                                      filter_size=1,
                                      stride=1,
                                      act='relu')
        self.branch_2_b = ConvBNLayer(self.full_name(),
                                      num_channels=filter_list[1],
                                      num_filters=filter_list[2],
                                      filter_size=3,
                                      stride=1,
                                      act='relu')  # note: padding = 1, i.e. (3-1)//2

        #1*1 + 3*3 + 3*3
        self.branch_3_a = ConvBNLayer(self.full_name(),
                                      num_channels=input_channels,
                                      num_filters=filter_list[3],
                                      filter_size=1,
                                      stride=1,
                                      act='relu')
        self.branch_3_b = ConvBNLayer(self.full_name(),
                                      num_channels=filter_list[3],
                                      num_filters=filter_list[4],
                                      filter_size=3,
                                      stride=1,
                                      act='relu')
        self.branch_3_c = ConvBNLayer(self.full_name(),
                                      num_channels=filter_list[4],
                                      num_filters=filter_list[5],
                                      filter_size=3,
                                      stride=1,
                                      act='relu')

        #avg_pool3*3 + 1*1
        self.branch_4_a = Pool2D(pool_size=3,
                                 pool_stride=1,
                                 pool_padding=1,
                                 pool_type=pool_mode,
                                 ceil_mode=True)
        self.branch_4_b = ConvBNLayer(self.full_name(),
                                      num_channels=input_channels,
                                      num_filters=filter_list[6],
                                      filter_size=1,
                                      stride=1,
                                      act='relu')
Example #22
    def __init__(self, in_channels, name=None):
        super(InceptionD, self).__init__()
        self.branch3x3_1 = ConvBNLayer(in_channels,
                                       192,
                                       1,
                                       name=name + '.branch3x3_1')
        self.branch3x3_2 = ConvBNLayer(192,
                                       320,
                                       3,
                                       stride=2,
                                       name=name + '.branch3x3_2')

        self.branch7x7x3_1 = ConvBNLayer(in_channels,
                                         192,
                                         1,
                                         name=name + '.branch7x7x3_1')
        self.branch7x7x3_2 = ConvBNLayer(192,
                                         192, (1, 7),
                                         padding=(0, 3),
                                         name=name + '.branch7x7x3_2')
        self.branch7x7x3_3 = ConvBNLayer(192,
                                         192, (7, 1),
                                         padding=(3, 0),
                                         name=name + '.branch7x7x3_3')
        self.branch7x7x3_4 = ConvBNLayer(192,
                                         192,
                                         3,
                                         stride=2,
                                         name=name + '.branch7x7x3_4')

        self.branch_pool = Pool2D(pool_size=3, pool_stride=2, pool_type='max')
Example #23
    def __init__(self, num_channels, num_filters, pooltype):

        super(Residual_Block, self).__init__()
        self.short_cut = BASIC_BLOCK(num_channels=num_channels,
                                     num_filters=num_filters,
                                     filter_size=1,
                                     padding=0)
        self.conv0 = BASIC_BLOCK(num_channels=num_channels,
                                 num_filters=num_filters,
                                 filter_size=3,
                                 padding=1)
        self.conv1 = BASIC_BLOCK(num_channels=num_filters,
                                 num_filters=num_filters,
                                 filter_size=3,
                                 padding=1)
        self.conv2 = BASIC_BLOCK(num_channels=num_filters,
                                 num_filters=num_filters,
                                 filter_size=3,
                                 padding=1)
        if pooltype:
            self.pooling = Pool2D(pool_size=2,
                                  pool_stride=2,
                                  pool_type=pooltype)
        else:
            self.pooling = None
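
A plausible forward pass for this block (a sketch; it assumes the 1x1 short_cut projection is added to the three-conv main path before the optional pooling):

    def forward(self, x):
        shortcut = self.short_cut(x)               # 1x1 projection to num_filters channels
        y = self.conv2(self.conv1(self.conv0(x)))
        y = y + shortcut                           # residual addition
        if self.pooling is not None:
            y = self.pooling(y)
        return y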
Example #24
    def __init__(self, name_scope, num_filter):
        super(conv_block_2, self).__init__(name_scope)

        self.conv1 = Conv2D(
            self.full_name(),
            num_filters=num_filter,
            filter_size=3,
            stride=1,
            padding=1,
            act='relu',
            use_cudnn=use_cudnn,
            param_attr=fluid.param_attr.ParamAttr(name="1_weights"),
            bias_attr=False)

        self.conv2 = Conv2D(
            self.full_name(),
            num_filters=num_filter,
            filter_size=3,
            stride=1,
            padding=1,
            act='relu',
            use_cudnn=use_cudnn,
            param_attr=fluid.param_attr.ParamAttr(name="2_weights"),
            bias_attr=False)

        self.pool1 = Pool2D(self.full_name(),
                            pool_size=2,
                            pool_type='max',
                            pool_stride=2,
                            use_cudnn=use_cudnn)

        self.num_filter = num_filter
Example #25
 def __init__(self, name_scope, num_filters, filter_size, padding):
     """
     num_convs, 卷积层的数目
     num_filters, 卷积层的输出通道数,在同一个Incepition块内,卷积层输出通道数是一样的
     """
     super(vgg_block, self).__init__(name_scope)
     self.conv_list = []
     for i in range(2):
         conv_layer = self.add_sublayer(
             'conv_' + str(i),
             Conv2D(self.full_name(),
                    num_filters=num_filters,
                    filter_size=filter_size,
                    padding=padding))
         batch_norm = self.add_sublayer(
             'bn_' + str(i),
             BatchNorm(self.full_name(),
                       num_channels=num_filters,
                       act='relu'))
         self.conv_list.append(conv_layer)
         self.conv_list.append(batch_norm)
     self.pool = Pool2D(self.full_name(),
                        pool_stride=2,
                        pool_size=2,
                        pool_type='max')
Example #26
 def __init__(self, c_cur, stride, method):
     super(MixedOp, self).__init__()
     self._method = method
     self._k = 4 if self._method == "PC-DARTS" else 1
     self.mp = Pool2D(
         pool_size=2,
         pool_stride=2,
         pool_type='max',
     )
     ops = []
     for primitive in PRIMITIVES:
         op = OPS[primitive](c_cur // self._k, stride, False)
         if 'pool' in primitive:
             gama = ParamAttr(
                 initializer=fluid.initializer.Constant(value=1),
                 trainable=False)
             beta = ParamAttr(
                 initializer=fluid.initializer.Constant(value=0),
                 trainable=False)
             BN = BatchNorm(c_cur // self._k,
                            param_attr=gama,
                            bias_attr=beta)
             op = fluid.dygraph.Sequential(op, BN)
         ops.append(op)
     self._ops = fluid.dygraph.LayerList(ops)
Example #27
 def __init__(self, num_channels=64, num_filters=64, padding=0, pooltype=None, args=None):
     
     super(Conv_block, self).__init__()
     self.args = args
     self.conv = Conv2D(num_channels=num_channels, num_filters=num_filters, filter_size=3, stride=1, padding=padding)
     self.batch_norm = BatchNorm(num_filters)
     self.pooling = Pool2D(pool_size=2, pool_stride=2, pool_type=pooltype)
Example #28
    def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
        super(SELayer, self).__init__()

        self.pool2d_gap = Pool2D(pool_type='avg', global_pooling=True)

        self._num_channels = num_channels

        med_ch = int(num_channels / reduction_ratio)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self.squeeze = Linear(num_channels,
                              med_ch,
                              act="relu",
                              param_attr=ParamAttr(
                                  initializer=fluid.initializer.Uniform(
                                      -stdv, stdv),
                                  name=name + "_sqz_weights"),
                              bias_attr=ParamAttr(name=name + '_sqz_offset'))

        stdv = 1.0 / math.sqrt(med_ch * 1.0)
        self.excitation = Linear(
            med_ch,
            num_filters,
            act="sigmoid",
            param_attr=ParamAttr(initializer=fluid.initializer.Uniform(
                -stdv, stdv),
                                 name=name + "_exc_weights"),
            bias_attr=ParamAttr(name=name + '_exc_offset'))
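
The constructor implements the squeeze-and-excitation pattern; a sketch of the matching forward pass (assuming num_filters equals num_channels so the gates can rescale the input, and using elementwise_mul for the channel-wise broadcast):

    def forward(self, input):
        pool = self.pool2d_gap(input)              # global average pool -> [N, C, 1, 1]
        pool = fluid.layers.reshape(pool, shape=[-1, self._num_channels])
        squeeze = self.squeeze(pool)               # relu bottleneck
        excitation = self.excitation(squeeze)      # per-channel gates in (0, 1)
        excitation = fluid.layers.reshape(
            excitation, shape=[-1, self._num_channels, 1, 1])
        # rescale each channel of the input by its gate
        return fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)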
Example #29
    def __init__(self, bottleneck_params, in_channels=3, class_dim=1024):
        super(Xception, self).__init__()

        self.convbn1 = ConvBN(in_channels, 32, 3, 2, act='relu')
        self.convbn2 = ConvBN(32, 64, 3, 1, act='relu')

        in_channel = 64
        self.entry_flow, in_channel = self.block_flow(
            block_num=bottleneck_params['entry_flow'][0],
            strides=bottleneck_params['entry_flow'][1],
            chns=bottleneck_params['entry_flow'][2],
            in_channel=in_channel)
            
        self.middle_flow, in_channel = self.block_flow(
            block_num=bottleneck_params['middle_flow'][0],
            strides=bottleneck_params['middle_flow'][1],
            chns=bottleneck_params['middle_flow'][2],
            in_channel=in_channel)
            
        self.exit_flow, in_channel = self.exit_block_flow(
            block_num=bottleneck_params['exit_flow'][0],
            strides=bottleneck_params['exit_flow'][1],
            chns=bottleneck_params['exit_flow'][2],
            in_channel=in_channel)

        self.pool = Pool2D(pool_size=7, pool_type='avg', global_pooling=True)

        self.feature_dim = 2048

        import math 
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.linear = Linear(self.feature_dim, class_dim, act='softmax',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)))
Example #30
    def __init__(self,
                 scale=1.0,
                 num_classes=1000,
                 with_pool=True,
                 classifier_activation='softmax'):
        super(MobileNetV2, self).__init__()
        self.scale = scale
        self.num_classes = num_classes
        self.with_pool = with_pool

        bottleneck_params_list = [
            (1, 16, 1, 1),
            (6, 24, 2, 2),
            (6, 32, 3, 2),
            (6, 64, 4, 2),
            (6, 96, 3, 1),
            (6, 160, 3, 2),
            (6, 320, 1, 1),
        ]

        self._conv1 = ConvBNLayer(
            num_channels=3,
            num_filters=int(32 * scale),
            filter_size=3,
            stride=2,
            padding=1)

        self._invl = []
        i = 1
        in_c = int(32 * scale)
        for layer_setting in bottleneck_params_list:
            t, c, n, s = layer_setting
            i += 1
            tmp = self.add_sublayer(
                sublayer=InvresiBlocks(
                    in_c=in_c, t=t, c=int(c * scale), n=n, s=s),
                name='conv' + str(i))
            self._invl.append(tmp)
            in_c = int(c * scale)

        self._out_c = int(1280 * scale) if scale > 1.0 else 1280
        self._conv9 = ConvBNLayer(
            num_channels=in_c,
            num_filters=self._out_c,
            filter_size=1,
            stride=1,
            padding=0)

        if with_pool:
            self._pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)

        if num_classes > 0:
            tmp_param = ParamAttr(name=self.full_name() + "fc10_weights")
            self._fc = Linear(
                self._out_c,
                num_classes,
                act=classifier_activation,
                param_attr=tmp_param,
                bias_attr=ParamAttr(name="fc10_offset"))