Example 1
    def __init__(self,
                 name_scope,
                 group,
                 out_ch,
                 channels,
                 act="relu",
                 is_test=False,
                 pool=True,
                 use_cudnn=True):
        super(ConvBNPool, self).__init__(name_scope)
        self.group = group
        self.pool = pool

        filter_size = 3
        conv_std_0 = (2.0 / (filter_size**2 * channels[0]))**0.5
        conv_param_0 = fluid.ParamAttr(
            initializer=fluid.initializer.Normal(0.0, conv_std_0))

        conv_std_1 = (2.0 / (filter_size**2 * channels[1]))**0.5
        conv_param_1 = fluid.ParamAttr(
            initializer=fluid.initializer.Normal(0.0, conv_std_1))

        self.conv_0_layer = Conv2D(
            self.full_name(),
            channels[0],
            out_ch[0],
            3,
            padding=1,
            param_attr=conv_param_0,
            bias_attr=False,
            act=None,
            use_cudnn=use_cudnn)
        self.bn_0_layer = BatchNorm(
            self.full_name(), out_ch[0], act=act, is_test=is_test)
        self.conv_1_layer = Conv2D(
            self.full_name(),
            num_channels=channels[1],
            num_filters=out_ch[1],
            filter_size=3,
            padding=1,
            param_attr=conv_param_1,
            bias_attr=False,
            act=None,
            use_cudnn=use_cudnn)
        self.bn_1_layer = BatchNorm(
            self.full_name(), out_ch[1], act=act, is_test=is_test)

        print( "pool", self.pool)
        if self.pool:
            self.pool_layer = Pool2D(
                self.full_name(),
                pool_size=2,
                pool_type='max',
                pool_stride=2,
                use_cudnn=use_cudnn,
                ceil_mode=True)
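
Note: the conv_std_* values above follow He/Kaiming initialization, std = sqrt(2 / fan_in) with fan_in = filter_size^2 * in_channels. A minimal check (the channel count is illustrative):

import math

def he_std(filter_size, in_channels):
    fan_in = filter_size ** 2 * in_channels
    return math.sqrt(2.0 / fan_in)

print(he_std(3, 64))  # ~0.0589, the std a 3x3 conv over 64 input channels gets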
Example 2
    def __init__(self, name_scope, num_filter):
        super(conv_block_4, self).__init__(name_scope)

        self.conv1 = Conv2D(
            self.full_name(),
            num_filters=num_filter,
            filter_size=3,
            stride=1,
            padding=1,
            act='relu',
            use_cudnn=use_cudnn,
            param_attr=fluid.param_attr.ParamAttr(name="1_weights"),
            bias_attr=False)

        self.conv2 = Conv2D(
            self.full_name(),
            num_filters=num_filter,
            filter_size=3,
            stride=1,
            padding=1,
            act='relu',
            use_cudnn=use_cudnn,
            param_attr=fluid.param_attr.ParamAttr(name="2_weights"),
            bias_attr=False)

        self.conv3 = Conv2D(
            self.full_name(),
            num_filters=num_filter,
            filter_size=3,
            stride=1,
            padding=1,
            act='relu',
            use_cudnn=use_cudnn,
            param_attr=fluid.param_attr.ParamAttr(name="3_weights"),
            bias_attr=False)

        self.conv4 = Conv2D(
            self.full_name(),
            num_filters=num_filter,
            filter_size=3,
            stride=1,
            padding=1,
            act='relu',
            use_cudnn=use_cudnn,
            param_attr=fluid.param_attr.ParamAttr(name="4_weights"),
            bias_attr=False)

        self.pool1 = Pool2D(self.full_name(),
                            pool_size=2,
                            pool_type='max',
                            pool_stride=2,
                            use_cudnn=use_cudnn)

        self.num_filter = num_filter
Example 3
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(LeNet, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(Conv2D(1, 6, 3, stride=1, padding=1),
                                   Pool2D(2, 'max', 2),
                                   Conv2D(6, 16, 5, stride=1, padding=0),
                                   Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, num_classes, act=classifier_activation))
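
Note: Linear(400, ...) assumes the flattened feature map has 400 elements, which works out for a 1x28x28 input (MNIST-sized; an assumption, since the input shape is not shown here):

def conv_out(size, k, s=1, p=0):
    return (size + 2 * p - k) // s + 1

s = conv_out(28, k=3, p=1)  # 28 (3x3 conv, padding 1)
s = s // 2                  # 14 (2x2 max pool, stride 2)
s = conv_out(s, k=5)        # 10 (5x5 conv, no padding)
s = s // 2                  # 5  (2x2 max pool, stride 2)
print(16 * s * s)           # 400 -> matches the first Linear layer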
Example 4
    def __init__(self, in_size, reduction=4):
        super(SeModule, self).__init__()
        # self.avg_pool = nn.AdaptiveAvgPool2d(1)

        self.se = fluid.dygraph.Sequential(
            Conv2D(in_size, in_size // reduction, filter_size=1, stride=1, padding=0),
            BatchNorm(in_size // reduction, act='relu'),
            Conv2D(in_size // reduction, in_size, filter_size=1, stride=1, padding=0),
            BatchNorm(in_size),
            hsigmoid()
        )
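
Note: the forward pass is not shown; a squeeze-and-excite block of this shape is normally applied as a channel gate, roughly as sketched below (the global pooling step is hinted at by the commented-out AdaptiveAvgPool2d). A hedged sketch:

import paddle.fluid as fluid

def se_forward(block, x):
    # squeeze: global average pool to [N, C, 1, 1]
    w = fluid.layers.adaptive_pool2d(x, pool_size=1, pool_type='avg')
    # excite: 1x1 convs + hsigmoid produce per-channel weights in [0, 1]
    w = block.se(w)
    # scale: re-weight the input channels (broadcast over H, W)
    return x * w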
Example 5
    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()

        # Like LeNet, AlexNet uses convolution and pooling layers to extract image features
        # Unlike LeNet, the activation function is changed to 'relu'
        # conv1's input channel count is set to 1; the input is (batch size m, 1, 28, 28)
        self.conv1 = Conv2D(num_channels=1,
                            num_filters=10,
                            filter_size=11,
                            stride=1,
                            padding=3,
                            act='relu')
        # output: (m,10,24,24)
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # output: (m,10,12,12)
        self.conv2 = Conv2D(num_channels=10,
                            num_filters=100,
                            filter_size=5,
                            stride=1,
                            padding=2,
                            act='relu')
        # (m,100,12,12)
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # (m,100,6,6)
        self.conv3 = Conv2D(num_channels=100,
                            num_filters=200,
                            filter_size=3,
                            stride=1,
                            padding=0,
                            act='relu')
        # (m,200,4,4)
        self.conv4 = Conv2D(num_channels=200,
                            num_filters=200,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        # (m,200,4,4)
        self.conv5 = Conv2D(num_channels=200,
                            num_filters=100,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        # (m,100,4,4)
        self.pool5 = Pool2D(pool_size=2, pool_stride=2,
                            pool_type='max')  # halves the 4x4 maps to 2x2
        # (m,100,2,2)
        self.fc1 = Linear(input_dim=400, output_dim=64, act='relu')
        self.drop_ratio1 = 0.5
        self.fc2 = Linear(input_dim=64, output_dim=64, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=64, output_dim=num_classes)
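
Note: drop_ratio1/drop_ratio2 are stored as plain floats, so dropout is presumably applied in the (unshown) forward pass, in the usual Paddle-tutorial pattern. A hedged sketch of the fully connected tail:

import paddle.fluid as fluid

def forward_tail(self, x):
    x = fluid.layers.reshape(x, [x.shape[0], -1])  # (m, 100*2*2) = (m, 400)
    x = self.fc1(x)
    x = fluid.layers.dropout(x, self.drop_ratio1)
    x = self.fc2(x)
    x = fluid.layers.dropout(x, self.drop_ratio2)
    return self.fc3(x)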
Example 6
 def __init__(self, num_classes=10):
     super(GoogLeNet, self).__init__()
     # GoogLeNet consists of five modules, each followed by a pooling layer
     # Module 1: one convolution layer plus 3x3 max pooling
     self.conv1 = Conv2D(num_channels=3,
                         num_filters=64,
                         filter_size=7,
                         padding=3,
                         act='relu')
     self.pool1 = Pool2D(pool_size=3,
                         pool_stride=2,
                         pool_padding=1,
                         pool_type='max')
     # Module 2: two convolution layers plus 3x3 max pooling
     self.conv2_1 = Conv2D(num_channels=64,
                           num_filters=64,
                           filter_size=1,
                           act='relu')
     self.conv2_2 = Conv2D(num_channels=64,
                           num_filters=192,
                           filter_size=3,
                           padding=1,
                           act='relu')
     self.pool2 = Pool2D(pool_size=3,
                         pool_stride=2,
                         pool_padding=1,
                         pool_type='max')
     # Module 3: two Inception blocks plus 3x3 max pooling
     self.block3_1 = Inception(192, 64, (96, 128), (16, 32), 32)
     self.block3_2 = Inception(256, 128, (128, 192), (32, 96), 64)
     self.pool3 = Pool2D(pool_size=3,
                         pool_stride=2,
                         pool_padding=1,
                         pool_type='max')
     # Module 4: five Inception blocks plus 3x3 max pooling
     self.block4_1 = Inception(480, 192, (96, 208), (16, 48), 64)
     self.block4_2 = Inception(512, 160, (112, 224), (24, 64), 64)
     self.block4_3 = Inception(512, 128, (128, 256), (24, 64), 64)
     self.block4_4 = Inception(512, 112, (144, 288), (32, 64), 64)
     self.block4_5 = Inception(528, 256, (160, 320), (32, 128), 128)
     self.pool4 = Pool2D(pool_size=3,
                         pool_stride=2,
                         pool_padding=1,
                         pool_type='max')
     # Module 5: two Inception blocks
     self.block5_1 = Inception(832, 256, (160, 320), (32, 128), 128)
     self.block5_2 = Inception(832, 384, (192, 384), (48, 128), 128)
     # Global average pooling: with global_pooling=True the window covers the whole feature map, so pool_stride has no effect
     self.pool5 = Pool2D(pool_stride=1,
                         global_pooling=True,
                         pool_type='avg')
     self.fc = Linear(input_dim=1024, output_dim=num_classes, act='softmax')
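
Note: the Inception arguments chain together: each block concatenates four branches, so its output width is c1 + c2[1] + c3[1] + c4 and must equal the next block's input width. A quick check of the wiring above:

def inception_out(c1, c2, c3, c4):
    return c1 + c2[1] + c3[1] + c4

assert inception_out(64, (96, 128), (16, 32), 32) == 256       # block3_1 -> block3_2
assert inception_out(128, (128, 192), (32, 96), 64) == 480     # block3_2 -> block4_1
assert inception_out(384, (192, 384), (48, 128), 128) == 1024  # block5_2 -> fc input_dim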
Example 7
 def __init__(self):
     super(MNIST, self).__init__()
     
     # Convolution layer with relu activation
     self.conv1 = Conv2D(num_channels=1, num_filters=20, filter_size=5, stride=1, padding=2, act='relu')
     # Pooling layer: pool size 2, stride 2, max pooling
     self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
     # Convolution layer with relu activation
     self.conv2 = Conv2D(num_channels=20, num_filters=20, filter_size=5, stride=1, padding=2, act='relu')
     # Pooling layer: pool size 2, stride 2, max pooling
     self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
     # Fully connected layer with 10 output nodes
     self.fc = Linear(input_dim=980, output_dim=10, act='softmax')
Example 8
    def __init__(self, num_classes=10):
        super(LeNet, self).__init__()

        # Conv + pool blocks: each convolution uses a sigmoid activation and is followed by 2x2 max pooling
        self.conv1 = Conv2D(num_channels=1, num_filters=6, filter_size=5, act='sigmoid')
        self.pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)
        self.conv2 = Conv2D(num_channels=6, num_filters=16, filter_size=5, act='sigmoid')
        self.pool2 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)
        # Third convolution layer, with no pooling after it
        self.conv3 = Conv2D(num_channels=16, num_filters=120, filter_size=5, act='sigmoid')
        # Fully connected layers: the conv output has shape [N, C, H, W] and is flattened
        # automatically on entry, i.e. each sample becomes a vector of length K = C*H*W,
        # so a mini-batch becomes an N x K matrix
        self.fc1 = Linear(input_dim=120, output_dim=64, act='sigmoid')
        self.fc2 = Linear(input_dim=64, output_dim=num_classes, act='softmax')
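
Note: fc1's input_dim=120 only works out for a 32x32 input (the classic LeNet size; 28x28 MNIST images would need padding first, an assumption since the input is not shown):

def conv_out(size, k):       # stride 1, no padding, as above
    return size - k + 1

s = conv_out(32, 5) // 2     # conv1 + pool1 -> 14
s = conv_out(s, 5) // 2      # conv2 + pool2 -> 5
s = conv_out(s, 5)           # conv3         -> 1
print(120 * s * s)           # 120 -> matches fc1's input_dim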
Example 9
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 padding=0,
                 norm_layer='instance',
                 use_bias=True,
                 scale_factor=1,
                 stddev=0.02):
        super(SeparableConv2D, self).__init__()

        if not use_bias:
            con_bias_attr = False
        else:
            con_bias_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.0))

        self.conv_sep = Conv2D(
            num_channels=num_channels,
            num_filters=num_channels * scale_factor,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            use_cudnn=use_cudnn,
            groups=num_channels,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NormalInitializer(loc=0.0,
                                                                scale=stddev)),
            bias_attr=con_bias_attr)

        self.norm = InstanceNorm(
            num_channels=num_filters,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(1.0)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.0)),
        )

        self.conv_out = Conv2D(
            num_channels=num_channels * scale_factor,
            num_filters=num_filters,
            filter_size=1,
            stride=1,
            use_cudnn=use_cudnn,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NormalInitializer(loc=0.0,
                                                                scale=stddev)),
            bias_attr=con_bias_attr)
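
Note: groups=num_channels makes conv_sep a depthwise convolution, and conv_out is the 1x1 pointwise mix; together they replace one dense kxk convolution at a fraction of the parameters. An illustrative count (bias-free, scale_factor=1; the channel sizes are made up):

def separable_params(c_in, c_out, k):
    depthwise = c_in * k * k   # one kxk filter per input channel
    pointwise = c_in * c_out   # 1x1 conv mixing channels
    return depthwise + pointwise

def dense_params(c_in, c_out, k):
    return c_in * c_out * k * k

print(separable_params(128, 256, 3))  # 33920
print(dense_params(128, 256, 3))      # 294912, ~8.7x more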
Example 10
    def __init__(self, input_channel, output_nc, config, norm_layer=InstanceNorm, dropout_rate=0, n_blocks=9, padding_type='reflect'):
        super(SubMobileResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == InstanceNorm
        else:
            use_bias = norm_layer == InstanceNorm

        self.model = fluid.dygraph.LayerList([Pad2D(paddings=[3, 3, 3, 3], mode="reflect"),
                           Conv2D(input_channel, config['channels'][0], filter_size=7, padding=0, use_cudnn=use_cudnn, bias_attr=use_bias),
                           norm_layer(config['channels'][0]),
                           ReLU()])

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2 ** i
            in_c = config['channels'][i]
            out_c = config['channels'][i + 1]
            self.model.extend([Conv2D(in_c * mult, out_c * mult * 2, filter_size=3, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
                               norm_layer(out_c * mult * 2),
                               ReLU()])

        mult = 2 ** n_downsampling

        in_c = config['channels'][2]
        for i in range(n_blocks):
            if len(config['channels']) == 6:
                offset = 0
            else:
                offset = i // 3
            out_c = config['channels'][offset + 3]
            self.model.extend([MobileResnetBlock(in_c * mult, out_c * mult, padding_type=padding_type, norm_layer=norm_layer, dropout_rate=dropout_rate, use_bias=use_bias)])

        if len(config['channels']) == 6:
            offset = 4
        else:
            offset = 6
        for i in range(n_downsampling):
            out_c = config['channels'][offset + i]
            mult = 2 ** (n_downsampling - i)
            output_size = (i + 1) * 128
            self.model.extend([Conv2DTranspose(in_c * mult, int(out_c * mult / 2), filter_size=3, output_size=output_size, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
#            self.model.extend([Conv2DTranspose(in_c * mult, int(out_c * mult / 2), filter_size=3, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
#                               Pad2D(paddings=[0, 1, 0, 1], mode='constant', pad_value=0.0),
                               norm_layer(int(out_c * mult / 2)),
                               ReLU()])
            in_c = out_c

        self.model.extend([Pad2D(paddings=[3, 3, 3, 3], mode="reflect")])
        self.model.extend([Conv2D(in_c, output_nc, filter_size=7, padding=0)])
Example 11
    def __init__(self, name_scope):
        super(MNIST, self).__init__(name_scope)
        name_scope = self.full_name()
        # Convolution layer: 20 output channels (num_filters), kernel size 5 (filter_size), stride=1, padding=2
        # relu activation
        self.conv1 = Conv2D(num_channels=1, num_filters=20, filter_size=5, stride=1, padding=2, act='relu')
        # Pooling layer: pool_size=2, stride 2, max pooling
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # Convolution layer: 20 output channels (num_filters), kernel size 5 (filter_size), stride=1, padding=2
        self.conv2 = Conv2D(num_channels=20, num_filters=20, filter_size=5, stride=1, padding=2, act='relu')
        # Pooling layer: pool_size=2, stride 2, max pooling
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')

        # self.linear = Linear(20*7*7, 1, act=None)
        self.linear = Linear(20*7*7, 10, act='softmax')
Example 12
    def __init__(self):
        super(M_net, self).__init__()
        self.scale1 = Pool2D(pool_size=2, pool_type='avg', pool_stride=2, pool_padding=0)
        self.scale2 = Pool2D(pool_size=2, pool_type='avg', pool_stride=2, pool_padding=0)
        self.scale3 = Pool2D(pool_size=2, pool_type='avg', pool_stride=2, pool_padding=0)

        self.block1_conv1 = Conv_Block(3, 32, 3, 1)
        self.block1_conv2 = Conv_Block(32, 32, 3, 1)
        self.block1_pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)

        self.block2_input1 = Conv_Block(3, 64, 3, 1)
        self.block2_conv1 = Conv_Block(96, 64, 3, 1)
        self.block2_conv2 = Conv_Block(64, 64, 3, 1)
        self.block2_pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)

        self.block3_input1 = Conv_Block(3, 128, 3, 1)
        self.block3_conv1 = Conv_Block(192, 128, 3, 1)
        self.block3_conv2 = Conv_Block(128, 128, 3, 1)
        self.block3_pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)

        self.block4_input1 = Conv_Block(3, 256, 3, 1)
        self.block4_conv1 = Conv_Block(384, 256, 3, 1)
        self.block4_conv2 = Conv_Block(256, 256, 3, 1)
        self.block4_pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2, pool_padding=0)

        self.block5_conv1 = Conv_Block(256, 512, 3, 1)
        self.block5_conv2 = Conv_Block(512, 512, 3, 1)

        self.block6_dconv = Conv2DTranspose(512, 256, 2, stride=2)
        self.block6_conv1 = Conv_Block(512, 256, 3, 1)
        self.block6_conv2 = Conv_Block(256, 256, 3, 1)

        self.block7_dconv = Conv2DTranspose(256, 128, 2, stride=2)
        self.block7_conv1 = Conv_Block(256, 128, 3, 1)
        self.block7_conv2 = Conv_Block(128, 128, 3, 1)

        self.block8_dconv = Conv2DTranspose(128, 64, 2, stride=2)
        self.block8_conv1 = Conv_Block(128, 64, 3, 1)
        self.block8_conv2 = Conv_Block(64, 64, 3, 1)

        self.block9_dconv = Conv2DTranspose(64, 32, 2, stride=2)
        self.block9_conv1 = Conv_Block(64, 32, 3, 1)
        self.block9_conv2 = Conv_Block(32, 32, 3, 1)

        self.side63 = Conv2D(256, 2, 1, act='sigmoid')
        self.side73 = Conv2D(128, 2, 1, act='sigmoid')
        self.side83 = Conv2D(64, 2, 1, act='sigmoid')
        self.side93 = Conv2D(32, 2, 1, act='sigmoid')
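
Note: the in-channel counts encode the (unshown) forward wiring of this M-Net/U-Net hybrid: each encoder stage presumably concatenates the pooled features of the previous stage with a Conv_Block applied to a downscaled copy of the input, and each decoder stage concatenates the transposed-conv output with the matching encoder skip:

assert 96 == 64 + 32     # block2_conv1: block2_input1 ++ pooled block1
assert 192 == 128 + 64   # block3_conv1: block3_input1 ++ pooled block2
assert 384 == 256 + 128  # block4_conv1: block4_input1 ++ pooled block3
assert 512 == 256 + 256  # block6_conv1: block6_dconv ++ skip from block4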
Example 13
    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()

        # Like LeNet, AlexNet uses convolution and pooling layers to extract image features
        # The difference is that AlexNet switches the activation to relu
        self.conv1 = Conv2D(num_channels=3,
                            num_filters=96,
                            filter_size=11,
                            stride=4,
                            padding=5,
                            act='relu')
        # For each convolution layer, the activation is fused into the layer itself
        self.pool1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.conv2 = Conv2D(num_channels=96,
                            num_filters=256,
                            filter_size=5,
                            stride=1,
                            padding=2,
                            act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.conv3 = Conv2D(num_channels=256,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv4 = Conv2D(num_channels=384,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv5 = Conv2D(num_channels=384,
                            num_filters=256,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.pool5 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.fc1 = Linear(input_dim=12544, output_dim=4096, act='relu')
        self.drop_ratio1 = 0.5
        # One of AlexNet's improvements is the introduction of dropout:
        # applying dropout after the fully connected layers suppresses overfitting
        self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=4096,
                          output_dim=num_classes,
                          act='softmax')
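
Note: where input_dim=12544 comes from, assuming the standard 3x224x224 ImageNet input (not shown here):

def conv_out(size, k, s=1, p=0):
    return (size + 2 * p - k) // s + 1

s = conv_out(224, k=11, s=4, p=5)  # conv1 -> 56
s = s // 2                         # pool1 -> 28
s = s // 2                         # conv2 keeps the size; pool2 -> 14
s = s // 2                         # conv3..conv5 keep the size; pool5 -> 7
print(256 * s * s)                 # 12544 -> matches fc1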
Example 14
    def architecture_init(self):
        # shared layers
        ch_in = self.channels
        shared_sub_layers = [
            Conv2D(num_channels=3,
                   num_filters=ch_in,
                   filter_size=3,
                   padding=1,
                   param_attr=weight_initializer,
                   bias_attr=bias_initializer,
                   stride=1,
                   act=None)
        ]
        for i in range(self.repeat_num):
            ch_out = min(ch_in * 2, self.max_conv_dim)
            sub_layer = ResBlock(ch_in,
                                 ch_out,
                                 normalize=False,
                                 downsample=True,
                                 sn=self.sn,
                                 act=None)
            ch_in = ch_out
            shared_sub_layers.append(sub_layer)
        shared_sub_layers.append(Leaky_Relu(alpha=0.2))
        shared_sub_layers.append(
            Conv2D(num_channels=self.max_conv_dim,
                   num_filters=self.max_conv_dim,
                   filter_size=4,
                   padding=0,
                   param_attr=weight_initializer,
                   bias_attr=bias_initializer,
                   stride=1,
                   act=None))
        shared_sub_layers.append(Leaky_Relu(alpha=0.2))

        shared_layers = Sequential(*shared_sub_layers)
        # unshared layers
        unshared_sub_layers = []
        for _ in range(self.num_domains):
            sub_layer = Linear(self.max_conv_dim,
                               self.style_dim,
                               param_attr=weight_initializer,
                               bias_attr=bias_initializer,
                               act=None)
            unshared_sub_layers.append(sub_layer)

        unshared_layers = Sequential(*unshared_sub_layers)
        return shared_layers, unshared_layers
Example 15
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=fluid.param_attr.ParamAttr(),
            bias_attr=False)

        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=fluid.param_attr.ParamAttr(),
            bias_attr=fluid.param_attr.ParamAttr())
Example 16
    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size=3,
                 stride=1,
                 groups=1,
                 padding=0,
                 act="leaky",
                 is_test=True):
        super(ConvBNLayer, self).__init__()

        self.conv = Conv2D(num_channels=ch_in,
                           num_filters=ch_out,
                           filter_size=filter_size,
                           stride=stride,
                           padding=padding,
                           groups=groups,
                           param_attr=ParamAttr(
                               initializer=fluid.initializer.Normal(0., 0.02)),
                           bias_attr=False,
                           act=None)
        self.batch_norm = BatchNorm(
            num_channels=ch_out,
            is_test=is_test,
            param_attr=ParamAttr(initializer=fluid.initializer.Normal(0., 0.02),
                                 regularizer=L2Decay(0.)),
            bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                                regularizer=L2Decay(0.)))

        self.act = act
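
Note: self.act is stored as a string rather than fused into the layers, so the activation is presumably dispatched manually in the (unshown) forward pass, YOLOv3-style. A hedged sketch (the alpha value is an assumption):

import paddle.fluid as fluid

def forward(self, inputs):
    out = self.conv(inputs)
    out = self.batch_norm(out)
    if self.act == 'leaky':
        out = fluid.layers.leaky_relu(out, alpha=0.1)
    return out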
Example 17
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 padding=0,
                 stride=1,
                 groups=None,
                 act=None,
                 name=None):
        super(ConvLayer, self).__init__()

        param_attr, bias_attr = initial_type(name=name,
                                             input_channels=num_channels,
                                             use_bias=True,
                                             filter_size=filter_size)

        self.num_filters = num_filters
        self._conv = Conv2D(num_channels=num_channels,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            padding=padding,
                            stride=stride,
                            groups=groups,
                            act=act,
                            param_attr=param_attr,
                            bias_attr=bias_attr)
Example 18
 def __init__(self,
              num_channels,
              num_filters,
              filter_size,
              stride=1,
              groups=1,
              act=None):
     """
     num_channels, 卷积层的输入通道数
     num_filters, 卷积层的输出通道数
     stride, 卷积层的步幅
     groups, 分组卷积的组数,默认groups=1不使用分组卷积
     act, 激活函数类型,默认act=None不使用激活函数
     """
     super(ConvBNLayer, self).__init__()
     # Create the convolution layer
     self.conv = Conv2D(num_channels=num_channels,
                        num_filters=num_filters,
                        filter_size=filter_size,
                        stride=stride,
                        padding=(filter_size - 1) // 2,
                        groups=groups,
                        act=None,
                        bias_attr=False)
     # Create the BatchNorm layer
     self.batch_norm = BatchNorm(num_channels=num_filters, act=act)
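
Note: padding=(filter_size - 1) // 2 keeps the spatial size whenever stride=1. A minimal usage sketch, assuming the ConvBNLayer class above is in scope (shapes are illustrative):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    layer = ConvBNLayer(num_channels=64, num_filters=128, filter_size=3)
    x = fluid.dygraph.to_variable(
        np.random.randn(1, 64, 56, 56).astype('float32'))
    y = layer.batch_norm(layer.conv(x))  # what forward() presumably does
    print(y.shape)                       # [1, 128, 56, 56]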
Example 19
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act="relu",
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(num_channels=num_channels,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            stride=stride,
                            padding=(filter_size - 1) // 2,
                            groups=groups,
                            param_attr=ParamAttr(
                                initializer=Normal(scale=0.001),
                                name=name + "_weights"),
                            bias_attr=False)
        bn_name = name + '_bn'
        self._batch_norm = BatchNorm(
            num_filters,
            param_attr=ParamAttr(name=bn_name + '_scale',
                                 initializer=fluid.initializer.Constant(1.0)),
            bias_attr=ParamAttr(bn_name + '_offset',
                                initializer=fluid.initializer.Constant(0.0)))
        self.act = act
Example 20
    def __init__(self,
                 backbone,
                 num_classes,
                 in_channels,
                 channels=None,
                 pretrained_model=None,
                 ignore_index=255,
                 **kwargs):
        super(FCN, self).__init__()

        self.num_classes = num_classes
        self.ignore_index = ignore_index
        self.EPS = 1e-5
        if channels is None:
            channels = in_channels

        self.backbone = manager.BACKBONES[backbone](**kwargs)
        self.conv_last_2 = ConvBNLayer(num_channels=in_channels,
                                       num_filters=channels,
                                       filter_size=1,
                                       stride=1,
                                       name='conv-2')
        self.conv_last_1 = Conv2D(num_channels=channels,
                                  num_filters=self.num_classes,
                                  filter_size=1,
                                  stride=1,
                                  padding=0,
                                  param_attr=ParamAttr(
                                      initializer=Normal(scale=0.001),
                                      name='conv-1_weights'))
        self.init_weight(pretrained_model)
Example 21
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 conv_stride=1,
                 conv_padding=0,
                 conv_dilation=1,
                 conv_groups=1,
                 act=None,
                 use_cudnn=False,
                 param_attr=None,
                 bias_attr=None):
        super(SimpleImgConv, self).__init__()

        self._conv2d = Conv2D(num_channels=num_channels,
                              num_filters=num_filters,
                              filter_size=filter_size,
                              stride=conv_stride,
                              padding=conv_padding,
                              dilation=conv_dilation,
                              groups=conv_groups,
                              param_attr=param_attr,
                              bias_attr=bias_attr,
                              act=act,
                              use_cudnn=use_cudnn)
Example 22
 def __init__(self,
              in_channels,
              num_filters,
              filter_size,
              stride=1,
              padding=0,
              groups=1,
              act='relu',
              name=None):
     super(ConvBNLayer, self).__init__()
     self.conv = Conv2D(num_channels=in_channels,
                        num_filters=num_filters,
                        filter_size=filter_size,
                        stride=stride,
                        padding=padding,
                        groups=groups,
                        act=None,
                        param_attr=ParamAttr(name=name + ".conv.weight"),
                        bias_attr=False)
     self.bn = BatchNorm(num_filters,
                         act=act,
                         epsilon=0.001,
                         param_attr=ParamAttr(name=name + ".bn.weight"),
                         bias_attr=ParamAttr(name=name + ".bn.bias"),
                         moving_mean_name=name + '.bn.running_mean',
                         moving_variance_name=name + '.bn.running_var')
Example 23
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(num_channels=num_channels,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            stride=stride,
                            padding=(filter_size - 1) // 2,
                            groups=groups,
                            act=None,
                            param_attr=fluid.param_attr.ParamAttr(name=name +
                                                                  "_weights"),
                            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]

        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name +
                                 "_scale"),  #fluid.param_attr.ParamAttr(),
            bias_attr=ParamAttr(bn_name +
                                "_offset"),  #fluid.param_attr.ParamAttr())
            moving_mean_name=bn_name + "_mean",
            moving_variance_name=bn_name + "_variance")
Example 24
 def __init__(self,
              dict_dim,
              emb_dim=128,
              hid_dim=128,
              fc_hid_dim=96,
              class_dim=2,
              channels=1,
              win_size=(3, 128)):
     super(CNN, self).__init__()
     self.dict_dim = dict_dim
     self.emb_dim = emb_dim
     self.hid_dim = hid_dim
     self.fc_hid_dim = fc_hid_dim
     self.class_dim = class_dim
     self.channels = channels
     self.win_size = win_size
     self.embedding = Embedding(size=[self.dict_dim + 1, self.emb_dim],
                                dtype='float64',
                                is_sparse=False,
                                padding_idx=0)
     self._conv2d = Conv2D(num_channels=self.channels,
                           num_filters=self.hid_dim,
                           filter_size=win_size,
                           padding=[1, 0],
                           use_cudnn=True,
                           act=None,
                           dtype="float64")
     self._fc_1 = Linear(input_dim=self.hid_dim,
                         output_dim=self.fc_hid_dim,
                         dtype="float64")
     self._fc_2 = Linear(input_dim=self.fc_hid_dim,
                         output_dim=self.class_dim,
                         act="softmax",
                         dtype="float64")
Example 25
 def __init__(self,
              height,
              width,
              with_r,
              with_boundary,
              in_channels,
              first_one=False,
              out_channels=256,
              kernel_size=1,
              stride=1,
              padding=0):
     super(CoordConvTh, self).__init__()
     self.out_channels = out_channels
     self.kernel_size = kernel_size
     self.stride = stride
     self.padding = padding
     self.addcoords = AddCoordsTh(height, width, with_r, with_boundary)
     in_channels += 2
     if with_r:
         in_channels += 1
     if with_boundary and not first_one:
         in_channels += 2
     self.conv = Conv2D(num_channels=in_channels,
                        num_filters=self.out_channels,
                        filter_size=self.kernel_size,
                        stride=self.stride,
                        padding=self.padding)
Example 26
    def __init__(self,
                 img_size=256,
                 img_ch=3,
                 style_dim=64,
                 max_conv_dim=512,
                 sn=False,
                 w_hpf=0):
        super(Generator, self).__init__()
        self.img_size = img_size
        self.img_ch = img_ch
        self.style_dim = style_dim
        self.max_conv_dim = max_conv_dim
        self.sn = sn
        self.channels = 2**14 // img_size  # if 256 -> 64
        self.w_hpf = w_hpf
        self.repeat_num = int(np.log2(img_size)) - 4  # if 256 -> 4
        if self.w_hpf == 1:
            self.repeat_num += 1
        self.from_rgb = Conv2D(num_channels=self.img_ch,
                               num_filters=self.channels,
                               filter_size=3,
                               padding=1,
                               param_attr=weight_initializer,
                               bias_attr=bias_initializer,
                               stride=1,
                               act=None)

        self.encode, self.decode, self.to_rgb = self.architecture_init()
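
Note: the two derived hyper-parameters, checked for the default img_size=256 (matching the inline comments):

import numpy as np

img_size = 256
print(2 ** 14 // img_size)         # 64 -> width of from_rgb
print(int(np.log2(img_size)) - 4)  # 4  -> repeat_num before the w_hpf bump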
Example 27
    def __init__(self,
                 input_channels,
                 output_channels,
                 filter_size,
                 stride=1,
                 padding=0,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(num_channels=input_channels,
                            num_filters=output_channels,
                            filter_size=filter_size,
                            stride=stride,
                            padding=padding,
                            param_attr=ParamAttr(name=name + "/weights"),
                            bias_attr=False)
        self._bn = BatchNorm(
            num_channels=output_channels,
            act=act,
            epsilon=1e-3,
            momentum=0.99,
            param_attr=ParamAttr(name=name + "/BatchNorm/gamma"),
            bias_attr=ParamAttr(name=name + "/BatchNorm/beta"),
            moving_mean_name=name + "/BatchNorm/moving_mean",
            moving_variance_name=name + "/BatchNorm/moving_variance")
Example 28
 def __init__(self, inc, outc, size, padding=1):
     super(Conv_Block, self).__init__()
     self.c1 = Conv2D(num_channels=inc,
                      num_filters=outc,
                      filter_size=size,
                      padding=padding)
     self.bn = BatchNorm(num_channels=outc, act='relu', in_place=True)
Example 29
    def __init__(self,
                 num_channels,
                 filter_size,
                 num_filters,
                 stride,
                 padding,
                 channels=None,
                 num_groups=1,
                 use_cudnn=True):
        super(ConvBNLayer, self).__init__()

        tmp_param = ParamAttr(name=self.full_name() + "_weights")
        self._conv = Conv2D(num_channels=num_channels,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            stride=stride,
                            padding=padding,
                            groups=num_groups,
                            act=None,
                            use_cudnn=use_cudnn,
                            param_attr=tmp_param,
                            bias_attr=False)

        self._batch_norm = BatchNorm(
            num_filters,
            param_attr=ParamAttr(name=self.full_name() + "_bn" + "_scale"),
            bias_attr=ParamAttr(name=self.full_name() + "_bn" + "_offset"),
            moving_mean_name=self.full_name() + "_bn" + '_mean',
            moving_variance_name=self.full_name() + "_bn" + '_variance')
Example 30
    def __init__(self,
                 num_channels,
                 num_filters=64,
                 filter_size=7,
                 stride=1,
                 stddev=0.02,
                 padding=0,
                 norm=True,
                 norm_layer=InstanceNorm,
                 relu=True,
                 relufactor=0.0,
                 use_bias=False):
        super(conv2d, self).__init__()

        if not use_bias:
            con_bias_attr = False
        else:
            con_bias_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.0))

        self.conv = Conv2D(num_channels=num_channels,
                           num_filters=int(num_filters),
                           filter_size=int(filter_size),
                           stride=stride,
                           padding=padding,
                           use_cudnn=use_cudnn,
                           param_attr=fluid.ParamAttr(
                               initializer=fluid.initializer.NormalInitializer(
                                   loc=0.0, scale=stddev)),
                           bias_attr=con_bias_attr)
        if norm_layer == InstanceNorm:
            self.bn = InstanceNorm(
                num_channels=num_filters,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(1.0),
                    trainable=False),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.0),
                    trainable=False),
            )
        elif norm_layer == BatchNorm:
            self.bn = BatchNorm(
                num_channels=num_filters,
                param_attr=fluid.ParamAttr(initializer=fluid.initializer.
                                           NormalInitializer(1.0, 0.02)),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.0)),
            )
        else:
            raise NotImplementedError

        self.relufactor = relufactor
        self.use_bias = use_bias
        self.norm = norm
        if relu:
            if relufactor == 0.0:
                self.lrelu = ReLU()
            else:
                self.lrelu = Leaky_ReLU(self.relufactor)
        self.relu = relu