Example #1
0
File: unet.py  Project: sherillL/seghome
    def __init__(self, num_channels, num_filters):
        """U-Net decoder stage: a 2x2 transpose conv that doubles the
        spatial size, followed by two 3x3 conv + BN(relu) pairs."""
        super(Decoder, self).__init__()

        # Upsampling: 2x2 transpose conv with stride 2 -> 2x larger map.
        self.up = Conv2DTranspose(num_channels=num_channels,
                                  num_filters=num_filters,
                                  filter_size=2,
                                  stride=2)

        # First 3x3 same-padding conv; relu is fused into the BatchNorm.
        self.conv1 = Conv2D(num_channels, num_filters,
                            filter_size=3, stride=1, padding=1)
        self.bn1 = BatchNorm(num_filters, act='relu')

        # Second 3x3 same-padding conv + BN(relu).
        self.conv2 = Conv2D(num_filters, num_filters,
                            filter_size=3, stride=1, padding=1)
        self.bn2 = BatchNorm(num_filters, act='relu')
    def __init__(self, num_channels, num_filters):
        """U-Net encoder stage: two 3x3 conv + BN(relu) pairs and a 2x2
        max-pool; forward is expected to return the features both before
        and after pooling."""
        super(Encoder, self).__init__()

        self.conv1 = Conv2D(num_channels, num_filters,
                            filter_size=3, stride=1, padding=1)
        self.bn1 = BatchNorm(num_filters, act='relu')

        self.conv2 = Conv2D(num_filters, num_filters,
                            filter_size=3, stride=1, padding=1)
        self.bn2 = BatchNorm(num_filters, act='relu')

        # ceil_mode so odd-sized feature maps are fully covered by the pool.
        self.pool = Pool2D(pool_size=2, pool_stride=2, pool_type='max',
                           ceil_mode=True)
Example #3
0
    def __init__(self, in_planes, planes, stride=1, downsample=None):
        """3D residual basic block: two 3x3x3 convs, each with BatchNorm;
        relu is fused into the first BN only."""
        super().__init__()

        self.conv1 = conv3x3x3(in_planes, planes, stride)
        self.bn1 = BatchNorm(num_channels=planes, act='relu')
        self.conv2 = conv3x3x3(planes, planes)
        # No activation here -- applied after the residual add, presumably.
        self.bn2 = BatchNorm(num_channels=planes)
        # Optional shortcut projection used when the shape changes.
        self.downsample = downsample
        self.stride = stride
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     """Residual basic block: conv3x3-BN-relu then conv3x3-BN."""
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = BatchNorm(planes)
     # Functional relu in place of nn.ReLU(inplace=True).
     self.relu = fluid.layers.relu
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = BatchNorm(planes)
     # Optional shortcut projection used when the shape changes.
     self.downsample = downsample
     self.stride = stride
Example #5
0
File: idcgan.py  Project: jay9z/GAN-pp
 def __init__(self):
     """Invertor: four stride-2 3x3 convs (1->64->128->192->256), each
     with leaky_relu, then a tanh fully-connected head down to 64 dims."""
     super(Invertor, self).__init__()
     # First conv applies leaky_relu directly (no BN).
     self.conv1 = Conv2D(num_channels=1, num_filters=64,
                         filter_size=3, padding=1, stride=2,
                         act='leaky_relu')
     # Remaining convs defer the activation to their BatchNorm.
     self.conv2 = Conv2D(num_channels=64, num_filters=128,
                         filter_size=3, padding=1, stride=2)
     self.bn2 = BatchNorm(num_channels=128, act='leaky_relu')
     self.conv3 = Conv2D(num_channels=128, num_filters=192,
                         filter_size=3, padding=1, stride=2)
     self.bn3 = BatchNorm(num_channels=192, act='leaky_relu')
     self.conv4 = Conv2D(num_channels=192, num_filters=256,
                         filter_size=3, padding=1, stride=2)
     self.bn4 = BatchNorm(num_channels=256, act='leaky_relu')
     self.fc = Linear(input_dim=1024, output_dim=64, act='tanh')
Example #6
0
File: idcgan.py  Project: jay9z/GAN-pp
 def __init__(self):
     # Generator: FC from a 64-dim latent code, then four stride-2
     # transpose convs upsampling 2->4->8->16->32 while narrowing
     # channels 256->192->128->64->1.
     super(Generator,self).__init__()
     # 64 -> 1024 units; bn1 normalizes 256 channels, so the FC output is
     # presumably reshaped to (256, 2, 2) in forward() -- TODO confirm.
     self.fc1 = Linear(input_dim=64,output_dim=1024)
     self.bn1 = BatchNorm(num_channels=256,act='relu')
     self.conv2 = Conv2DTranspose(num_channels=256,num_filters=192,filter_size=3,padding=1,output_size=4,stride=2)
     self.bn2 = BatchNorm(num_channels=192,act='relu')
     self.conv3 = Conv2DTranspose(num_channels=192,num_filters=128,filter_size=3,padding=1,output_size=8,stride=2)
     self.bn3 = BatchNorm(num_channels=128,act='relu')
     self.conv4 = Conv2DTranspose(num_channels=128,num_filters=64,filter_size=3,padding=1,output_size=16,stride=2)
     self.bn4 = BatchNorm(num_channels=64,act='relu')
     # Final layer applies tanh directly; no BatchNorm on the output.
     self.conv5 = Conv2DTranspose(num_channels=64,num_filters=1,filter_size=3,padding=1,output_size=32,stride=2,act='tanh')
 def __init__(self, num_scales, each_scales_size, point_scales_list, k=40):
     """Point-cloud classifier head: a multi-scale latent feature
     extractor followed by a 1920-1024-512-256-k MLP with BN(relu)
     between the hidden layers."""
     super(PointcloudCls, self).__init__()
     self.latentfeature = Latentfeature(num_scales, each_scales_size,
                                        point_scales_list)
     # MLP: 1920 -> 1024 -> 512 -> 256 -> k classes.
     self.fc1 = Linear(1920, 1024)
     self.fc2 = Linear(1024, 512)
     self.fc3 = Linear(512, 256)
     self.fc4 = Linear(256, k)
     self.bn1 = BatchNorm(1024, act='relu')
     self.bn2 = BatchNorm(512, act='relu')
     self.bn3 = BatchNorm(256, act='relu')
Example #8
0
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     """Residual bottleneck: 1x1 reduce -> 3x3 (carries the stride) ->
     1x1 expand to planes * expansion, each followed by BatchNorm."""
     super(Bottleneck, self).__init__()
     self.conv1 = Conv2D(inplanes, planes, filter_size=1, bias_attr=False)
     self.bn1 = BatchNorm(planes)
     self.conv2 = Conv2D(planes, planes, filter_size=3,
                         stride=stride, padding=1, bias_attr=False)
     self.bn2 = BatchNorm(planes)
     self.conv3 = Conv2D(planes, planes * self.expansion,
                         filter_size=1, bias_attr=False)
     self.bn3 = BatchNorm(planes * self.expansion)
     # Functional relu in place of nn.ReLU(inplace=True).
     self.relu = fluid.layers.relu
     # Optional shortcut projection used when the shape changes.
     self.downsample = downsample
     self.stride = stride
Example #9
0
    def __init__(self,
                 inputs,
                 filters,
                 is_training,
                 strides,
                 use_projection=False,
                 T=3,
                 data_format='channels_last',
                 non_local=False):
        """Bottleneck block built from 2D convs.

        inputs: input channel count. filters: bottleneck width; the block
        output is 4 * filters. strides: spatial stride applied by the 3x3
        conv and the projection shortcut. NOTE(review): is_training, T,
        data_format and non_local are accepted but unused here --
        presumably consumed by forward() or kept for signature
        compatibility; confirm.
        """

        super(Bottleneck3D, self).__init__()
        self.use_projection = use_projection
        filters_out = 4 * filters
        # Projection shortcut (1x1 conv + BN), created only when the shape
        # changes: stride != 1 or input channels differ from filters_out.
        self.conv2d_1 = None if not (strides != 1 or inputs != filters_out
                                     ) else fluid.dygraph.Sequential(
                                         Conv2D(inputs,
                                                filters_out,
                                                filter_size=1,
                                                stride=strides,
                                                padding=0,
                                                bias_attr=fluid.ParamAttr(
                                                    trainable=False)),
                                         BatchNorm(filters_out))
        # 1x1 reduce conv; bias is present but frozen (trainable=False).
        self.conv2d_2 = Conv2D(
            inputs,
            filters,
            filter_size=1,
            stride=1,
            param_attr=fluid.initializer.MSRAInitializer(uniform=False),
            bias_attr=fluid.ParamAttr(trainable=False))

        self.bn_2 = BatchNorm(filters, act='relu')

        # 3x3 spatial conv carrying the stride.
        self.conv2d_3 = Conv2D(
            filters,
            filters,
            filter_size=3,
            stride=(strides, strides),
            padding=1,
            param_attr=fluid.initializer.MSRAInitializer(uniform=False),
            bias_attr=fluid.ParamAttr(trainable=False))
        self.bn_3 = BatchNorm(filters, act='relu')
        # 1x1 expand back to 4 * filters.
        self.conv2d_4 = Conv2D(
            filters,
            filters * 4,
            filter_size=1,
            stride=1,
            padding=0,
            param_attr=fluid.initializer.MSRAInitializer(uniform=False),
            bias_attr=fluid.ParamAttr(trainable=False))
        # Final BN has no fused activation -- presumably relu happens
        # after the residual add in forward(); confirm.
        self.bn_4 = BatchNorm(4 * filters)
Example #10
0
File: unet.py  Project: pennypm/PaddleSeg
 def __init__(self, num_channels, num_filters):
     """Two consecutive 3x3 same-padding convs, each with BatchNorm
     (no fused activation)."""
     super(DoubleConv, self).__init__()
     self.conv0 = Conv2D(num_channels=num_channels, num_filters=num_filters,
                         filter_size=3, stride=1, padding=1)
     self.bn0 = BatchNorm(num_channels=num_filters)
     self.conv1 = Conv2D(num_channels=num_filters, num_filters=num_filters,
                         filter_size=3, stride=1, padding=1)
     self.bn1 = BatchNorm(num_channels=num_filters)
Example #11
0
    def __init__(self, num_channels, num_filters, rates):
        """ASPP head: a 1x1 conv branch, an image-pooling branch, and one
        dilated-conv (ASPPConv) branch per rate, fused by a 1x1 projection.

        num_channels: input channels; num_filters: channels per branch;
        rates: dilation rates, one ASPPConv branch each.
        """
        super(ASPPModule, self).__init__()
        self.features = []
        # Branch 1: 1x1 conv + BN(relu).
        self.features.append(
            fluid.dygraph.Sequential(Conv2D(num_channels, num_filters, 1),
                                     BatchNorm(num_filters, act='relu')))
        # Branch 2: global-pooling branch.
        self.features.append(ASPPPooling(num_channels, num_filters))

        # One dilated conv branch per rate r.
        for r in rates:
            self.features.append(ASPPConv(num_channels, num_filters, r))

        # Fuse the (2 + len(rates)) concatenated branches down to 256.
        # BUG FIX: the BatchNorm must match the projection conv's output
        # channel count (256), not num_filters -- the old code crashed
        # whenever num_filters != 256.
        self.project = fluid.dygraph.Sequential(
            Conv2D(num_filters * (2 + len(rates)), 256, 1),
            BatchNorm(256, act='relu'))
Example #12
0
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None):
        """Conv2D + BatchNorm building block.

        num_channels: input channel count of the conv layer.
        num_filters: output channel count of the conv layer.
        filter_size: kernel size; padding (filter_size - 1) // 2 keeps the
            spatial size unchanged at stride 1.
        stride: conv stride.
        groups: group count for grouped convolution; groups=1 (default)
            disables grouping.
        act: activation type; act=None (default) applies no activation.
            The activation is fused into the BatchNorm, not the conv.
        """
        super(ConvBNLayer, self).__init__()

        # Conv layer: bias-free (bias_attr=False), no activation of its own.
        self._conv = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            bias_attr=False,
        )

        # BatchNorm layer carrying the activation.
        self._batch_norm = BatchNorm(num_filters, act=act)
Example #13
0
 def __init__(self, num_channels, num_filters, filter_size, **kwargs):
     """Conv2D followed by BatchNorm; any extra Conv2D options (stride,
     padding, act, ...) pass straight through **kwargs."""
     super(ConvBn, self).__init__()
     self.conv = Conv2D(num_channels, num_filters, filter_size, **kwargs)
     self.batch_norm = BatchNorm(num_filters)
Example #14
0
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size=3,
                 stride=1,
                 groups=1,
                 use_bn=True,
                 act='relu',
                 name=None):
        """Conv2D optionally followed by BatchNorm.

        When use_bn is True the activation is deferred to the BatchNorm;
        otherwise the conv applies it directly. Padding is
        (filter_size - 1) // 2 ('same' at stride 1) in both cases.
        """
        super(ConvBNLayer, self).__init__(name)

        self.use_bn = use_bn
        # Single conv construction: the original duplicated the whole
        # Conv2D call in both branches, differing only in `act`.
        self.conv = Conv2D(num_channels=num_channels,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            stride=stride,
                            padding=(filter_size-1)//2,
                            groups=groups,
                            act=None if use_bn else act,
                            bias_attr=None)
        if use_bn:
            self.bn = BatchNorm(num_filters, act=act)
Example #15
0
 def __init__(self, num_channels, num_classes):
     """DeepLab segmentation head, composed as a Sequential:
     ASPP (rates 12/24/36) -> 3x3 conv -> BN(relu) -> 1x1 classifier."""
     super(DeepLabHead, self).__init__(
         ASPPModule(num_channels, 256, [12, 24, 36]),
         Conv2D(256, 256, 3, padding=1),
         BatchNorm(256, act='relu'),
         Conv2D(256, num_classes, 1))
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None,
                 dilation=1,
                 padding=None,
                 name=None):
        """Conv2D + BatchNorm; the activation is fused into the BatchNorm.

        padding defaults to (filter_size - 1) // 2 -- 'same' padding for
        the undilated kernel at stride 1 -- when not given explicitly.
        """
        super(ConvBNLayer, self).__init__(name)

        # Default to same-padding (the original's no-op
        # `else: padding = padding` branch is removed).
        if padding is None:
            padding = (filter_size - 1) // 2

        # Bias-free conv; no activation of its own.
        self.conv = Conv2D(num_channels=num_channels,
                           num_filters=num_filters,
                           filter_size=filter_size,
                           stride=stride,
                           padding=padding,
                           groups=groups,
                           act=None,
                           dilation=dilation,
                           bias_attr=False)
        self.bn = BatchNorm(num_filters, act=act)
Example #17
0
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act="relu",
                 name=None):
        """Conv2D + BatchNorm with explicitly named parameters.

        The activation is fused into the BatchNorm. NOTE(review): `name`
        is used unconditionally to build parameter names, so despite the
        None default it must be a string -- confirm against callers.
        """
        super(ConvBNLayer, self).__init__()

        # Bias-free conv; weights initialized Normal(scale=0.001) and
        # named "<name>_weights".
        self._conv = Conv2D(num_channels=num_channels,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            stride=stride,
                            padding=(filter_size - 1) // 2,
                            groups=groups,
                            act=None,
                            param_attr=ParamAttr(
                                initializer=Normal(scale=0.001),
                                name=name + "_weights"),
                            bias_attr=False)
        bn_name = name + '_bn'
        # BN scale/offset and running statistics are all explicitly named;
        # scale starts at 1.0 and offset at 0.0.
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale',
                                 initializer=fluid.initializer.Constant(1.0)),
            bias_attr=ParamAttr(bn_name + '_offset',
                                initializer=fluid.initializer.Constant(0.0)),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
Example #18
0
    def __init__(self,
                 in_c=768,
                 out_c=768,
                 filter_size=[3, 1],
                 dilation=1,
                 stride=1,
                 affine=False,
                 use_cudnn=True,
                 name=None):
        """[k, 1]-shaped conv, dilated along the first axis only, followed
        by a BatchNorm whose scale/shift are trainable only when affine.

        NOTE(review): despite the class name, no relu is created here --
        presumably applied in forward(); confirm.
        NOTE(review): filter_size uses a mutable list default; it is only
        read here, so the shared-default pitfall does not bite.
        """
        super(ReluConvBN, self).__init__()
        # Conv weights use MSRA init; the parameter stays unnamed when
        # `name` is None, otherwise it is "<name>_conv.weights".
        conv_param = fluid.ParamAttr(name=name if name is None else
                                     (name + "_conv.weights"),
                                     initializer=fluid.initializer.MSRA())

        # Pad only along the dilated first axis so that axis keeps its
        # length; the second axis (kernel width 1) needs no padding.
        self.conv = Conv2D(in_c,
                           out_c,
                           filter_size,
                           dilation=[dilation, 1],
                           stride=stride,
                           padding=[(filter_size[0] - 1) * dilation // 2, 0],
                           param_attr=conv_param,
                           act=None,
                           bias_attr=False,
                           use_cudnn=use_cudnn)

        # BN affine params: scale init 1, shift init 0; frozen unless
        # affine=True.
        gama = ParamAttr(initializer=fluid.initializer.Constant(value=1),
                         trainable=affine)
        beta = ParamAttr(initializer=fluid.initializer.Constant(value=0),
                         trainable=affine)

        self.bn = BatchNorm(out_c, param_attr=gama, bias_attr=beta)
Example #19
0
 def __init__(self, in_channels, out_channels, spatial_stride=1,
              temporal_stride=1, dilation=1, act=None):
     """3x3x3 Conv3D with separate temporal/spatial strides, followed by
     a BatchNorm that carries the optional activation."""
     super(Conv3x3x3BN, self).__init__()
     # Padding (1, dilation, dilation) keeps each axis 'same'-sized at
     # stride 1.
     self.conv = Conv3D(in_channels, out_channels,
                        filter_size=3,
                        stride=(temporal_stride, spatial_stride,
                                spatial_stride),
                        padding=(1, dilation, dilation),
                        dilation=dilation,
                        bias_attr=False)
     self.bn = BatchNorm(out_channels, act=act)
Example #20
0
    def __init__(
            self,
            num_filters,
            num_channels,
            filter_size,
            stride=1,
            groups=1,  # the group parameter does not need changing for now
            act='relu',
            padding=None,
            name_scope=None,
            use_bias=False):
        """Conv2D + BatchNorm; the activation is applied by the BatchNorm.

        NOTE(review): num_filters precedes num_channels here -- the
        reverse of the other ConvBNLayer variants -- so positional
        callers must mind the order.

        padding defaults to (filter_size - 1) // 2 ('same' at stride 1)
        when not given; use_bias=False disables the conv bias.
        """

        super(ConvBNLayer, self).__init__(name_scope)

        self._conv = Conv2D(num_channels=num_channels,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            stride=stride,
                            padding=(filter_size - 1) //
                            2 if padding is None else padding,
                            groups=groups,
                            act=None,
                            bias_attr=use_bias)

        self._batch_norm = BatchNorm(num_filters, act=act)
    def __init__(self,
                 block=BasicBlock,
                 layers=(3, 4, 6, 3),
                 inp=3,
                 num_classes=400,
                 input_size=112,
                 dropout=0.5):
        """ResNet trunk: stem conv, four residual stages, average pool,
        dropout, and a fully-connected classifier.

        block: residual block class; must expose `.expansion`.
        layers: per-stage block counts. BUG FIX: the old default of 50
            (a plain int) always crashed at `layers[0]`; (3, 4, 6, 3) is
            the standard ResNet-50 layout. Callers passing an explicit
            list are unaffected.
        inp: input image channels.
        input_size: input spatial size, used to size the final avg pool.
        dropout: drop probability applied before the FC layer.
        """
        self.inplanes = 64
        self.inp = inp
        super(ResNet, self).__init__()
        # Stem: 7x7 stride-2 conv + BN + relu + 3x3 stride-2 max-pool.
        self.conv1 = Conv2D(inp,
                            64,
                            filter_size=7,
                            stride=2,
                            padding=3,
                            bias_attr=False)
        self.bn1 = BatchNorm(64)
        self.relu = fluid.layers.relu  # stands in for nn.ReLU(inplace=True)
        self.maxpool = Pool2D(
            pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.rep_of_rep = repofrep("flowofflow")
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        # Pool size assumes the trunk downsamples by 32; probably needs
        # adjusting for other input spatial sizes.
        size = int(math.ceil(input_size / 32))
        self.avgpool = Pool2D(pool_size=size,
                              pool_stride=1,
                              pool_padding=0,
                              pool_type='avg')
        self.dropout = Dropout(dropout)
        self.fc = Linear(512 * block.expansion, num_classes)
    def __init__(self, num_classes=59, backbone='resnet50'):
        """PSPNet: ResNet-50 backbone, pyramid pooling module, and a
        conv classifier head.

        NOTE: `backbone` is accepted for API compatibility but a
        ResNet50 is always constructed.
        """
        super(PSPNet, self).__init__()

        res = ResNet50(pretrained=False)
        # Stem = backbone conv + initial max-pool.
        self.layer0 = fluid.dygraph.Sequential(res.conv, res.pool2d_max)
        self.layer1 = res.layer1
        self.layer2 = res.layer2
        self.layer3 = res.layer3
        self.layer4 = res.layer4

        # Pyramid pooling doubles the channel count: 2048 -> 4096.
        num_channels = 2048
        self.pspmodule = PSPModule(num_channels, [1, 2, 3, 6])
        num_channels *= 2

        # Classifier: 4096 -> 512 -> num_classes.
        self.classifier = fluid.dygraph.Sequential(
            Conv2D(num_channels, num_filters=512, filter_size=3, padding=1),
            BatchNorm(512, act='relu'),
            Dropout(0.1),
            Conv2D(512, num_classes, filter_size=1))
Example #23
0
    def __init__(self, in_channels, out_channels, filter_size=3, stride=1,
                 groups=1, act='relu'):
        """Same-padding, bias-free Conv2D followed by a BatchNorm that
        carries the activation."""
        super(ConvBN, self).__init__()

        self.conv = Conv2D(num_channels=in_channels,
                           num_filters=out_channels,
                           filter_size=filter_size,
                           stride=stride,
                           padding=(filter_size - 1) // 2,
                           groups=groups,
                           act=None,
                           bias_attr=False)
        self.bn = BatchNorm(num_channels=out_channels, act=act)
Example #24
0
    def __init__(self, in_channels, out_channels, filter_size=1, stride=1,
                 dilation=1, act=None):
        """Depthwise-separable conv: a per-channel (grouped) conv, a 1x1
        pointwise conv mixing channels, then BatchNorm with optional act."""
        super(SeparateConvBN, self).__init__()

        # Depthwise: groups == in_channels; padding scaled by dilation.
        self.conv = Conv2D(num_channels=in_channels,
                           num_filters=in_channels,
                           filter_size=filter_size,
                           stride=stride,
                           padding=(filter_size // 2) * dilation,
                           groups=in_channels,
                           dilation=dilation)
        # Pointwise 1x1 conv.
        self.pointwise = Conv2D(num_channels=in_channels,
                                num_filters=out_channels,
                                filter_size=1,
                                stride=1,
                                padding=0,
                                groups=1,
                                dilation=1)
        self.bn = BatchNorm(out_channels, act=act)
Example #25
0
 def __init__(self, num_channels, num_filters, dilation):
     """Dilated (atrous) 3x3 conv + BN(relu), composed as a Sequential;
     padding == dilation keeps the spatial size unchanged."""
     super(ASPPConv, self).__init__(
         Conv2D(num_channels, num_filters,
                filter_size=3,
                padding=dilation,
                dilation=dilation),
         BatchNorm(num_filters, act='relu'))
Example #26
0
 def __init__(self, num_channels, bin_size_list):
     """Pyramid pooling module: one 1x1 conv + BN(relu) branch per bin
     size, each reducing channels to num_channels / len(bin_size_list)."""
     super(PSPModule, self).__init__()
     self.bin_size_list = bin_size_list
     num_filters = num_channels // len(bin_size_list)
     self.features = [
         fluid.dygraph.Sequential(Conv2D(num_channels, num_filters, 1),
                                  BatchNorm(num_filters, act='relu'))
         for _ in bin_size_list
     ]
Example #27
0
 def __init__(self, num_scales, each_scales_size, point_scales_list):
     """Multi-scale latent feature extractor: one Convlayer per entry of
     point_scales_list plus a 1x1 Conv1D + BN(relu) fusion layer."""
     super(Latentfeature, self).__init__()
     self.num_scales = num_scales
     self.each_scales_size = each_scales_size
     self.point_scales_list = point_scales_list
     # One Convlayer per point scale (exactly three scales expected).
     self.Convlayers1 = Convlayer(point_scales=self.point_scales_list[0])
     self.Convlayers2 = Convlayer(point_scales=self.point_scales_list[1])
     self.Convlayers3 = Convlayer(point_scales=self.point_scales_list[2])
     self.conv1 = Conv1D(prefix='lf', num_channels=3, num_filters=1,
                         size_k=1, act=None)
     self.bn1 = BatchNorm(1, act='relu')
Example #28
0
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 act=None):
        """Depthwise-separable 3D conv: grouped Conv3D, 1x1x1 pointwise
        Conv3D, then BatchNorm with optional activation."""
        super(SeparateConv3DBN, self).__init__()

        # 'same' padding for either an int or a 3-tuple kernel size.
        if isinstance(kernel_size, int):
            padding = kernel_size // 2
        else:
            padding = (kernel_size[0] // 2, kernel_size[1] // 2,
                       kernel_size[2] // 2)
        self.depthwise = Conv3D(in_channels, in_channels,
                                filter_size=kernel_size, stride=stride,
                                padding=padding, groups=in_channels)
        self.pointwise = Conv3D(in_channels, out_channels, filter_size=1,
                                stride=1, padding=0, groups=1)
        self.bn = BatchNorm(out_channels, act=act)
Example #29
0
    def __init__(self, name_scope, out_chs=20, in_chs=1024, inter_chs=512):
        """Dual Attention Network head: position (PAM) and channel (CAM)
        attention branches over a ResNet-50 backbone, with dropout + 1x1
        classifier heads for each branch and for their combination."""
        super(DANet, self).__init__(name_scope)
        name_scope = self.full_name()
        self.in_chs = in_chs
        self.out_chs = out_chs
        # Fall back to in_chs when inter_chs is falsy.
        self.inter_chs = inter_chs if inter_chs else in_chs

        self.backbone = ResNet(50)

        # Pre-attention 3x3 reductions (position / channel branches).
        self.conv5p = Sequential(
            Conv2D(self.in_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'))
        self.conv5c = Sequential(
            Conv2D(self.in_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'))

        # Attention modules: spatial/position and channel.
        self.sp = PAM_module(self.inter_chs)
        self.sc = CAM_module(self.inter_chs)

        # Post-attention 3x3 convs.
        self.conv6p = Sequential(
            Conv2D(self.inter_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'))
        self.conv6c = Sequential(
            Conv2D(self.inter_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'))

        # Dropout + 1x1 classifier heads (position, channel, combined).
        self.conv7p = Sequential(
            Dropout(0.1),
            Conv2D(self.inter_chs, self.out_chs, 1))
        self.conv7c = Sequential(
            Dropout(0.1),
            Conv2D(self.inter_chs, self.out_chs, 1))
        self.conv7pc = Sequential(
            Dropout(0.1),
            Conv2D(self.inter_chs, self.out_chs, 1))
Example #30
0
    def __init__(self, num_classes=1):
        """CNN with six 5x5 leaky_relu conv + BN stages
        (3->64->128->256->512->1024->1024), a shared 2x2 max-pool, and a
        two-layer fully-connected head."""
        super(CNN_LeakyRelu, self).__init__()

        # Each conv applies leaky_relu itself; BN follows without an act.
        self.conv1 = Conv2D(3, 64, 5, padding=2, stride=1, act='leaky_relu')
        self.bn1 = BatchNorm(64)
        self.conv2 = Conv2D(64, 128, 5, padding=2, stride=1, act='leaky_relu')
        self.bn2 = BatchNorm(128)
        self.conv3 = Conv2D(128, 256, 5, padding=2, stride=1,
                            act='leaky_relu')
        self.bn3 = BatchNorm(256)
        self.conv4 = Conv2D(256, 512, 5, padding=2, stride=1,
                            act='leaky_relu')
        self.bn4 = BatchNorm(512)
        self.conv5 = Conv2D(512, 1024, 5, padding=2, stride=1,
                            act='leaky_relu')
        self.bn5 = BatchNorm(1024)
        self.conv6 = Conv2D(1024, 1024, 5, padding=2, stride=1,
                            act='leaky_relu')
        self.bn6 = BatchNorm(1024)

        # Head: flattened 1024 x 7 x 7 features -> 1024 -> num_classes.
        self.fc1 = Linear(1024 * 7 * 7, 1024, act='leaky_relu')
        self.fc2 = Linear(1024, num_classes)

        self.pool_down = Pool2D(pool_size=2, pool_stride=2, pool_type='max')