Code Example #1
File: networks.py  Project: na-gi/UGATIT-Paddle
    def forward(self, input):
        x = self.model(input)  #[1, 2048, 2, 2]

        gap = Pool2D(pool_size=x.shape[-1],
                     pool_stride=x.shape[-1],
                     pool_type='avg')(x)  #[1, 2048, 1, 1]
        gap = fluid.layers.reshape(gap, shape=[x.shape[0], -1])
        gap_logit = self.gap_fc(gap)  #torch.Size([1, 1])
        gap_weight = list(self.gap_fc.parameters())[0]
        gap_weight = fluid.layers.unsqueeze(input=gap_weight, axes=[0])
        gap_weight = fluid.layers.unsqueeze(input=gap_weight, axes=[3])
        gap = x * gap_weight  #[1, 2048, 2, 2]

        gmp = Pool2D(pool_size=x.shape[-1],
                     pool_stride=x.shape[-1],
                     pool_type='max')(x)
        gmp = fluid.layers.reshape(gmp, shape=[x.shape[0], -1])
        gmp_logit = self.gmp_fc(gmp)
        gmp_weight = list(self.gmp_fc.parameters())[0]
        gmp_weight = fluid.layers.unsqueeze(input=gmp_weight, axes=[0])
        gmp_weight = fluid.layers.unsqueeze(input=gmp_weight, axes=[3])
        gmp = x * gmp_weight

        cam_logit = fluid.layers.concat([gap_logit, gmp_logit], 1)
        x = fluid.layers.concat([gap, gmp], 1)
        x = fluid.layers.leaky_relu(self.conv1x1(x))

        heatmap = fluid.layers.reduce_sum(x, dim=1, keep_dim=True)

        x = self.pad(x)
        out = self.conv(x)

        return out, cam_logit, heatmap
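Pool2D with pool_size and pool_stride equal to the spatial size of x acts as a global average pool. A minimal standalone sketch (assuming the Paddle 1.x dygraph API used throughout these examples) showing that it matches Pool2D with global_pooling=True:

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Pool2D

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.random.rand(1, 2048, 2, 2).astype('float32'))
    # pool over the full 2x2 spatial extent, as in the forward pass above
    gap = Pool2D(pool_size=x.shape[-1], pool_stride=x.shape[-1], pool_type='avg')(x)
    # equivalent global pooling without hard-coding the spatial size
    gap_global = Pool2D(pool_type='avg', global_pooling=True)(x)
    print(gap.shape, gap_global.shape)  # both [1, 2048, 1, 1]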
Code Example #2
    def __init__(self, num_classes=1):
        super(CNN_AllTricks, self).__init__()

        self.conv1 = Conv2D(3, 64, 3, padding=1, stride=1, act='leaky_relu')
        self.bn1 = BatchNorm(64)
        self.conv2 = Conv2D(64, 128, 3, padding=1, stride=1, act='leaky_relu')
        self.bn2 = BatchNorm(128)
        self.conv3 = Conv2D(128, 256, 3, padding=1, stride=1, act='leaky_relu')
        self.bn3 = BatchNorm(256)
        self.conv4 = Conv2D(256, 512, 3, padding=1, stride=1, act='leaky_relu')
        self.bn4 = BatchNorm(512)
        self.block5 = ConcatConv((512, 384), (384, 256), (256, 256),
                                 128,
                                 act_fun='leaky_relu')
        self.bn5 = BatchNorm(1024)
        self.block6 = ConcatConv((1024, 384), (384, 256), (256, 256),
                                 128,
                                 act_fun='leaky_relu')
        self.bn6 = BatchNorm(1024)

        self.pool_global = Pool2D(pool_stride=1,
                                  global_pooling=True,
                                  pool_type='avg')
        self.fc = Linear(input_dim=1024, output_dim=num_classes)

        self.pool_down = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.dropout = Dropout(p=0.5)
Code Example #3
    def __init__(self,
                 block=BasicBlock,
                 layers=(3, 4, 6, 3),  # per-stage block counts; an int (e.g. 50) would break the indexing below
                 inp=3,
                 num_classes=400,
                 input_size=112,
                 dropout=0.5):
        self.inplanes = 64
        self.inp = inp
        super(ResNet, self).__init__()
        self.conv1 = Conv2D(inp,
                            64,
                            filter_size=7,
                            stride=2,
                            padding=3,
                            bias_attr=False)
        self.bn1 = BatchNorm(64)
        self.relu = fluid.layers.relu  #nn.ReLU(inplace=True)
        self.maxpool = Pool2D(
            pool_size=3, pool_stride=2, pool_padding=1,
            pool_type='max')  #nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.rep_of_rep = repofrep("flowofflow")
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        # probably need to adjust this based on input spatial size
        size = int(math.ceil(input_size / 32))
        self.avgpool = Pool2D(pool_size=size,
                              pool_stride=1,
                              pool_padding=0,
                              pool_type='avg')  #nn.AvgPool2d(size, stride=1)
        self.dropout = Dropout(dropout)  #nn.Dropout(p=dropout)
        self.fc = Linear(512 * block.expansion, num_classes)
Code Example #4
    def forward(self, input):
        x = self.DownBlock(input)

        gap = Pool2D(pool_size=x.shape[-1],
                     pool_stride=x.shape[-1],
                     pool_type='avg')(x)
        gap = fluid.layers.reshape(gap, shape=[x.shape[0], -1])
        gap_logit = self.gap_fc(gap)
        gap_weight = list(self.gap_fc.parameters())[0]
        gap_weight = fluid.layers.unsqueeze(input=gap_weight, axes=[0])
        gap_weight = fluid.layers.unsqueeze(input=gap_weight, axes=[3])
        gap = x * gap_weight

        gmp = Pool2D(pool_size=x.shape[-1],
                     pool_stride=x.shape[-1],
                     pool_type='max')(x)
        gmp = fluid.layers.reshape(gmp, shape=[x.shape[0], -1])
        gmp_logit = self.gmp_fc(gmp)
        gmp_weight = list(self.gmp_fc.parameters())[0]
        gmp_weight = fluid.layers.unsqueeze(input=gmp_weight, axes=[0])
        gmp_weight = fluid.layers.unsqueeze(input=gmp_weight, axes=[3])
        gmp = x * gmp_weight

        cam_logit = fluid.layers.concat([gap_logit, gmp_logit], 1)
        x = fluid.layers.concat([gap, gmp], 1)
        x = self.conv1x1(x)
        heatmap = fluid.layers.reduce_sum(x, dim=1, keep_dim=True)

        if self.light:
            x_ = fluid.layers.adaptive_pool2d(x, 1, pool_type='avg')
            x_ = fluid.layers.reshape(x_, shape=[x.shape[0], -1])
            x_ = self.FC(x_)
        else:
            x_ = fluid.layers.reshape(x, shape=[x.shape[0], -1])
            x_ = self.FC(x_)
        gamma, beta = self.gamma(x_), self.beta(x_)

        for i in range(self.n_blocks):
            x = getattr(self, 'UpBlock1_' + str(i+1))(x, gamma, beta)
        out = self.UpBlock2(x)
        return out, cam_logit, heatmap
Code Example #5
    def __init__(self, layers=50, class_dim=1):
        super(ResNet, self).__init__()
        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, "supported layers are {} but input layer is {}".format(supported_layers,
                                                                                                  layers)
        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]
        self.conv = ConvBNLayer(num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu')
        self.pool2d_max = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
        self.bottleneck_block_list = []
        num_channels = 64
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(num_channels=num_channels,
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    shortcut=shortcut))
                num_channels = bottleneck_block._num_channels_out
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True
        self.pool2d_avg = Pool2D(pool_size=7, pool_type='avg', global_pooling=True)

        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self.out = Linear(input_dim=2048, output_dim=class_dim,
                          param_attr=fluid.param_attr.ParamAttr(initializer=fluid.initializer.Uniform(-stdv, stdv)))
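The forward pass is not part of this snippet; a plausible sketch following the usual Paddle dygraph ResNet pattern (an assumption, not the project's actual code):

    def forward(self, inputs):
        y = self.conv(inputs)
        y = self.pool2d_max(y)
        for bottleneck_block in self.bottleneck_block_list:
            y = bottleneck_block(y)
        y = self.pool2d_avg(y)
        y = fluid.layers.reshape(y, shape=[y.shape[0], -1])  # flatten to [N, 2048]
        return self.out(y)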
Code Example #6
 def __init__(self):
     super(MyLeNet, self).__init__()
     self.hidden1_1 = Conv2D(1, 28, 5, 1)
     self.hidden1_2 = Pool2D(pool_size=2, pool_type='max', pool_stride=1)
     self.hidden2_1 = Conv2D(28, 32, 3, 1)
     self.hidden2_2 = Pool2D(pool_size=2, pool_type='max', pool_stride=1)
     self.hidden3 = Conv2D(32, 32, 3, 1)
     self.hidden4 = Linear(32 * 10 * 10, 65, act='softmax')
Code Example #7
 def __init__(self):
     super(MyLeNet,self).__init__()
     self.hidden1_1 = Conv2D()
     self.hidden1_2 = Pool2D()
     self.hidden2_1 = Conv2D()
     self.hidden2_2 = Pool2D()
     self.hidden3 = Conv2D()
     self.hidden4 = Linear()
Code Example #8
File: demo.py  Project: yyy20119/paddle_gesture
 def __init__(self):
     super(MyLeNet, self).__init__()
     self.c1 = Conv2D(3, 6, 5, 1)
     self.s2 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
     self.c3 = Conv2D(6, 16, 5, 1)
     self.s4 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
     self.c5 = Conv2D(16, 120, 5, 1)
     self.f6 = Linear(120, 84, act='relu')
     self.f7 = Linear(84, 10, act='softmax')
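With a 3x32x32 input the layer shapes line up with classic LeNet-5 (c5 produces a 120-channel 1x1 map that feeds f6). A small construction check, hedged because the forward method is not shown here:

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    net = MyLeNet()
    x = fluid.dygraph.to_variable(np.random.rand(1, 3, 32, 32).astype('float32'))
    print(net.c1(x).shape)  # [1, 6, 28, 28]; s2/c3/s4/c5 then reduce this to [1, 120, 1, 1]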
Code Example #9
File: networks.py  Project: na-gi/UGATIT-Paddle
    def forward(self, input):
        x = self.DownBlock(input)
        # Output of the encoder, i.e. the encoder feature map
        # torch.Size([1, 256, 64, 64])
        # gap torch.Size([1, 256, 1, 1])

        gap = Pool2D(pool_size=x.shape[-1],
                     pool_stride=x.shape[-1],
                     pool_type='avg')(x)  # global average pooling
        gap = fluid.layers.reshape(gap, shape=[x.shape[0], -1])  # [1, 256]
        gap_logit = self.gap_fc(gap)  # gap prediction
        gap_weight = list(self.gap_fc.parameters())[0]  # weight of self.gap_fc, torch.Size([1, 256])
        gap_weight = fluid.layers.unsqueeze(input=gap_weight, axes=[0])
        gap_weight = fluid.layers.unsqueeze(input=gap_weight, axes=[3])
        gap = x * gap_weight  # feature map re-weighted by the GAP fc weights, torch.Size([1, 256, 64, 64])

        gmp = Pool2D(pool_size=x.shape[-1],
                     pool_stride=x.shape[-1],
                     pool_type='max')(x)
        gmp = fluid.layers.reshape(gmp, shape=[x.shape[0], -1])
        gmp_logit = self.gmp_fc(gmp)
        gmp_weight = list(self.gmp_fc.parameters())[0]
        gmp_weight = fluid.layers.unsqueeze(input=gmp_weight, axes=[0])
        gmp_weight = fluid.layers.unsqueeze(input=gmp_weight, axes=[3])
        gmp = x * gmp_weight  #torch.Size([1, 256, 64, 64])

        cam_logit = fluid.layers.concat([gap_logit, gmp_logit], 1)  # CAM logits combining gap and gmp
        x = fluid.layers.concat([gap, gmp], 1)  # torch.Size([1, 512, 64, 64])
        x = self.conv1x1(x)  # 1x1 conv mapping 512 channels back to 256, torch.Size([1, 256, 64, 64])
        #x = self.relu(self.conv1x1(x))
        #torch.Size([1, 256, 64, 64])

        # heatmap = torch.sum(x, dim=1, keepdim=True)
        heatmap = fluid.layers.reduce_sum(x, dim=1, keep_dim=True)  # attention heatmap
        #heatmap torch.Size([1, 1, 64, 64])

        if self.light:
            # the light variant applies a global average pool before the FC
            x_ = fluid.layers.adaptive_pool2d(x, 1, pool_type='avg')
            x_ = fluid.layers.reshape(x_, shape=[x.shape[0], -1])
            x_ = self.FC(x_)
        else:
            x_ = fluid.layers.reshape(x, shape=[x.shape[0], -1])
            x_ = self.FC(x_)
        gamma, beta = self.gamma(x_), self.beta(x_)  # adaptive gamma and beta
        # gamma torch.Size([1, 256]) beta torch.Size([1, 256])

        for i in range(self.n_blocks):
            # feed the adaptive gamma and beta into the AdaILN blocks
            x = getattr(self, 'UpBlock1_' + str(i + 1))(x, gamma, beta)
        out = self.UpBlock2(x)  # upsampling blocks produce the generated image
        #out torch.Size([1, 3, 256, 256]) cam_logit torch.Size([1, 2])  heatmap torch.Size([1, 1, 64, 64])

        return out, cam_logit, heatmap  # outputs: generated image, CAM logits, attention heatmap
Code Example #10
    def __init__(self):
        super(GoogLeNet, self).__init__()
        # 1
        self.conv1 = Conv2D(num_channels=3,
                            num_filters=64,
                            filter_size=7,
                            padding=3,
                            act='relu')
        self.pool1 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')

        # 2
        self.conv2_1 = Conv2D(num_channels=64,
                              num_filters=64,
                              filter_size=1,
                              act='relu')
        self.conv2_2 = Conv2D(num_channels=64,
                              num_filters=192,
                              filter_size=3,
                              padding=1,
                              act='relu')
        self.pool2 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')

        # 3
        self.block3_1 = Inception(192, 64, (96, 128), (16, 32), 32)
        self.block3_2 = Inception(256, 128, (128, 192), (32, 96), 64)
        self.pool3 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')

        # 4
        self.block4_1 = Inception(480, 192, (96, 208), (16, 48), 64)
        self.block4_2 = Inception(512, 160, (112, 224), (24, 64), 64)
        self.block4_3 = Inception(512, 128, (128, 256), (24, 64), 64)
        self.block4_4 = Inception(512, 112, (144, 288), (32, 64), 64)
        self.block4_5 = Inception(528, 256, (160, 320), (32, 128), 128)
        self.pool4 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')

        # 5
        self.block5_1 = Inception(832, 256, (160, 320), (32, 128), 128)
        self.block5_2 = Inception(832, 384, (192, 384), (48, 128), 128)
        self.pool5 = Pool2D(pool_stride=1,
                            global_pooling=True,
                            pool_type='avg')

        self.fc = Linear(input_dim=1024, output_dim=1, act=None)
Code Example #11
    def __init__(self, num_classes=1):
        super(CNN_LeakyRelu, self).__init__()

        self.conv1 = Conv2D(3, 64, 5, padding=2, stride=1, act='leaky_relu')
        self.bn1 = BatchNorm(64)
        self.conv2 = Conv2D(64, 128, 5, padding=2, stride=1, act='leaky_relu')
        self.bn2 = BatchNorm(128)
        self.conv3 = Conv2D(128, 256, 5, padding=2, stride=1, act='leaky_relu')
        self.bn3 = BatchNorm(256)
        self.conv4 = Conv2D(256, 512, 5, padding=2, stride=1, act='leaky_relu')
        self.bn4 = BatchNorm(512)
        self.conv5 = Conv2D(512,
                            1024,
                            5,
                            padding=2,
                            stride=1,
                            act='leaky_relu')
        self.bn5 = BatchNorm(1024)
        self.conv6 = Conv2D(1024,
                            1024,
                            5,
                            padding=2,
                            stride=1,
                            act='leaky_relu')
        self.bn6 = BatchNorm(1024)

        self.fc1 = Linear(1024 * 7 * 7, 1024, act='leaky_relu')
        self.fc2 = Linear(1024, num_classes)

        self.pool_down = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
Code Example #12
File: unet.py  Project: pennypm/PaddleSeg
 def __init__(self, num_channels, num_filters):
     super(Down, self).__init__()
     self.max_pool = Pool2D(pool_size=2,
                            pool_type='max',
                            pool_stride=2,
                            pool_padding=0)
     self.double_conv = DoubleConv(num_channels, num_filters)
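The matching forward pass is not included in the snippet; in a standard U-Net down-sampling block it simply chains the two layers (an assumption based on the layer names):

 def forward(self, inputs):
     x = self.max_pool(inputs)
     return self.double_conv(x)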
Code Example #13
 def __init__(self, c0, c1, c2, c3, c4):
     super(Inception, self).__init__()
     self.p1_1 = Conv2D(num_channels=c0,
                        num_filters=c1,
                        filter_size=1,
                        act='relu')
     self.p2_1 = Conv2D(num_channels=c0,
                        num_filters=c2[0],
                        filter_size=1,
                        act='relu')
     self.p2_2 = Conv2D(num_channels=c2[0],
                        num_filters=c2[1],
                        filter_size=3,
                        padding=1,
                        act='relu')
     self.p3_1 = Conv2D(num_channels=c0,
                        num_filters=c3[0],
                        filter_size=1,
                        act='relu')
     self.p3_2 = Conv2D(num_channels=c3[0],
                        num_filters=c3[1],
                        filter_size=5,
                        padding=2,
                        act='relu')
     self.p4_1 = Pool2D(pool_size=3,
                        pool_stride=1,
                        pool_padding=1,
                        pool_type='max')
     self.p4_2 = Conv2D(num_channels=c0,
                        num_filters=c4,
                        filter_size=1,
                        act='relu')
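The snippet only shows __init__; a sketch of how the four branches are typically combined in forward, with the channel-wise concat assumed from the standard GoogLeNet design:

 def forward(self, x):
     p1 = self.p1_1(x)
     p2 = self.p2_2(self.p2_1(x))
     p3 = self.p3_2(self.p3_1(x))
     p4 = self.p4_2(self.p4_1(x))
     return fluid.layers.concat([p1, p2, p3, p4], axis=1)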
Code Example #14
    def __init__(self, num_channels, num_filters):
        super(Encoder, self).__init__()
        #TODO: encoder contains:
        #       1 3x3conv + 1bn + relu +
        #       1 3x3conc + 1bn + relu +
        #       1 2x2 pool
        # return features before and after pool
        self.conv1 = Conv2D(num_channels,
                            num_filters,
                            filter_size=3,
                            stride=1,
                            padding=1)
        self.bn1 = BatchNorm(num_filters, act='relu')

        self.conv2 = Conv2D(num_filters,
                            num_filters,
                            filter_size=3,
                            stride=1,
                            padding=1)
        self.bn2 = BatchNorm(num_filters, act='relu')

        self.pool = Pool2D(pool_size=2,
                           pool_stride=2,
                           pool_type='max',
                           ceil_mode=True)
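A forward sketch matching the TODO above (return the feature map both before and after pooling); this is an assumed completion, not the original solution:

    def forward(self, inputs):
        x = self.bn1(self.conv1(inputs))
        x = self.bn2(self.conv2(x))
        x_pooled = self.pool(x)
        return x, x_pooled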
Code Example #15
    def __init__(self, num_classes=1):
        super(GoogLeNet_BN, self).__init__()

        self.bn64 = BatchNorm(64)
        self.bn192 = BatchNorm(192)
        self.bn256 = BatchNorm(256)
        self.bn480 = BatchNorm(480)
        self.bn512 = BatchNorm(512)
        self.bn528 = BatchNorm(528)
        self.bn832 = BatchNorm(832)
        self.bn1024 = BatchNorm(1024)

        self.conv1 = Conv2D(3, 64, 7, padding=3, stride=2, act='relu')
        self.pool1 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')
        self.conv2_1 = Conv2D(64, 64, 1, act='relu')
        self.conv2_2 = Conv2D(64, 192, 3, padding=1, act='relu')
        self.pool2 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')
        self.block3_a = Inception((192, 64), (96, 128), (16, 32), 32)
        self.block3_b = Inception((256, 128), (128, 192), (32, 96), 64)
        self.pool3 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')
        self.block4_a = Inception((480, 192), (96, 208), (16, 48), 64)
        self.block4_b = Inception((512, 160), (112, 224), (24, 64), 64)
        self.block4_c = Inception((512, 128), (128, 256), (24, 64), 64)
        self.block4_d = Inception((512, 112), (144, 288), (32, 64), 64)
        self.block4_e = Inception((528, 256), (160, 320), (32, 128), 128)
        self.pool4 = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type='max')
        self.block5_a = Inception((832, 256), (160, 320), (32, 128), 128)
        self.block5_b = Inception((832, 384), (192, 384), (48, 128), 128)
        self.pool5 = Pool2D(pool_size=7,
                            pool_stride=1,
                            global_pooling=True,
                            pool_type='avg')
        self.drop = Dropout(p=0.4)
        self.fc = Linear(1024, num_classes)
Code Example #16
    def __init__(self, num_classes=1):
        super(AlexNet, self).__init__()

        self.conv1 = Conv2D(num_channels=3, num_filters=96, filter_size=11, stride=4, padding=2, act='relu')
        self.pool1 = Pool2D(pool_size=3, pool_stride=2, pool_type='max')
        self.conv2 = Conv2D(num_channels=96, num_filters=256, filter_size=5, stride=1, padding=2, act='relu')
        self.pool2 = Pool2D(pool_size=3, pool_stride=2, pool_type='max')
        self.conv3 = Conv2D(num_channels=256, num_filters=384, filter_size=3, stride=1, padding=1, act='relu')
        self.conv4 = Conv2D(num_channels=384, num_filters=384, filter_size=3, stride=1, padding=1, act='relu')
        self.conv5 = Conv2D(num_channels=384, num_filters=256, filter_size=3, stride=1, padding=1, act='relu')
        self.pool5 = Pool2D(pool_size=3, pool_stride=2, pool_type='max')

        self.fc1 = Linear(256 * 6 * 6, 4096, act='relu')
        self.drop_out1 = Dropout(p=0.5)
        self.fc2 = Linear(4096, 4096, act='relu')
        self.drop_out2 = Dropout(p=0.5)
        self.fc3 = Linear(4096, num_classes, act='relu')
Code Example #17
File: basic_vgg.py  Project: sherillL/seghome
 def __init__(self, num_convs, in_channels, out_channels):
     super(vgg_block, self).__init__()
     self.conv_list = []
     for i in range(num_convs):
         conv_layer = self.add_sublayer('conv_' + str(i), Conv2D(num_channels=in_channels,
                                         num_filters=out_channels, filter_size=3, padding=1, act='relu'))
         self.conv_list.append(conv_layer)
         in_channels = out_channels
     self.pool = Pool2D(pool_stride=2, pool_size=2, pool_type='max')
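The forward pass is not shown; a minimal sketch that applies the registered conv layers in order and then the pool (an assumption consistent with the layer list built above):

 def forward(self, x):
     for conv in self.conv_list:
         x = conv(x)
     return self.pool(x)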
Code Example #18
 def __init__(self, point_scales):
     super(Convlayer, self).__init__()
     self.point_scales = point_scales
     self.conv1 = Conv2D(1, 64, (1, 3))
     self.conv2 = Conv2D(64, 64, 1)
     self.conv3 = Conv2D(64, 128, 1)
     self.conv4 = Conv2D(128, 256, 1)
     self.conv5 = Conv2D(256, 512, 1)
     self.conv6 = Conv2D(512, 1024, 1)
     self.maxpool = Pool2D(pool_size=(self.point_scales, 1), pool_stride=1)
Code Example #19
File: LeNet.py  Project: ralph0813/Paddle_CV_Baseline
    def __init__(self, num_classes=1):
        super(LeNet, self).__init__()
        self.conv1 = Conv2D(num_channels=3,
                            num_filters=6,
                            filter_size=5,
                            act='relu')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = Conv2D(num_channels=6,
                            num_filters=16,
                            filter_size=5,
                            act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv3 = Conv2D(num_channels=16,
                            num_filters=120,
                            filter_size=4,
                            act='relu')

        self.fc1 = Linear(input_dim=120 * 50 * 50, output_dim=84, act='relu')
        self.fc2 = Linear(input_dim=84, output_dim=num_classes)
Code Example #20
def build_pooling(attr, channels=None, conv_bias=False):
    method = attr['mode']
    pad = attr['pad'] if 'pad' in attr else 0
    if method == 'max':
        pool = Pool2D(attr['kernel_size'],
                      'max',
                      attr['stride'],
                      pad,
                      ceil_mode=True)  # all Caffe pooling layers use ceil mode
    elif method == 'ave':
        pool = Pool2D(attr['kernel_size'],
                      'avg',
                      attr['stride'],
                      pad,
                      ceil_mode=True)  # all Caffe pooling layers use ceil mode
    else:
        raise ValueError("Unknown pooling method: {}".format(method))

    return pool, channels
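A hedged usage sketch with a Caffe-style attribute dict; the key names ('mode', 'kernel_size', 'stride', 'pad') come from the function above, and the concrete values are illustrative only:

pool, channels = build_pooling({'mode': 'max', 'kernel_size': 2, 'stride': 2, 'pad': 0},
                               channels=64)
# pool is a dygraph Pool2D layer that can then be applied to a 4-D NCHW tensor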
Code Example #21
File: vgg16.py  Project: fairytail655/paddle-net
 def __make_layer(self, in_dim, cfg):
     in_planes = in_dim
     layer_list = []
     for layer in cfg:
         for out_planes in layer:
             layer_list.append(BasicConv(in_planes, out_planes))
             in_planes = out_planes
         layer_list.append(
             Pool2D(pool_size=2, pool_type='max', pool_stride=2))
     return Sequential(*layer_list)
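A sketch of the cfg layout __make_layer expects: a list of stages, each a list of output-channel counts, with a 2x2 max pool appended after every stage. The VGG-16 configuration below is an assumed example, not necessarily the project's actual values:

cfg = [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]]
# inside the network's __init__:
#     self.features = self.__make_layer(3, cfg)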
Code Example #22
File: fcn8s.py  Project: sherillL/seghome
    def __init__(self, num_classes=59):
        super(FCN8s, self).__init__()
        backbone = VGG16BN(pretrained=False)

        self.layer1 = backbone.layer1
        self.layer1[0].conv._padding = [100, 100]
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        self.layer2 = backbone.layer2
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        self.layer3 = backbone.layer3
        self.pool3 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        self.layer4 = backbone.layer4
        self.pool4 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)
        self.layer5 = backbone.layer5
        self.pool5 = Pool2D(pool_size=2, pool_stride=2, ceil_mode=True)

        self.fc6 = Conv2D(512, 4096, 7, act='relu')
        self.fc7 = Conv2D(4096, 4096, 1, act='relu')
        self.drop6 = Dropout()
        self.drop7 = Dropout()

        self.score = Conv2D(4096, num_classes, 1)
        self.score_pool3 = Conv2D(256, num_classes, 1)
        self.score_pool4 = Conv2D(512, num_classes, 1)

        self.up_output = Conv2DTranspose(num_channels=num_classes,
                                        num_filters=num_classes,
                                        filter_size=4,
                                        stride=2,
                                        bias_attr=False)

        self.up_pool4 = Conv2DTranspose(num_channels=num_classes,
                                        num_filters=num_classes,
                                        filter_size=4,
                                        stride=2,
                                        bias_attr=False)

        self.up_final = Conv2DTranspose(num_channels=num_classes,
                                        num_filters=num_classes,
                                        filter_size=16,
                                        stride=8,
                                        bias_attr=False)
Code Example #23
    def __init__(self, num_classes=1):
        super(AlexNet, self).__init__()

        self.conv1 = Conv2D(num_channels=3,
                            num_filters=96,
                            filter_size=11,
                            stride=4,
                            padding=5,
                            act='relu')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = Conv2D(num_channels=96,
                            num_filters=256,
                            filter_size=5,
                            stride=1,
                            padding=2,
                            act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv3 = Conv2D(num_channels=256,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv4 = Conv2D(num_channels=384,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.conv5 = Conv2D(num_channels=384,
                            num_filters=256,
                            filter_size=3,
                            stride=1,
                            padding=1,
                            act='relu')
        self.pool5 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')

        self.fc1 = Linear(input_dim=12544, output_dim=4096, act='relu')
        self.drop_ratio1 = 0.5
        self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=4096, output_dim=num_classes)
Code Example #24
 def __init__(self, c1, c2, c3, c4):
     super(Inception, self).__init__()
     self.p1_1 = Conv2D(c1[0], c1[1], 1, act='relu')
     self.p2_1 = Conv2D(c1[0], c2[0], 1, act='relu')
     self.p2_2 = Conv2D(c2[0], c2[1], 3, padding=1, act='relu')
     self.p3_1 = Conv2D(c1[0], c3[0], 1, act='relu')
     self.p3_2 = Conv2D(c3[0], c3[1], 5, padding=2, act='relu')
     self.p4_1 = Pool2D(pool_size=3,
                        pool_stride=1,
                        pool_padding=1,
                        pool_type='max')
     self.p4_2 = Conv2D(c1[0], c4, 1, act='relu')
Code Example #25
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(ImperativeLenet, self).__init__()
        self.features = Sequential(
            Conv2D(num_channels=1,
                   num_filters=6,
                   filter_size=3,
                   stride=1,
                   padding=1),
            Pool2D(pool_size=2, pool_type='max', pool_stride=2),
            Conv2D(num_channels=6,
                   num_filters=16,
                   filter_size=5,
                   stride=1,
                   padding=0),
            Pool2D(pool_size=2, pool_type='max', pool_stride=2))

        self.fc = Sequential(
            Linear(input_dim=400, output_dim=120),
            Linear(input_dim=120, output_dim=84),
            Linear(input_dim=84,
                   output_dim=num_classes,
                   act=classifier_activation))
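The forward method is omitted; a sketch that flattens the pooled features to match the fc input_dim of 400 (16 * 5 * 5 for a 1x28x28 input). This is an assumption based on the layer dimensions, with paddle.fluid imported as fluid as in the other snippets:

    def forward(self, inputs):
        x = self.features(inputs)
        x = fluid.layers.reshape(x, shape=[x.shape[0], -1])  # [N, 400]
        return self.fc(x)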
Code Example #26
File: fcn8s.py  Project: jay9z/ImageSegmentation-pp
    def __init__(self,num_classes=59):
        super(FCN8s,self).__init__()
        vgg16bn = VGG16BN()
        self.layer1 = vgg16bn.layer1
        self.layer1[0].conv._padding = [100,100]
        self.layer2 = vgg16bn.layer2
        self.layer3 = vgg16bn.layer3
        self.layer4 = vgg16bn.layer4
        self.layer5 = vgg16bn.layer5

        # self.conv1_1 = Conv2D(3,64,3,padding=1)
        # self.conv1_2 = Conv2D(64,64,3,padding=1)
        self.pool1 = Pool2D(pool_size=2,pool_stride=2,ceil_mode=True)
        # self.conv2_1 = Conv2D(64,128,3,padding=1)
        # self.conv2_2 = Conv2D(128,128,3,padding=1)
        self.pool2 = Pool2D(pool_size=2,pool_stride=2,ceil_mode=True)
        # self.conv3_1 = Conv2D(128,256,3,padding=1)
        # self.conv3_2 = Conv2D(256,256,3,padding=1)
        # self.conv3_3 = Conv2D(256,256,3,padding=1)
        self.pool3 = Pool2D(pool_size=2,pool_stride=2,ceil_mode=True)
        # self.conv4_1 = Conv2D(256,512,3,padding=1)
        # self.conv4_2 = Conv2D(512,512,3,padding=1)
        # self.conv4_3 = Conv2D(512,512,3,padding=1)
        self.pool4 = Pool2D(pool_size=2,pool_stride=2,ceil_mode=True)
        # self.conv5_1 = Conv2D(512,512,3,padding=1)
        # self.conv5_2 = Conv2D(512,512,3,padding=1)
        # self.conv5_3 = Conv2D(512,512,3,padding=1)
        self.pool5 = Pool2D(pool_size=2,pool_stride=2,ceil_mode=True)
        self.conv6 = Conv2D(512,4096,1,act='relu')
        self.conv7 = Conv2D(4096,4096,1,act='relu')
        self.drop6 = Dropout()
        self.drop7 = Dropout()

        self.score = Conv2D(4096,num_classes,1)
        self.score_pool3 = Conv2D(256,num_classes,1)
        self.score_pool4 = Conv2D(512,num_classes,1,)
        self.upsample1 = Conv2DTranspose(num_classes,num_classes,filter_size=4,stride=2,padding=2,bias_attr=False)
        self.upsample2 = Conv2DTranspose(num_classes,num_classes,filter_size=4,stride=2,padding=2,bias_attr=False)
        self.upsample3 = Conv2DTranspose(num_classes,num_classes,filter_size=16,stride=8,padding=1,bias_attr=False)
Code Example #27
    def __init__(self, c1, c2, c3, c4, act_fun='sigmoid'):
        super(ConcatConv, self).__init__()

        self.p1_1 = Conv2D(c1[0], c1[1], 1, act=act_fun)
        self.p2_1 = Conv2D(c1[0], c2[0], 1, act=act_fun)
        self.p2_2 = Conv2D(c2[0], c2[1], 3, padding=1, act=act_fun)
        self.p3_1 = Conv2D(c1[0], c3[0], 1, act=act_fun)
        self.p3_2 = Conv2D(c3[0], c3[1], 5, padding=2, act=act_fun)
        self.p4_1 = Pool2D(pool_size=3,
                           pool_stride=1,
                           pool_padding=1,
                           pool_type='max')
        self.p4_2 = Conv2D(c1[0], c4, 1, act=act_fun)
Code Example #28
    def __init__(self, num_classes=1):
        super(LeNet, self).__init__()

        # Convolution/pooling blocks: each conv layer uses a ReLU activation and is followed by a 2x2 pooling layer
        self.conv1 = Conv2D(num_channels=3,
                            num_filters=6,
                            filter_size=5,
                            act='relu')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = Conv2D(num_channels=6,
                            num_filters=16,
                            filter_size=5,
                            act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # Third convolution layer
        self.conv3 = Conv2D(num_channels=16,
                            num_filters=120,
                            filter_size=4,
                            act='relu')
        # Fully connected layers: the first outputs 64 units, the second outputs the number of class labels
        self.fc1 = Linear(input_dim=300000, output_dim=64, act='relu')
        self.fc2 = Linear(input_dim=64, output_dim=num_classes)
Code Example #29
    def __init__(self, num_classes=1):
        super(CNN_PoolReplaceFC, self).__init__()

        self.conv1 = Conv2D(3, 64, 5, padding=2, stride=1, act='sigmoid')
        self.bn1 = BatchNorm(64)
        self.conv2 = Conv2D(64, 128, 5, padding=2, stride=1, act='sigmoid')
        self.bn2 = BatchNorm(128)
        self.conv3 = Conv2D(128, 256, 5, padding=2, stride=1, act='sigmoid')
        self.bn3 = BatchNorm(256)
        self.conv4 = Conv2D(256, 512, 5, padding=2, stride=1, act='sigmoid')
        self.bn4 = BatchNorm(512)
        self.conv5 = Conv2D(512, 1024, 5, padding=2, stride=1, act='sigmoid')
        self.bn5 = BatchNorm(1024)
        self.conv6 = Conv2D(1024, 1024, 5, padding=2, stride=1, act='sigmoid')
        self.bn6 = BatchNorm(1024)

        # Global average pooling here replaces the original fully connected layers
        self.pool_global = Pool2D(pool_stride=1,
                                  global_pooling=True,
                                  pool_type='avg')
        self.fc = Linear(input_dim=1024, output_dim=num_classes)

        self.pool_down = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
Code Example #30
    def __init__(self, num_classes=1):
        super(CNN, self).__init__()

        self.conv1 = Conv2D(3, 64, 5, padding=2, stride=1, act='sigmoid')
        self.conv2 = Conv2D(64, 128, 5, padding=2, stride=1, act='sigmoid')
        self.conv3 = Conv2D(128, 256, 5, padding=2, stride=1, act='sigmoid')
        self.conv4 = Conv2D(256, 512, 5, padding=2, stride=1, act='sigmoid')
        self.conv5 = Conv2D(512, 1024, 5, padding=2, stride=1, act='sigmoid')
        self.conv6 = Conv2D(1024, 1024, 5, padding=2, stride=1, act='sigmoid')

        self.fc1 = Linear(1024 * 7 * 7, 1024, act='sigmoid')
        self.fc2 = Linear(1024, num_classes)

        self.pool_down = Pool2D(pool_size=2, pool_stride=2, pool_type='max')