Example #1
    def __init__(self, gray_input=False, use_bn=True):
        super(SANet, self).__init__()
        if gray_input:
            in_channels = 1
        else:
            in_channels = 3

        self.encoder = nn.Sequential(
            SAModule_Head(in_channels, 64, use_bn),
            nn.MaxPool2d(2, 2),
            SAModule(64, 128, use_bn),
            nn.MaxPool2d(2, 2),
            SAModule(128, 128, use_bn),
            nn.MaxPool2d(2, 2),
            SAModule(128, 64, use_bn),
        )

        self.decoder = nn.Sequential(
            BasicConv(64, 64, use_bn=use_bn, kernel_size=9, padding=4),
            BasicDeconv(64, 64, 2, stride=2, use_bn=use_bn),
            BasicConv(64, 32, use_bn=use_bn, kernel_size=7, padding=3),
            BasicDeconv(32, 32, 2, stride=2, use_bn=use_bn),
            BasicConv(32, 16, use_bn=use_bn, kernel_size=5, padding=2),
            BasicDeconv(16, 16, 2, stride=2, use_bn=use_bn),
            BasicConv(16, 16, use_bn=use_bn, kernel_size=3, padding=1),
            BasicConv(16, 16, use_bn=use_bn, kernel_size=5, padding=2),
            BasicConv(16, 1, use_bn=False, kernel_size=1),
        )
        initialize_weights(self.modules())
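The initialize_weights helper itself is not shown in these snippets. The sketch below is an assumption of what such a helper commonly looks like in crowd-counting codebases, not the exact definition used by the repositories above (the comments in Examples #3 and #6 mention a Gaussian with mean 1, while this sketch uses the more common zero-mean weights and unit batch-norm scale). It accepts both call styles seen here, initialize_weights(self.modules()) and initialize_weights(module_a, module_b).

import torch.nn as nn


def initialize_weights(*models):
    # Sketch only: N(0, 0.01) for conv/deconv/linear weights, zero biases,
    # and the conventional weight=1 / bias=0 for batch-norm layers.
    for model in models:
        # accept either a single nn.Module or an iterable such as self.modules()
        modules = model.modules() if isinstance(model, nn.Module) else model
        for m in modules:
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
                nn.init.normal_(m.weight, mean=0.0, std=0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)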
Example #2
    def __init__(self, pretrained=True, num_norm=6):
        super(Res101_SFCN_BN, self).__init__()

        self.num_norm = num_norm
        self.backend_feat = [512, 512, 512, 256, 128, 64]

        self.frontend = []
        self.backend = make_layers(self.backend_feat,
                                   in_channels=1024,
                                   dilation=True,
                                   batch_norm=True,
                                   num_norm=self.num_norm)
        self.convDU = convDU(in_out_channels=64, kernel_size=(1, 9))
        self.convLR = convLR(in_out_channels=64, kernel_size=(9, 1))

        self.output_layer = nn.Sequential(nn.Conv2d(64, 1, kernel_size=1),
                                          nn.ReLU())

        initialize_weights(self.modules())

        res = models.resnet101(pretrained=pretrained)
        self.frontend = nn.Sequential(res.conv1, res.bn1, res.relu,
                                      res.maxpool, res.layer1, res.layer2)
        self.own_reslayer_3 = make_res_layer(Bottleneck, 256, 23, stride=1)
        self.own_reslayer_3.load_state_dict(res.layer3.state_dict())
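make_layers is likewise defined elsewhere in these repositories. A CSRNet-style builder is a reasonable guess for what it does with backend_feat: a stack of dilated 3x3 convolutions whose output channels follow the list [512, 512, 512, 256, 128, 64]. The sketch below is only that guess; the repo-specific num_norm / num_gbnnorm and norm arguments used above are not modeled here.

import torch.nn as nn


def make_layers(cfg, in_channels=3, batch_norm=False, dilation=False):
    # Each entry of cfg is an output channel count ('M' inserts max pooling);
    # dilation=True switches the 3x3 convolutions to dilation rate 2.
    d_rate = 2 if dilation else 1
    layers = []
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv = nn.Conv2d(in_channels, v, kernel_size=3,
                             padding=d_rate, dilation=d_rate)
            if batch_norm:
                layers += [conv, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)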
Example #3
    def __init__(self, in_dim, out_dim, dim, n_blk, norm, activ):
        super(GuidingMLP, self).__init__()
        self.model = []

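        # note: in_dim, n_blk, norm and activ are ignored below; the input size
        # of the single LinearBlock is hard-coded to 256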
        dim = 256
        self.model += [
            LinearBlock(dim, out_dim, norm='none', activation='none')
        ]
        self.model = nn.Sequential(*self.model)

        # default initialization of gaussian with mean 1 and std 0.01
        initialize_weights(self.modules())
Example #4
    def __init__(self, norm=None, num_gbnnorm=6):
        super(CrowdDecoder, self).__init__()

        self.norm = norm
        self.num_gbnnorm = num_gbnnorm
        self.backend_feat = [512, 512, 512, 256, 128, 64]

        self.backend = make_layers(self.backend_feat, in_channels=1024, dilation=True, norm=self.norm,
                                   num_gbnnorm=self.num_gbnnorm)
        self.convDU = convDU(in_out_channels=64, kernel_size=(1, 9))
        self.convLR = convLR(in_out_channels=64, kernel_size=(9, 1))
        self.output_layer = nn.Sequential(nn.Conv2d(64, 1, kernel_size=1), nn.ReLU())

        initialize_weights(self.modules())
Example #5
    def __init__(self, norm=None, num_gbnnorm=6):
        super(CrowdDecoder, self).__init__()

        self.norm = norm
        self.num_gbnnorm = num_gbnnorm

        self.backend_feat = [512, 512, 512, 256, 128, 64]
        self.backend = make_layers(
            self.backend_feat,
            in_channels=512,
            dilation=True,
            norm=self.norm,
            num_gbnnorm=self.num_gbnnorm)
        self.output_layer = nn.Conv2d(64, 1, kernel_size=1)

        initialize_weights(self.modules())
Example #6
    def __init__(self,
                 downs,
                 ind_im,
                 dim,
                 latent_dim,
                 norm,
                 activ,
                 pad_type,
                 pool_type='adapt_avg_pool'):
        super(GuidingEncoder, self).__init__()
        self.model = []
        self.model += [
            Conv2dBlock(ind_im,
                        dim,
                        7,
                        1,
                        3,
                        norm=norm,
                        activation=activ,
                        pad_type=pad_type)
        ]
        for i in range(2):
            self.model += [
                Conv2dBlock(dim,
                            2 * dim,
                            4,
                            2,
                            1,
                            norm=norm,
                            activation=activ,
                            pad_type=pad_type)
            ]
            dim *= 2

        if pool_type == 'adapt_avg_pool':
            self.model += [nn.AdaptiveAvgPool2d(1)]
        elif pool_type == 'adapt_max_pool':
            self.model += [nn.AdaptiveMaxPool2d(1)]
        self.model = nn.Sequential(*self.model)

        # default initialization of gaussian with mean 1 and std 0.01
        initialize_weights(self.modules())
Example #7
    def __init__(self, bn=False, num_classes=10):
        super(ori, self).__init__()

        self.num_classes = num_classes
        self.base_layer = nn.Sequential(
            Conv2d(1, 16, 9, same_padding=True, NL='prelu', bn=bn),
            Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn))

        self.hl_prior = nn.Sequential(
            Conv2d(32, 16, 9, same_padding=True, NL='prelu', bn=bn),
            nn.MaxPool2d(2),
            Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn),
            nn.MaxPool2d(2),
            Conv2d(32, 32, 7, same_padding=True, NL='prelu', bn=bn),
            Conv2d(32, 32, 7, same_padding=True, NL='prelu', bn=bn))

        self.roi_pool = RoIPool([16, 16], 1 / 4.0)
        self.hl_prior_conv2d = Conv2d(32,
                                      16,
                                      1,
                                      same_padding=True,
                                      NL='prelu',
                                      bn=bn)

        self.bbx_pred = nn.Sequential(FC(16 * 16 * 16, 512, NL='prelu'),
                                      FC(512, 256, NL='prelu'),
                                      FC(256, self.num_classes, NL='prelu'))

        # generate density map
        self.den_stage_1 = nn.Sequential(
            Conv2d(32, 32, 7, same_padding=True, NL='prelu', bn=bn),
            nn.MaxPool2d(2),
            Conv2d(32, 64, 5, same_padding=True, NL='prelu', bn=bn),
            nn.MaxPool2d(2),
            Conv2d(64, 32, 5, same_padding=True, NL='prelu', bn=bn),
            Conv2d(32, 32, 5, same_padding=True, NL='prelu', bn=bn))

        self.den_stage_DULR = nn.Sequential(
            convDU(in_out_channels=32, kernel_size=(1, 9)),
            convLR(in_out_channels=32, kernel_size=(9, 1)))

        self.den_stage_2 = nn.Sequential(
            Conv2d(64, 64, 3, same_padding=True, NL='prelu', bn=bn),
            Conv2d(64, 32, 3, same_padding=True, NL='prelu', bn=bn),
            nn.ConvTranspose2d(32,
                               16,
                               4,
                               stride=2,
                               padding=1,
                               output_padding=0,
                               bias=True), nn.PReLU(),
            nn.ConvTranspose2d(16,
                               8,
                               4,
                               stride=2,
                               padding=1,
                               output_padding=0,
                               bias=True), nn.PReLU())

        # generate seg map
        self.seg_stage = nn.Sequential(
            Conv2d(32, 32, 1, same_padding=True, NL='prelu', bn=bn),
            Conv2d(32, 64, 3, same_padding=True, NL='prelu', bn=bn),
            Conv2d(64, 32, 3, same_padding=True, NL='prelu', bn=bn),
            nn.ConvTranspose2d(32,
                               16,
                               4,
                               stride=2,
                               padding=1,
                               output_padding=0,
                               bias=True), nn.PReLU(),
            nn.ConvTranspose2d(16,
                               8,
                               4,
                               stride=2,
                               padding=1,
                               output_padding=0,
                               bias=True), nn.PReLU())

        self.seg_pred = Conv2d(8, 2, 1, same_padding=True, NL='relu', bn=bn)

        self.trans_den = Conv2d(8, 8, 1, same_padding=True, NL='relu', bn=bn)

        self.den_pred = Conv2d(16, 1, 1, same_padding=True, NL='relu', bn=bn)

        # initialize_weights(self.modules())

        weights_normal_init(self.base_layer, self.hl_prior, self.hl_prior_conv2d, self.bbx_pred, self.den_stage_1,
                            self.den_stage_DULR, self.den_stage_2, self.trans_den, self.den_pred)
        initialize_weights(self.seg_stage, self.seg_pred)
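Example #7 splits initialization between two helpers: weights_normal_init for the density branches and initialize_weights for the segmentation branches. weights_normal_init is not defined here either; assuming behaviour similar to the CMTL-style helper, it could look roughly like this (Gaussian N(0, 0.01) weights for conv and linear layers, zero biases, batch norm left at its defaults):

import torch.nn as nn


def weights_normal_init(*models, dev=0.01):
    # Sketch under the stated assumption; the actual helper may differ.
    for model in models:
        for m in model.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
                m.weight.data.normal_(0.0, dev)
                if m.bias is not None:
                    m.bias.data.fill_(0.0)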