Code example #1
0
    def __init__(self, pretrained=True, **kwargs):
        """Build the ResNet-50 encoder, pyramid-attention center and GAU decoders.

        Args:
            pretrained: if True, load pretrained weights into the ResNet-50
                backbone.
            **kwargs: accepted for signature compatibility; unused here.
        """
        super().__init__()

        backbone = resnet50(pretrained=pretrained)
        self.encoder = backbone

        # Stem: conv + BN + ReLU. The backbone's max-pool is not included here;
        # presumably it is applied separately in forward() — not visible in this
        # block.
        self.conv1 = nn.Sequential(backbone.conv1,
                                   backbone.bn1,
                                   backbone.relu)

        # Bottleneck encoder stages. Their output widths are fixed by the
        # modules below: 256, 512, 1024 and 2048 channels respectively
        # (center_conv consumes 2048; the GAU decoders consume 1024/512/256).
        self.encoder1 = backbone.layer1
        self.encoder2 = backbone.layer2
        self.encoder3 = backbone.layer3
        self.encoder4 = backbone.layer4

        # Reduce the 2048-channel stage-4 features before pyramid attention.
        self.center_conv = nn.Sequential(
            L.ConvBn2d(2048, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )
        self.FPA = L.FeaturePyramidAttention_v2(512, 64)

        # Global-attention upsampling decoders, paired with encoder stages
        # 3, 2 and 1 by channel count.
        self.decoder5 = L.GlobalAttentionUpsample(1024, 64)
        self.decoder4 = L.GlobalAttentionUpsample(512, 64)
        self.decoder3 = L.GlobalAttentionUpsample(256, 64)

        # Final x4 bilinear upsample, then a single-channel 1x1 logit head.
        self.upsample = nn.Upsample(scale_factor=4, mode='bilinear',
                                    align_corners=True)
        self.logit = nn.Conv2d(64, 1, kernel_size=1, padding=0)
Code example #2
0
    def __init__(self, pretrained=True, **kwargs):
        """Build the SE-ResNet-50 encoder, EMA bottleneck and GAU decoders.

        Args:
            pretrained: if True, load pretrained weights into the SE-ResNet-50
                backbone.
            **kwargs: accepted for signature compatibility; unused here.
        """
        super().__init__()

        backbone = se_resnet50(pretrained=pretrained)
        self.encoder = backbone

        # Stem block of the SE-ResNet backbone.
        self.conv1 = backbone.layer0

        # Encoder stages producing 256 / 512 / 1024 / 2048 channels, as
        # consumed by the EMA head and GAU decoders below.
        self.encoder1 = backbone.layer1
        self.encoder2 = backbone.layer2
        self.encoder3 = backbone.layer3
        self.encoder4 = backbone.layer4

        # Expectation-maximization attention over a 512-channel reduction of
        # the 2048-channel stage-4 output, projected down to 64 channels for
        # the decoder path.
        self.ema = nn.Sequential(
            L.ConvBn2d(2048, 512, kernel_size=3, padding=1, act=True),
            L.EMAModule(512, 64, lbda=1, alpha=0.1, T=3),
            L.ConvBn2d(512, 64, kernel_size=3, padding=1, act=True),
        )

        # Global-attention upsampling decoders, paired with encoder stages
        # 3, 2 and 1 by channel count.
        self.decoder5 = L.GlobalAttentionUpsample(1024, 64)
        self.decoder4 = L.GlobalAttentionUpsample(512, 64)
        self.decoder3 = L.GlobalAttentionUpsample(256, 64)

        # Final x4 bilinear upsample, then a single-channel 1x1 logit head.
        self.upsample = nn.Upsample(scale_factor=4, mode='bilinear',
                                    align_corners=True)
        self.logit = nn.Conv2d(64, 1, kernel_size=1, padding=0)