Code example #1
0
File: FCNs.py — Project: mrluin/SANet-PyTorch
    def __init__(self, configs, outc=6, stride=32, pretrained=True):
        """Build an FCN-16s segmentation network on a ResNet-34 backbone.

        Args:
            configs: config object providing ``size_cropped_images_h/w``.
            outc: number of output channels (classes) for the score heads.
            stride: overall output stride (8, 16, or 32).
            pretrained: load ImageNet weights for the backbone.
        """
        super(FCN16s, self).__init__()

        self.name = 'FCN16s_resnet34'
        self.configs = configs
        backbone = models.resnet34(pretrained=pretrained)

        # Backbone stem.
        self.conv0 = backbone.conv1
        self.bn0 = backbone.bn1
        self.relu0 = backbone.relu
        self.maxpool = backbone.maxpool

        # Residual stages, re-exposed as attributes of this module.
        for stage in ('layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, stage, getattr(backbone, stage))

        # Freeze backbone conv weights; the score heads created below
        # are not yet registered and therefore stay trainable.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                module.weight.requires_grad = False

        self.stride = stride
        self.spatial_size = [
            getattr(self.configs, 'size_cropped_images_' + axis) // self.stride
            for axis in ('h', 'w')
        ]
        # Replace strides with dilations to realize the requested output stride.
        dilation_plan = {8: ((self.layer3, 2), (self.layer4, 4)),
                         16: ((self.layer4, 2),)}
        for layer, rate in dilation_plan.get(self.stride, ()):
            layer.apply(partial(_nostride2dilation, dilation=rate))

        self.score_layer4 = _FCNHead(512, outc=outc)
        self.score_layer3 = _FCNHead(256, outc=outc)
Code example #2
0
    def __init__(self, configs, outc=6, stride=32, pretrained=True):
        """FCN-8s variant with multi-scale spatial attention modules.

        Per the original note: spatial attention module -> fusion -> score_layer.

        Args:
            configs: config object providing ``size_cropped_images_h/w``.
            outc: number of output channels (classes) for the score head.
            stride: overall output stride (8, 16, or 32).
            pretrained: load ImageNet weights for the ResNet-34 backbone.
        """
        super(FCN8s_SAM_MS, self).__init__()

        self.name = 'FCN8s_SAM_MS'
        self.configs = configs

        backbone = models.resnet34(pretrained=pretrained)

        # Backbone stem.
        self.conv0 = backbone.conv1
        self.bn0 = backbone.bn1
        self.relu0 = backbone.relu
        self.maxpool = backbone.maxpool

        # Residual stages, re-exposed as attributes of this module.
        for stage in ('layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, stage, getattr(backbone, stage))

        # Freeze backbone conv weights; every module created below is
        # registered afterwards and therefore stays trainable.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                module.weight.requires_grad = False

        self.stride = stride
        self.spatial_size = [
            getattr(self.configs, 'size_cropped_images_' + axis) // self.stride
            for axis in ('h', 'w')
        ]
        # Replace strides with dilations to realize the requested output stride.
        dilation_plan = {8: ((self.layer3, 2), (self.layer4, 4)),
                         16: ((self.layer4, 2),)}
        for layer, rate in dilation_plan.get(self.stride, ()):
            layer.apply(partial(_nostride2dilation, dilation=rate))

        self.score_layer = _FCNHead(128, outc=outc)

        # 1x1 adapters that reduce/align channel counts before fusion.
        self.ada_layer4 = nn.Conv2d(512, 256, 1, 1, 0, bias=False)
        self.ada_layer3 = nn.Conv2d(256, 128, 1, 1, 0, bias=False)
        self.ada_layer2 = nn.Conv2d(128, 128, 1, 1, 0, bias=False)

        # Sampling modules for the three stage widths of ResNet-34.
        self.sampling_conv1 = Sampling_Module(128)
        self.sampling_conv2 = Sampling_Module(256)
        self.sampling_conv3 = Sampling_Module(512)

        # Spatial-attention counterparts, one per fused stage.
        self.sa_counter4 = SAM_counterpart(512)
        self.sa_counter3 = SAM_counterpart(256)
        self.sa_counter2 = SAM_counterpart(128)
Code example #3
0
    def __init__(self, configs, outc=6, stride=32, pretrained=True):
        """FCN-8s + SAM variant built on a ResNet-101 backbone.

        Per the original note: SAM -> fusion -> score_layer.

        Args:
            configs: config object providing ``size_cropped_images_h/w``.
            outc: number of output channels (classes) for the score head.
            stride: overall output stride (8, 16, or 32).
            pretrained: load ImageNet weights for the backbone.
        """
        super(FCN8s_SAM_M_resnet101, self).__init__()

        self.name = 'FCN8s_SAM_M_resnet101'
        self.configs = configs

        backbone = models.resnet101(pretrained=pretrained)

        # Backbone stem.
        self.conv0 = backbone.conv1
        self.bn0 = backbone.bn1
        self.relu0 = backbone.relu
        self.maxpool = backbone.maxpool

        # Residual stages (ResNet-101 widths: 256 / 512 / 1024 / 2048).
        for stage in ('layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, stage, getattr(backbone, stage))

        # Freeze backbone conv weights; modules created below stay trainable.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                module.weight.requires_grad = False

        self.stride = stride
        self.spatial_size = [
            getattr(self.configs, 'size_cropped_images_' + axis) // self.stride
            for axis in ('h', 'w')
        ]
        # Replace strides with dilations to realize the requested output stride.
        dilation_plan = {8: ((self.layer3, 2), (self.layer4, 4)),
                         16: ((self.layer4, 2),)}
        for layer, rate in dilation_plan.get(self.stride, ()):
            layer.apply(partial(_nostride2dilation, dilation=rate))

        self.score_layer = _FCNHead(512, outc=outc)

        # 1x1 adapters that reduce/align channel counts before fusion.
        self.ada_layer4 = nn.Conv2d(2048, 1024, 1, 1, 0, bias=False)
        self.ada_layer3 = nn.Conv2d(1024, 512, 1, 1, 0, bias=False)
        self.ada_layer2 = nn.Conv2d(512, 512, 1, 1, 0, bias=False)

        # Sampling modules for the three deepest stage widths of ResNet-101.
        self.sampling_conv1 = Sampling_Module(512)
        self.sampling_conv2 = Sampling_Module(1024)
        self.sampling_conv3 = Sampling_Module(2048)
Code example #4
0
File: UNet.py — Project: mrluin/SANet-PyTorch
    def __init__(self, configs, outc=6, stride=32, pretrained=True):
        """U-Net style network with a ResNet-34 encoder.

        Args:
            configs: config object providing ``size_cropped_images_h/w``.
            outc: number of output channels (classes) for the score head.
            stride: overall output stride (8, 16, or 32).
            pretrained: load ImageNet weights for the encoder.
        """
        super(UNet_resnet, self).__init__()

        self.configs = configs
        self.name = 'Unet_resnet34'

        backbone = models.resnet34(pretrained=pretrained)

        # Encoder stem (ends at 64 channels after maxpool).
        self.conv0 = backbone.conv1
        self.bn0 = backbone.bn1
        self.relu0 = backbone.relu
        self.maxpool = backbone.maxpool

        # Residual stages (ResNet-34 widths: 64 / 128 / 256 / 512).
        for stage in ('layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, stage, getattr(backbone, stage))

        # Freeze encoder conv weights; decoder modules created below
        # stay trainable.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                module.weight.requires_grad = False

        self.stride = stride
        self.spatial_size = [
            getattr(self.configs, 'size_cropped_images_' + axis) // self.stride
            for axis in ('h', 'w')
        ]
        # Replace strides with dilations to realize the requested output stride.
        dilation_plan = {8: ((self.layer3, 2), (self.layer4, 4)),
                         16: ((self.layer4, 2),)}
        for layer, rate in dilation_plan.get(self.stride, ()):
            layer.apply(partial(_nostride2dilation, dilation=rate))

        # Extra downsampling block at the bottom of the encoder.
        self.layer5 = down(in_ch=512, out_ch=512)

        # Decoder blocks; input widths (1024/512/256) suggest each one
        # consumes a skip connection concatenated with the upsampled
        # feature — confirm in forward().
        self.up4 = double_conv(1024, 256)
        self.up3 = double_conv(512, 128)
        self.up2 = double_conv(256, 64)

        self.score_layer = _FCNHead(64, outc=outc)
Code example #5
0
    def __init__(self, configs, outc=6, stride=16, pretrained=True):
        """DeepLabv3+ segmentation network on a ResNet-34 backbone.

        Args:
            configs: config object providing ``size_cropped_images_h/w``.
            outc: number of output channels (classes) for the score head.
            stride: overall output stride (8 or 16 converts backbone strides
                to dilations; other values leave the backbone unchanged).
            pretrained: load ImageNet weights for the backbone.
        """
        super(DeepLabv3_plus, self).__init__()

        self.name = 'deeplabv3_plus-resnet34'
        self.configs = configs
        backbone = models.resnet34(pretrained=pretrained)

        # Backbone stem.
        self.conv0 = backbone.conv1
        self.bn0 = backbone.bn1
        self.relu0 = backbone.relu
        self.maxpool = backbone.maxpool

        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4

        # Freeze backbone conv weights; heads created below stay trainable.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.requires_grad = False

        self.stride = stride
        self.spatial_size = [
            self.configs.size_cropped_images_h // self.stride,
            self.configs.size_cropped_images_w // self.stride
        ]
        # Replace strides with dilations to realize the requested output stride.
        if self.stride == 8:
            self.layer3.apply(partial(_nostride2dilation, dilation=2))
            self.layer4.apply(partial(_nostride2dilation, dilation=4))
        elif self.stride == 16:
            self.layer4.apply(partial(_nostride2dilation, dilation=2))

        self.low_level_adaptor = ConvBnRelu(64, 64, 1, 1, 0)
        # concat with output from aspp [128, 64]
        # FIX: propagate the actual stride to ASPP instead of hard-coding 16,
        # matching the sibling DeepLabv3 model; identical behavior for the
        # default stride=16, correct ASPP configuration for stride=8.
        self.aspp_layer = ASPP(inc=512, stride=stride)
        self.score_layer = _FCNHead(192, outc=outc)
Code example #6
0
File: DeepLabv3.py — Project: mrluin/SANet-PyTorch
    def __init__(self, configs, outc=6, stride=16, pretrained=True):
        """DeepLabv3 segmentation network on a ResNet-34 backbone.

        Args:
            configs: config object providing ``size_cropped_images_h/w``.
            outc: number of output channels (classes) for the score head.
            stride: overall output stride (8, 16, or 32).
            pretrained: load ImageNet weights for the backbone.
        """
        super(DeepLabv3, self).__init__()

        self.configs = configs
        self.name = 'DeepLabv3-resnet34'

        backbone = models.resnet34(pretrained=pretrained)

        # Backbone stem.
        self.conv0 = backbone.conv1
        self.bn0 = backbone.bn1
        self.relu0 = backbone.relu
        self.maxpool = backbone.maxpool

        # Residual stages, re-exposed as attributes of this module.
        for stage in ('layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, stage, getattr(backbone, stage))

        # Freeze backbone conv weights; ASPP and the classifier created
        # below stay trainable.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                module.weight.requires_grad = False

        self.stride = stride
        self.spatial_size = [
            getattr(self.configs, 'size_cropped_images_' + axis) // self.stride
            for axis in ('h', 'w')
        ]
        # Replace strides with dilations to realize the requested output stride.
        dilation_plan = {8: ((self.layer3, 2), (self.layer4, 4)),
                         16: ((self.layer4, 2),)}
        for layer, rate in dilation_plan.get(self.stride, ()):
            layer.apply(partial(_nostride2dilation, dilation=rate))

        # ASPP over the 512-channel conv1x1 output; emits 128 channels.
        self.aspp = ASPP(inc=512, stride=stride)

        # Classifier head.
        self.score_layer = _FCNHead(128, outc=outc)