def __init__(self, pretrained, num_classes):
        super(SegNet, self).__init__()
        vgg = models.vgg19_bn()
        if pretrained:
            vgg.load_state_dict(torch.load(pretrained_vgg19_bn))
        features = list(vgg.features.children())
        self.enc1 = nn.Sequential(*features[0:7])
        self.enc2 = nn.Sequential(*features[7:14])
        self.enc3 = nn.Sequential(*features[14:27])
        self.enc4 = nn.Sequential(*features[27:40])
        self.enc5 = nn.Sequential(*features[40:])

        # for m in self.modules():
        #     if isinstance(m, nn.Conv2d):
        #         m.requires_grad = False

        self.dec5 = nn.Sequential(
            nn.ConvTranspose2d(512, 512, kernel_size=2, stride=2),
            # four independent Conv-BN-ReLU blocks; building fresh modules each
            # iteration avoids registering the same instances (and sharing
            # weights) four times, as plain list repetition with `* 4` would
            *[m for _ in range(4)
              for m in (nn.Conv2d(512, 512, kernel_size=3, padding=1),
                        nn.BatchNorm2d(512),
                        nn.ReLU(inplace=True))]
        )
        self.dec4 = _DecoderBlock(1024, 256, 4)
        self.dec3 = _DecoderBlock(512, 128, 4)
        self.dec2 = _DecoderBlock(256, 64, 2)
        self.dec1 = _DecoderBlock(128, num_classes, 2)
        initialize_weights(self.dec5, self.dec4, self.dec3, self.dec2, self.dec1)
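The decoder widths above (dec4 expects 1024 = 512 + 512 channels, dec3 expects 512, and so on) imply that each decoder stage receives the previous decoder output concatenated with the matching encoder output. A minimal forward sketch consistent with those widths follows; the concatenation order and the final bilinear resize are assumptions, not part of the snippet.

import torch
import torch.nn.functional as F

def forward(self, x):
    # encoder: VGG19-BN feature slices at strides 2, 4, 8, 16, 32
    enc1 = self.enc1(x)
    enc2 = self.enc2(enc1)
    enc3 = self.enc3(enc2)
    enc4 = self.enc4(enc3)
    enc5 = self.enc5(enc4)
    # decoder: each stage sees the previous decoder output concatenated
    # with the encoder feature map of the same depth
    dec5 = self.dec5(enc5)
    dec4 = self.dec4(torch.cat([enc4, dec5], 1))  # 512 + 512 = 1024 channels
    dec3 = self.dec3(torch.cat([enc3, dec4], 1))  # 256 + 256 = 512 channels
    dec2 = self.dec2(torch.cat([enc2, dec3], 1))  # 128 + 128 = 256 channels
    dec1 = self.dec1(torch.cat([enc1, dec2], 1))  # 64 + 64 = 128 channels
    # resize the num_classes-channel map to the input resolution (no-op if
    # the decoder already recovered the full size)
    return F.interpolate(dec1, size=x.size()[2:], mode='bilinear', align_corners=False)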
Example #2
 def __init__(self, num_classes, pretrained=True):
     super(FCN32DenseNet, self).__init__()
     dense = models.densenet201()
     if pretrained:
         dense.load_state_dict(torch.load(dense201_path))
     self.features5 = dense.features
     self.fconv5 = nn.Sequential(
         nn.ReLU(inplace=True), nn.Conv2d(1920, num_classes, kernel_size=7))
     initialize_weights(self.fconv5)
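This is the FCN-32s pattern: a single score map at roughly 1/32 of the input resolution (1920-channel DenseNet-201 features, ReLU, 7x7 score convolution), recovered in one upsampling step. A hedged forward sketch, assuming bilinear upsampling back to the input size rather than a learned deconvolution:

import torch.nn.functional as F

def forward(self, x):
    y = self.features5(x)   # DenseNet-201 features, 1920 channels, ~1/32 resolution
    y = self.fconv5(y)      # ReLU + 7x7 conv -> num_classes channels
    # single coarse-to-full upsampling step back to the input resolution
    return F.interpolate(y, size=x.size()[2:], mode='bilinear', align_corners=False)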
Example #3
 def __init__(self, num_classes, pretrained=True):
     super(FCN32ResNet, self).__init__()
     res = models.resnet152()
     if pretrained:
         res.load_state_dict(torch.load(res152_path))
     self.features5 = nn.Sequential(res.conv1, res.bn1, res.relu,
                                    res.maxpool, res.layer1, res.layer2,
                                    res.layer3, res.layer4)
     self.fconv5 = nn.Conv2d(2048, num_classes, kernel_size=7)
     initialize_weights(self.fconv5)
Example #4
 def __init__(self, pretrained, num_classes):
     super(FCN16ResNet, self).__init__()
     res = models.resnet152()
     if pretrained:
         res.load_state_dict(torch.load(pretrained_res152))
     self.features4 = nn.Sequential(
         res.conv1, res.bn1, res.relu, res.maxpool, res.layer1, res.layer2, res.layer3
     )
     self.features5 = res.layer4
     self.fconv4 = nn.Conv2d(1024, num_classes, kernel_size=1)
     self.fconv5 = nn.Conv2d(2048, num_classes, kernel_size=7)
     initialize_weights(self.fconv4, self.fconv5)
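FCN-16s adds one skip connection: 1x1 scores from the stride-16 stage (layer3, 1024 channels) are fused with upsampled scores from the stride-32 stage (layer4, 2048 channels). A hedged sketch of the fusion, with bilinear resizing assumed for both upsampling steps:

import torch.nn.functional as F

def forward(self, x):
    pool4 = self.features4(x)       # stride-16 features, 1024 channels
    pool5 = self.features5(pool4)   # stride-32 features, 2048 channels
    score4 = self.fconv4(pool4)     # 1x1 scores at 1/16 resolution
    score5 = self.fconv5(pool5)     # 7x7 scores at roughly 1/32 resolution
    # upsample the coarse scores and add them to the finer skip scores
    score5 = F.interpolate(score5, size=score4.size()[2:], mode='bilinear', align_corners=False)
    fused = score4 + score5
    return F.interpolate(fused, size=x.size()[2:], mode='bilinear', align_corners=False)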
Example #5
 def __init__(self, num_classes, pretrained=True):
     super(FCN32VGG, self).__init__()
     vgg = models.vgg19_bn()
     if pretrained:
         vgg.load_state_dict(torch.load(vgg19_bn_path))
     self.features5 = vgg.features
     self.fconv5 = nn.Sequential(
         nn.Conv2d(512, 4096, kernel_size=7), nn.ReLU(inplace=True),
         nn.Dropout(), nn.Conv2d(4096, 4096, kernel_size=1),
         nn.ReLU(inplace=True), nn.Dropout(),
         nn.Conv2d(4096, num_classes, kernel_size=1))
     initialize_weights(self.fconv5)
Example #6
 def __init__(self, num_classes, pretrained=True):
     super(FCN16DenseNet, self).__init__()
     dense = models.densenet201()
     if pretrained:
         dense.load_state_dict(torch.load(dense201_path))
     features = list(dense.features.children())
     self.features4 = nn.Sequential(*features[:10])
     self.features5 = nn.Sequential(*features[10:])
     self.fconv4 = nn.Sequential(nn.BatchNorm2d(896), nn.ReLU(inplace=True),
                                 nn.Conv2d(896, num_classes, kernel_size=1))
     self.fconv5 = nn.Sequential(
         nn.ReLU(inplace=True), nn.Conv2d(1920, num_classes, kernel_size=7))
     initialize_weights(self.fconv4, self.fconv5)
Example #7
    def __init__(self, num_classes, input_size, pretrained=True, use_aux=True):
        super(PSPNetDeform, self).__init__()
        self.input_size = input_size
        self.use_aux = use_aux
        resnet = models.resnet152()
        if pretrained:
            resnet.load_state_dict(torch.load(res152_path))
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                    resnet.maxpool)
        self.layer1 = resnet.layer1
        self.layer2 = resnet.layer2
        self.layer3 = resnet.layer3
        self.layer4 = resnet.layer4

        for n, m in self.layer3.named_modules():
            if 'conv2' in n:
                m.padding = (1, 1)
                m.stride = (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n:
                m.padding = (1, 1)
                m.stride = (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for idx in range(len(self.layer3)):
            self.layer3[idx].conv2 = Conv2dDeformable(self.layer3[idx].conv2)
        for idx in range(len(self.layer4)):
            self.layer4[idx].conv2 = Conv2dDeformable(self.layer4[idx].conv2)
        self.ppm = PyramidPoolingModule(
            (int(math.ceil(input_size[0] / 8.0)),
             int(math.ceil(input_size[1] / 8.0))),
            2048, 512, (1, 2, 3, 6))
        self.final = nn.Sequential(
            nn.Conv2d(4096, 512, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(512, momentum=.95), nn.ReLU(), nn.Dropout(0.1),
            nn.Conv2d(512, num_classes, kernel_size=1))
        if use_aux:
            self.aux_logits = nn.Sequential(
                PyramidPoolingModule((int(math.ceil(input_size[0] / 8.0)),
                                      int(math.ceil(input_size[1] / 8.0))),
                                     1024, 256, (1, 2, 3, 6)),
                nn.Conv2d(2048, 256, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(256, momentum=.95), nn.ReLU(), nn.Dropout(0.1),
                nn.Conv2d(256, num_classes, kernel_size=1))

        initialize_weights(self.ppm, self.final)
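Because the stride-1 modifications keep layer3 and layer4 at 1/8 of the input resolution, the pyramid pooling module is sized for ceil(input_size / 8). A hedged forward sketch follows; returning the auxiliary logits only in training mode is an assumption, not shown in the snippet:

import torch.nn.functional as F

def forward(self, x):
    x0 = self.layer0(x)
    x1 = self.layer1(x0)
    x2 = self.layer2(x1)
    x3 = self.layer3(x2)             # 1024 channels, kept at 1/8 resolution
    x4 = self.layer4(x3)             # 2048 channels, kept at 1/8 resolution
    out = self.final(self.ppm(x4))   # pyramid pooling + classifier head
    out = F.interpolate(out, size=self.input_size, mode='bilinear', align_corners=False)
    if self.training and self.use_aux:
        aux = self.aux_logits(x3)    # auxiliary head on the layer3 features
        aux = F.interpolate(aux, size=self.input_size, mode='bilinear', align_corners=False)
        return out, aux
    return out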
Example #8
 def __init__(self, pretrained, num_classes):
     super(FCN8VGG, self).__init__()
     vgg = models.vgg19_bn()
     if pretrained:
         vgg.load_state_dict(torch.load(pretrained_vgg19_bn))
     features = list(vgg.features.children())
     self.features3 = nn.Sequential(*features[0:27])
     self.features4 = nn.Sequential(*features[27:40])
     self.features5 = nn.Sequential(*features[40:])
     self.fconv3 = nn.Conv2d(256, num_classes, kernel_size=1)
     self.fconv4 = nn.Conv2d(512, num_classes, kernel_size=1)
     self.fconv5 = nn.Sequential(
         nn.Conv2d(512, 4096, kernel_size=7), nn.ReLU(inplace=True),
         nn.Dropout(), nn.Conv2d(4096, 4096, kernel_size=1),
         nn.ReLU(inplace=True), nn.Dropout(),
         nn.Conv2d(4096, num_classes, kernel_size=1))
     initialize_weights(self.fconv3, self.fconv4, self.fconv5)
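FCN-8s fuses three score maps: 1x1 scores from the 1/8 (256-channel) and 1/16 (512-channel) stages plus the fc-style 7x7 head on the 1/32 stage. A hedged sketch of the two-level fusion, again assuming bilinear resizing:

import torch.nn.functional as F

def forward(self, x):
    pool3 = self.features3(x)       # 256 channels, 1/8 resolution
    pool4 = self.features4(pool3)   # 512 channels, 1/16 resolution
    pool5 = self.features5(pool4)   # 512 channels, 1/32 resolution
    score3 = self.fconv3(pool3)
    score4 = self.fconv4(pool4)
    score5 = self.fconv5(pool5)     # fc-style head shrinks the map slightly
    # fuse coarse to fine: pool5 scores into pool4, then into pool3
    score4 = score4 + F.interpolate(score5, size=score4.size()[2:], mode='bilinear', align_corners=False)
    score3 = score3 + F.interpolate(score4, size=score3.size()[2:], mode='bilinear', align_corners=False)
    return F.interpolate(score3, size=x.size()[2:], mode='bilinear', align_corners=False)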
Example #9
 def __init__(self, num_classes):
     super(UNet, self).__init__()
     self.enc1 = _EncoderBlock(3, 64)
     self.enc2 = _EncoderBlock(64, 128)
     self.enc3 = _EncoderBlock(128, 256)
     self.enc4 = _EncoderBlock(256, 512, dropout=True)
     self.center = _DecoderBlock(512, 1024, 512)
     self.dec4 = _DecoderBlock(1024, 512, 256)
     self.dec3 = _DecoderBlock(512, 256, 128)
     self.dec2 = _DecoderBlock(256, 128, 64)
     self.dec1 = nn.Sequential(
         nn.Conv2d(128, 64, kernel_size=3),
         nn.BatchNorm2d(64),
         nn.ReLU(inplace=True),
         nn.Conv2d(64, 64, kernel_size=3),
         nn.BatchNorm2d(64),
         nn.ReLU(inplace=True),
     )
     self.final = nn.Conv2d(64, num_classes, kernel_size=1)
     initialize_weights(self)
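The decoder widths again imply skip concatenations (dec4 expects 1024 = 512 center + 512 enc4 channels, down to 128 for dec1). Because the 3x3 convolutions are unpadded, the encoder maps are larger than the decoder maps at the same depth; the sketch below resizes them before concatenation (the original U-Net paper crops instead). The wiring is an assumption consistent with the channel counts:

import torch
import torch.nn.functional as F

def forward(self, x):
    enc1 = self.enc1(x)
    enc2 = self.enc2(enc1)
    enc3 = self.enc3(enc2)
    enc4 = self.enc4(enc3)
    center = self.center(enc4)
    # resize encoder features to the decoder size before concatenating
    dec4 = self.dec4(torch.cat([center, F.interpolate(enc4, center.size()[2:], mode='bilinear', align_corners=False)], 1))
    dec3 = self.dec3(torch.cat([dec4, F.interpolate(enc3, dec4.size()[2:], mode='bilinear', align_corners=False)], 1))
    dec2 = self.dec2(torch.cat([dec3, F.interpolate(enc2, dec3.size()[2:], mode='bilinear', align_corners=False)], 1))
    dec1 = self.dec1(torch.cat([dec2, F.interpolate(enc1, dec2.size()[2:], mode='bilinear', align_corners=False)], 1))
    final = self.final(dec1)
    return F.interpolate(final, x.size()[2:], mode='bilinear', align_corners=False)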
Example #10
    def __init__(self, num_classes, input_size, pretrained=True):
        super(GCN, self).__init__()
        self.input_size = input_size
        resnet = models.resnet152()
        if pretrained:
            resnet.load_state_dict(torch.load(res152_path))
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu)
        self.layer1 = nn.Sequential(resnet.maxpool, resnet.layer1)
        self.layer2 = resnet.layer2
        self.layer3 = resnet.layer3
        self.layer4 = resnet.layer4

        #  use large kernels with odd sizes, one per feature-map scale;
        #  integer division keeps kernel_size an int under Python 3
        #  (e.g. input_size 512 -> 512 // 32 - 1 = 15)
        gcm_ks = [[input_size[0] // 32 - 1, input_size[1] // 32 - 1],
                  [input_size[0] // 16 - 1, input_size[1] // 16 - 1],
                  [input_size[0] // 8 - 1, input_size[1] // 8 - 1],
                  [input_size[0] // 4 - 1, input_size[1] // 4 - 1]]
        gcm_ks = [(ks[0] - 1 if ks[0] % 2 == 0 else ks[0],
                   ks[1] - 1 if ks[1] % 2 == 0 else ks[1]) for ks in gcm_ks]
        self.gcm1 = GlobalConvModule(2048, num_classes, gcm_ks[0])
        self.gcm2 = GlobalConvModule(1024, num_classes, gcm_ks[1])
        self.gcm3 = GlobalConvModule(512, num_classes, gcm_ks[2])
        self.gcm4 = GlobalConvModule(256, num_classes, gcm_ks[3])

        self.brm1 = BoundaryRefineModule(num_classes)
        self.brm2 = BoundaryRefineModule(num_classes)
        self.brm3 = BoundaryRefineModule(num_classes)
        self.brm4 = BoundaryRefineModule(num_classes)
        self.brm5 = BoundaryRefineModule(num_classes)
        self.brm6 = BoundaryRefineModule(num_classes)
        self.brm7 = BoundaryRefineModule(num_classes)
        self.brm8 = BoundaryRefineModule(num_classes)
        self.brm9 = BoundaryRefineModule(num_classes)

        initialize_weights(self.gcm1, self.gcm2, self.gcm3, self.gcm4,
                           self.brm1, self.brm2, self.brm3, self.brm4,
                           self.brm5, self.brm6, self.brm7, self.brm8,
                           self.brm9)
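The snippet only constructs the modules. One plausible wiring for the nine boundary refinement modules is one after each global convolution (brm1-brm4), one after each of the three score fusions (brm5-brm7), and two on the final upsampling path back to input_size (brm8, brm9); the sketch below follows that assumption and uses bilinear resizing throughout:

import torch.nn.functional as F

def forward(self, x):
    # ResNet-152 backbone stages at strides 2, 4, 8, 16, 32
    f0 = self.layer0(x)
    f1 = self.layer1(f0)   # 256 channels, 1/4
    f2 = self.layer2(f1)   # 512 channels, 1/8
    f3 = self.layer3(f2)   # 1024 channels, 1/16
    f4 = self.layer4(f3)   # 2048 channels, 1/32
    # global convolution + boundary refinement on every stage
    g1 = self.brm1(self.gcm1(f4))
    g2 = self.brm2(self.gcm2(f3))
    g3 = self.brm3(self.gcm3(f2))
    g4 = self.brm4(self.gcm4(f1))
    # progressively upsample, add the finer-scale scores, and refine again
    out = self.brm5(F.interpolate(g1, g2.size()[2:], mode='bilinear', align_corners=False) + g2)
    out = self.brm6(F.interpolate(out, g3.size()[2:], mode='bilinear', align_corners=False) + g3)
    out = self.brm7(F.interpolate(out, g4.size()[2:], mode='bilinear', align_corners=False) + g4)
    out = self.brm8(F.interpolate(out, scale_factor=2, mode='bilinear', align_corners=False))
    out = self.brm9(F.interpolate(out, size=self.input_size, mode='bilinear', align_corners=False))
    return out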