Example #1
    def __init__(self, num_classes):  # an encoder argument could be added here to pass in a pretrained encoder
        super(ERFNet, self).__init__()

        self.encoder = Encoder(num_classes)
        self.decoder = Decoder(1)
        self.aspp = ASPP(dim_in=128, dim_out=128, rate=1, bn_mom=0.007)
        self.psp_layer = PyramidPooling('psp', 128, 128,
                                        norm_layer=nn.BatchNorm2d)
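A quick way to sanity-check the ASPP configuration above (a sketch, assuming this project's ASPP maps (N, dim_in, H, W) to (N, dim_out, H, W)):

import torch

# Hypothetical shape check; ASPP is the project's own module, not a library API.
aspp = ASPP(dim_in=128, dim_out=128, rate=1, bn_mom=0.007)
out = aspp(torch.randn(2, 128, 32, 32))
print(out.shape)  # expected: torch.Size([2, 128, 32, 32])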
Example #2
    def __init__(self):
        super(Net, self).__init__()
        self.down = True
        self.startconv = nn.Conv2d(1, 3, kernel_size=1)
        self.basemodel = smp.Unet(
            encoder_name='efficientnet-b0',
            encoder_weights='imagenet',
            in_channels=3,
            classes=1
        )
        self.planes = [32, 48, 136, 384]
        self.down = False  # overrides the True assigned above
        self.center = ASPP(self.planes[3], self.planes[2])
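The smp backbone can be smoke-tested on its own; a minimal sketch (encoder weights skipped to avoid the ImageNet download; the surrounding Net would first map 1-channel input to 3 channels with startconv):

import torch
import segmentation_models_pytorch as smp

unet = smp.Unet(encoder_name='efficientnet-b0', encoder_weights=None,
                in_channels=3, classes=1)
unet.eval()
with torch.no_grad():
    y = unet(torch.randn(1, 3, 256, 256))
print(y.shape)  # torch.Size([1, 1, 256, 256])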
Example #3
def attn_reg(opt, input_size, lossfxn):

    img_input = Input(shape=input_size, name='input_scale1')
    scale_img_2 = AveragePooling2D(pool_size=(2, 2),
                                   name='input_scale2')(img_input)
    scale_img_3 = AveragePooling2D(pool_size=(2, 2),
                                   name='input_scale3')(scale_img_2)
    scale_img_4 = AveragePooling2D(pool_size=(2, 2),
                                   name='input_scale4')(scale_img_3)

    conv1 = UnetConv2D(img_input, 32, is_batchnorm=True, name='conv1')
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    input2 = Conv2D(64, (3, 3),
                    padding='same',
                    activation='relu',
                    name='conv_scale2')(scale_img_2)
    input2 = concatenate([input2, pool1], axis=3)
    conv2 = UnetConv2D(input2, 64, is_batchnorm=True, name='conv2')
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    input3 = Conv2D(128, (3, 3),
                    padding='same',
                    activation='relu',
                    name='conv_scale3')(scale_img_3)
    input3 = concatenate([input3, pool2], axis=3)
    conv3 = UnetConv2D(input3, 128, is_batchnorm=True, name='conv3')
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    input4 = Conv2D(256, (3, 3),
                    padding='same',
                    activation='relu',
                    name='conv_scale4')(scale_img_4)
    input4 = concatenate([input4, pool3], axis=3)
    conv4 = UnetConv2D(input4, 64, is_batchnorm=True, name='conv4')
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    #center = UnetConv2D(pool4, 512, is_batchnorm=True, name='center')
    center = ASPP(pool4, 512, name='center')

    g1 = UnetGatingSignal(center, is_batchnorm=True, name='g1')
    attn1 = AttnGatingBlock(conv4, g1, 128, '_1')
    up1 = concatenate([
        Conv2DTranspose(32, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        kernel_initializer=kinit)(center), attn1
    ],
                      name='up1')

    g2 = UnetGatingSignal(up1, is_batchnorm=True, name='g2')
    attn2 = AttnGatingBlock(conv3, g2, 64, '_2')
    up2 = concatenate([
        Conv2DTranspose(64, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        kernel_initializer=kinit)(up1), attn2
    ],
                      name='up2')

    g3 = UnetGatingSignal(up2, is_batchnorm=True, name='g3')  # gate from the preceding decoder stage
    attn3 = AttnGatingBlock(conv2, g3, 32, '_3')
    up3 = concatenate([
        Conv2DTranspose(32, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        kernel_initializer=kinit)(up2), attn3
    ],
                      name='up3')

    up4 = concatenate([
        Conv2DTranspose(32, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        kernel_initializer=kinit)(up3), conv1
    ],
                      name='up4')

    conv6 = UnetConv2D(up1, 256, is_batchnorm=True, name='conv6')
    conv7 = UnetConv2D(up2, 128, is_batchnorm=True, name='conv7')
    conv8 = UnetConv2D(up3, 64, is_batchnorm=True, name='conv8')
    conv9 = UnetConv2D(up4, 32, is_batchnorm=True, name='conv9')

    out6 = Conv2D(1, (1, 1), activation='sigmoid', name='pred1')(conv6)
    out7 = Conv2D(1, (1, 1), activation='sigmoid', name='pred2')(conv7)
    out8 = Conv2D(1, (1, 1), activation='sigmoid', name='pred3')(conv8)
    out9 = Conv2D(1, (1, 1), activation='sigmoid', name='final')(conv9)

    model = Model(inputs=[img_input], outputs=[out6, out7, out8, out9])

    loss = {
        'pred1': lossfxn,
        'pred2': lossfxn,
        'pred3': lossfxn,
        'final': losses.tversky_loss
    }

    loss_weights = {'pred1': 1, 'pred2': 1, 'pred3': 1, 'final': 1}
    model.compile(optimizer=opt,
                  loss=loss,
                  loss_weights=loss_weights,
                  metrics=[losses.dsc])
    return model
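A hypothetical invocation, assuming UnetConv2D, UnetGatingSignal, AttnGatingBlock, ASPP, kinit and the losses module from the same codebase are in scope:

from keras.optimizers import Adam

# lossfxn drives the three intermediate deep-supervision heads ('pred1'..'pred3');
# the 'final' head is hard-wired to losses.tversky_loss above.
model = attn_reg(opt=Adam(lr=1e-4),
                 input_size=(256, 256, 1),
                 lossfxn=losses.tversky_loss)
model.summary()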
Example #4
def attn_unet(opt, input_size, lossfxn):
    inputs = Input(shape=input_size)
    conv1 = UnetConv2D(inputs, 32, is_batchnorm=True, name='conv1')
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = UnetConv2D(pool1, 32, is_batchnorm=True, name='conv2')
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = UnetConv2D(pool2, 64, is_batchnorm=True, name='conv3')
    #conv3 = Dropout(0.2,name='drop_conv3')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = UnetConv2D(pool3, 64, is_batchnorm=True, name='conv4')
    #conv4 = Dropout(0.2, name='drop_conv4')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    #center = UnetConv2D(pool4, 128, is_batchnorm=True, name='center')
    center = ASPP(pool4, 128, name='center')

    g1 = UnetGatingSignal(center, is_batchnorm=True, name='g1')
    attn1 = AttnGatingBlock(conv4, g1, 128, '_1')
    up1 = concatenate([
        Conv2DTranspose(32, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        kernel_initializer=kinit)(center), attn1
    ],
                      name='up1')

    g2 = UnetGatingSignal(up1, is_batchnorm=True, name='g2')
    attn2 = AttnGatingBlock(conv3, g2, 64, '_2')
    up2 = concatenate([
        Conv2DTranspose(64, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        kernel_initializer=kinit)(up1), attn2
    ],
                      name='up2')

    g3 = UnetGatingSignal(up2, is_batchnorm=True, name='g3')  # gate from the preceding decoder stage
    attn3 = AttnGatingBlock(conv2, g3, 32, '_3')
    up3 = concatenate([
        Conv2DTranspose(32, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        kernel_initializer=kinit)(up2), attn3
    ],
                      name='up3')

    up4 = concatenate([
        Conv2DTranspose(32, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        kernel_initializer=kinit)(up3), conv1
    ],
                      name='up4')
    out = Conv2D(1, (1, 1),
                 activation='sigmoid',
                 kernel_initializer=kinit,
                 name='final')(up4)

    model = Model(inputs=[inputs], outputs=[out])
    model.compile(optimizer=opt,
                  loss=lossfxn,
                  metrics=[losses.dsc, losses.tp, losses.tn])
    return model
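The single-head variant is driven the same way; a hypothetical call (imports and helpers as in the previous sketch):

# One loss function covers the whole model here, since there is a single
# sigmoid output instead of four supervised heads.
model = attn_unet(opt=Adam(lr=1e-4),
                  input_size=(256, 256, 1),
                  lossfxn=losses.tversky_loss)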
Example #5
    def __init__(self, pretrained=True, num_classes=-1):
        super(deeplabv3plus, self).__init__()
        self.backbone = None
        self.backbone_layers = None
        input_channel = 2048
        self.aspp = ASPP(dim_in=input_channel,
                         dim_out=256,
                         rate=16 // 16,
                         bn_mom=0.0003)
        self.dropout1 = nn.Dropout(0.5)
        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
        self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=16 // 8)  # 16//4

        indim = 728
        shallow1_dim = 64
        self.shortcut_conv1_1 = nn.Sequential(
            nn.Conv2d(indim, shallow1_dim, 1, 1, padding=1 // 2, bias=True),
            SynchronizedBatchNorm2d(shallow1_dim, momentum=0.0003),
            nn.ReLU(inplace=True),
        )
        self.cat_conv1_1 = nn.Sequential(
            nn.Conv2d(256 + shallow1_dim, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
        )
        indim = 256
        shallow2_dim = 32
        self.shortcut_conv1_2 = nn.Sequential(
            nn.Conv2d(indim, shallow2_dim, 1, 1, padding=1 // 2, bias=True),
            SynchronizedBatchNorm2d(shallow2_dim, momentum=0.0003),
            nn.ReLU(inplace=True),
        )
        self.cat_conv1_2 = nn.Sequential(
            nn.Conv2d(256 + shallow2_dim, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
        )
        # self.predict5x5 = nn.Conv2d(256, 256, 5, 1, padding=2)
        self.predict5x5 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
        )

        self.cls_conv = nn.Conv2d(256, num_classes, 1, 1, padding=0)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.backbone = xception.xception(pretrained=pretrained, os=16)
        self.backbone_layers = self.backbone.get_layers()
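The hard-coded divisions track the output stride handed to the xception backbone (os=16); a small sketch making that implied parameterization explicit (an assumption, since the original simply hard-codes the numbers):

import torch.nn as nn

output_stride = 16                    # as passed to xception.xception(..., os=16)
aspp_rate = 16 // output_stride       # dilation rate 1 at os=16; an os=8 variant would give 2
upsample_sub = nn.UpsamplingBilinear2d(scale_factor=output_stride // 8)  # 2x here, matching 16 // 8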
Example #6
class DeepLabV3Plus(nn.Module):
    def __init__(self,
                 out_channels,
                 pretrained=True,
                 norm=nn.BatchNorm2d,
                 freeze_bn=False):
        super().__init__()

        self.freeze_bn = freeze_bn

        res = resnet50(pretrained)
        res.fc = None

        self.inplanes = res.inplanes
        self.encoder1 = nn.Sequential(
            res.conv1,
            res.bn1,
            res.relu,
            res.maxpool,
            res.layer1,  # ceil(size / 4)
        )

        self.ASPP = ASPP(2048, norm=norm)
        self.layer4 = res.layer4
        self.layer3 = res.layer3
        self.encoder2 = nn.Sequential(
            res.layer2,  # ceil(size / 8)
            self.layer3,  # ceil(size / 16)
            self.layer4,
            self.ASPP)

        # Collapse layer4 to stride 1 and dilate its 3x3 convs instead, so the
        # receptive field is kept while the output stride stays at 16.
        for m in self.layer4.modules():
            if isinstance(m, nn.Conv2d):
                m.stride = (1, 1)
                if m.kernel_size == (3, 3):
                    m.dilation = (2, 2)
                    m.padding = (2, 2)

        for m in self.encoder1.modules():
            if isinstance(m, nn.modules.ReLU):
                m.inplace = True

        for m in self.encoder2.modules():
            if isinstance(m, nn.modules.ReLU):
                m.inplace = True

        self.lowDecoder = nn.Sequential(
            nn.Conv2d(256, 48, 1, 1, bias=False),
            nn.BatchNorm2d(48),
            # nn.ReLU(True)
        )
        self.final = nn.Sequential(nn.Conv2d(304, 256, 3, 1, bias=False),
                                   nn.BatchNorm2d(256), nn.ReLU(True),
                                   nn.Conv2d(256, 256, 3, 1, bias=False),
                                   nn.BatchNorm2d(256), nn.ReLU(True),
                                   nn.Conv2d(256, out_channels, 1))

    def forward(self, x):
        # Encoder
        enc1 = self.encoder1(x)
        enc2 = self.encoder2(enc1)

        # Decoder
        lowfeat = self.lowDecoder(enc1)
        dec = F.interpolate(enc2, (lowfeat.size(2), lowfeat.size(3)),
                            mode='bilinear',
                            align_corners=True)
        dec = torch.cat([dec, lowfeat], 1)

        final = self.final(dec)
        final = F.interpolate(final, (x.size(2), x.size(3)),
                              mode='bilinear',
                              align_corners=True)
        final = F.log_softmax(final, dim=1)

        return final

    def train(self, mode=True):
        super(DeepLabV3Plus, self).train(mode)

        if self.freeze_bn:
            for module in self.modules():
                if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d,
                                       nn.BatchNorm3d)):
                    module.eval()

    def changeStride(self, ostride=8):
        # Move to output stride 8: layer4 convs get dilation 4 and layer3
        # convs dilation 2, with strides collapsed to 1 in both stages.
        for m in self.layer4.modules():
            if isinstance(m, nn.Conv2d):
                m.stride = (1, 1)
                if m.kernel_size == (3, 3):
                    m.dilation = (4, 4)
                    m.padding = (4, 4)

        for m in self.layer3.modules():
            if isinstance(m, nn.Conv2d):
                m.stride = (1, 1)
                if m.kernel_size == (3, 3):
                    m.dilation = (2, 2)
                    m.padding = (2, 2)

        self.ASPP.changeStride()
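A minimal smoke test for the class above (a sketch; resnet50 and ASPP are assumed importable from the surrounding project):

import torch

model = DeepLabV3Plus(out_channels=21, pretrained=False)
model.eval()
with torch.no_grad():
    logp = model(torch.randn(1, 3, 224, 224))
print(logp.shape)  # torch.Size([1, 21, 224, 224]); values are log-probabilities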