Example 1
    def __init__(self,
                 low_in_channels,
                 high_in_channels,
                 key_channels,
                 value_channels,
                 out_channels=None,
                 scale=1,
                 norm_layer=nn.BatchNorm2d,
                 psp_size=(1, 3, 6, 8)):
        super(_SelfAttentionBlock, self).__init__()
        self.scale = scale
        self.in_channels = low_in_channels
        self.out_channels = out_channels
        self.key_channels = key_channels
        self.value_channels = value_channels
        if out_channels is None:
            self.out_channels = high_in_channels
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))

        self.f_key = ConvBnRelu(self.in_channels,
                                self.key_channels,
                                1,
                                1,
                                0,
                                has_bn=True,
                                norm_layer=norm_layer,
                                has_relu=True,
                                has_bias=False)

        self.f_query = ConvBnRelu(high_in_channels,
                                  self.key_channels,
                                  1,
                                  1,
                                  0,
                                  has_bn=True,
                                  norm_layer=norm_layer,
                                  has_relu=True,
                                  has_bias=False)

        self.f_value = nn.Conv2d(in_channels=self.in_channels,
                                 out_channels=self.value_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        # self.f_value = Conv(self.in_channels, self.value_channels, 1, 1, 0)
        #self.W = Conv_DW(self.value_channels, self.out_channels, 1, 1, 0)
        self.W = nn.Conv2d(in_channels=self.value_channels,
                           out_channels=self.out_channels,
                           kernel_size=1,
                           stride=1,
                           padding=0)
        self.psp = PSPModule(psp_size)
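        # Both the weight and bias of W are zeroed below, so this 1x1 projection
        # outputs all zeros at initialization and the attention output starts at zero.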
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
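
The excerpt above only defines the projection layers; the forward pass is not shown. Below is a minimal, self-contained sketch of the standard non-local attention computation these layers suggest (queries from the high-level map, keys/values from the low-level one, with keys/values typically reduced to a small set of positions by the PSP pooling). Shapes, names, and the scaling factor are illustrative assumptions, not the repository's actual code.

import torch
import torch.nn.functional as F

def attention_sketch(query_feats, key_feats, value_feats):
    # query_feats: (B, Ck, Hq, Wq); key_feats: (B, Ck, N); value_feats: (B, Cv, N),
    # where N is the number of key/value positions (e.g. after PSP-style pooling).
    b, ck, hq, wq = query_feats.shape
    q = query_feats.view(b, ck, -1).permute(0, 2, 1)         # (B, Hq*Wq, Ck)
    sim = torch.matmul(q, key_feats) * (ck ** -0.5)          # (B, Hq*Wq, N)
    sim = F.softmax(sim, dim=-1)
    ctx = torch.matmul(sim, value_feats.permute(0, 2, 1))    # (B, Hq*Wq, Cv)
    return ctx.permute(0, 2, 1).reshape(b, -1, hq, wq)       # (B, Cv, Hq, Wq)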
Example 2
 def __init__(self,
              low_in_channels,
              high_in_channels,
              out_channels,
              key_channels,
              value_channels,
              dropout,
              sizes=[1],
              norm_layer=nn.BatchNorm2d,
              psp_size=(1, 3, 6, 8)):
     super(AFNB, self).__init__()
     self.stages = []
     self.norm_type = norm_layer
     self.psp_size = psp_size
     self.stages = nn.ModuleList([
         self._make_stage([low_in_channels, high_in_channels], out_channels,
                          key_channels, value_channels, size)
         for size in sizes
     ])
     self.conv_bn_dropout = nn.Sequential(
         ConvBnRelu(out_channels + high_in_channels,
                    out_channels,
                    1,
                    1,
                    0,
                    has_bn=True,
                    norm_layer=norm_layer,
                    has_relu=True,
                    has_bias=False), nn.Dropout2d(dropout))
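
AFNB's forward pass is not part of this excerpt. The sketch below illustrates the fusion step implied by conv_bn_dropout: the stage outputs are combined, concatenated with the high-level input, and reduced back to out_channels. All channel counts are assumed for illustration, and plain torch layers stand in for the repo's ConvBnRelu.

import torch
import torch.nn as nn

high = torch.randn(2, 512, 32, 32)              # high_in_channels = 512 (assumed)
stage_outputs = [torch.randn(2, 256, 32, 32)]   # one stage, out_channels = 256 (assumed)

context = stage_outputs[0]
for extra in stage_outputs[1:]:                 # summed when len(sizes) > 1
    context = context + extra

fuse = nn.Sequential(
    nn.Conv2d(256 + 512, 256, kernel_size=1, bias=False),
    nn.BatchNorm2d(256),
    nn.ReLU(inplace=True),
    nn.Dropout2d(0.1))
out = fuse(torch.cat([context, high], dim=1))   # (2, 256, 32, 32)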
Example 3
    def __init__(self, in_planes, out_planes, scale, norm_layer=nn.BatchNorm2d):
        super(UNetHead, self).__init__()
        self.conv_3x3 = ConvBnRelu(in_planes, 64, 3, 1, 1,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)
        self.conv_1x1 = nn.Conv2d(64, out_planes, kernel_size=1, stride=1, padding=0)

        self.scale = scale
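
The head's forward pass is not shown; what the layers imply is a 3x3 conv-bn-relu, a 1x1 classifier, and bilinear upsampling by scale. A stand-alone sketch follows, with plain torch modules replacing the repo's ConvBnRelu wrapper and illustrative names.

import torch
import torch.nn as nn
import torch.nn.functional as F

class HeadSketch(nn.Module):
    def __init__(self, in_planes, out_planes, scale):
        super().__init__()
        self.conv_3x3 = nn.Sequential(
            nn.Conv2d(in_planes, 64, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True))
        self.conv_1x1 = nn.Conv2d(64, out_planes, kernel_size=1)
        self.scale = scale

    def forward(self, x):
        x = self.conv_1x1(self.conv_3x3(x))
        return F.interpolate(x, scale_factor=self.scale,
                             mode='bilinear', align_corners=True)

logits = HeadSketch(64, 19, 4)(torch.randn(1, 64, 64, 64))   # -> (1, 19, 256, 256)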
Example 4
 def __init__(self,
              in_planes,
              out_planes,
              scale,
              is_aux=False,
              norm_layer=nn.BatchNorm2d,
              alpha=1):
     super(FPNetHead, self).__init__()
     if is_aux:
         self.conv_3x3 = ConvBnRelu(in_planes,
                                    int(np.rint(256 * alpha)),
                                    3,
                                    1,
                                    1,
                                    has_bn=True,
                                    norm_layer=norm_layer,
                                    has_relu=True,
                                    has_bias=False)
     else:
         self.conv_3x3 = ConvBnRelu(in_planes,
                                    int(np.rint(64 * alpha)),
                                    3,
                                    1,
                                    1,
                                    has_bn=True,
                                    norm_layer=norm_layer,
                                    has_relu=True,
                                    has_bias=False)
     # self.dropout = nn.Dropout(0.1)
     if is_aux:
         self.conv_1x1 = nn.Conv2d(int(np.rint(256 * alpha)),
                                   out_planes,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
     else:
         self.conv_1x1 = nn.Conv2d(int(np.rint(64 * alpha)),
                                   out_planes,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
     self.scale = scale
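
The only differences from UNetHead are the alpha width multiplier and the wider intermediate channel count used when the head is auxiliary (256 vs 64). A quick check of the rounding the code relies on:

import numpy as np

for alpha in (1.0, 0.75, 0.5):
    aux_width = int(np.rint(256 * alpha))
    main_width = int(np.rint(64 * alpha))
    print(alpha, aux_width, main_width)
# 1.0 256 64
# 0.75 192 48
# 0.5 128 32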
Example 5
    def __init__(self, out_planes, criterion, dice_criterion=None, is_training=True, norm_layer=nn.BatchNorm2d):
        super(NestedUNet, self).__init__()

        # self.args = args
        self.is_training = is_training

        self.layer0 = ConvBnRelu(3, 64, ksize=7, stride=2, pad=3, has_bn=True, has_relu=True, has_bias=False, norm_layer=norm_layer)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.criterion = criterion
        self.dice_criterion = dice_criterion
        nb_filter = [64, 128, 256, 512, 1024]

        self.pool = nn.MaxPool2d(2, 2)
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

        self.conv0_0 = VGGBlock(nb_filter[0], nb_filter[0], nb_filter[0])
        self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])
        self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])
        self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])
        self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])

        self.conv0_1 = VGGBlock(nb_filter[0]+nb_filter[1], nb_filter[0], nb_filter[0])
        self.conv1_1 = VGGBlock(nb_filter[1]+nb_filter[2], nb_filter[1], nb_filter[1])
        self.conv2_1 = VGGBlock(nb_filter[2]+nb_filter[3], nb_filter[2], nb_filter[2])
        self.conv3_1 = VGGBlock(nb_filter[3]+nb_filter[4], nb_filter[3], nb_filter[3])

        self.conv0_2 = VGGBlock(nb_filter[0]*2+nb_filter[1], nb_filter[0], nb_filter[0])
        self.conv1_2 = VGGBlock(nb_filter[1]*2+nb_filter[2], nb_filter[1], nb_filter[1])
        self.conv2_2 = VGGBlock(nb_filter[2]*2+nb_filter[3], nb_filter[2], nb_filter[2])

        self.conv0_3 = VGGBlock(nb_filter[0]*3+nb_filter[1], nb_filter[0], nb_filter[0])
        self.conv1_3 = VGGBlock(nb_filter[1]*3+nb_filter[2], nb_filter[1], nb_filter[1])

        self.conv0_4 = VGGBlock(nb_filter[0]*4+nb_filter[1], nb_filter[0], nb_filter[0])


        self.heads = [UNetHead(64, out_planes, 4, norm_layer=norm_layer),
                      UNetHead(64, out_planes, 4, norm_layer=norm_layer),
                      UNetHead(64, out_planes, 4, norm_layer=norm_layer),
                      UNetHead(64, out_planes, 4, norm_layer=norm_layer)]
        self.heads = nn.ModuleList(self.heads)
        self.business_layer = []
        self.business_layer.append(self.layer0)
        self.business_layer.append(self.conv0_0)
        self.business_layer.append(self.conv1_0)
        self.business_layer.append(self.conv2_0)
        self.business_layer.append(self.conv3_0)
        self.business_layer.append(self.conv4_0)

        self.business_layer.append(self.conv0_1)
        self.business_layer.append(self.conv1_1)
        self.business_layer.append(self.conv2_1)
        self.business_layer.append(self.conv3_1)

        self.business_layer.append(self.conv0_2)
        self.business_layer.append(self.conv1_2)
        self.business_layer.append(self.conv2_2)

        self.business_layer.append(self.conv0_3)
        self.business_layer.append(self.conv0_4)

        self.business_layer.append(self.heads)
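
The forward pass is not in the excerpt, but the channel counts of the conv{i}_{j} blocks above encode the nested (UNet++-style) skip pattern: node x_{i,j} concatenates all earlier nodes at depth i with the upsampled node from depth i+1, so its block sees nb_filter[i]*j + nb_filter[i+1] input channels. A minimal sketch of the first such node, using stand-in layers rather than the repo's VGGBlock:

import torch
import torch.nn as nn

up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

def block_sketch(cin, cout):
    # stand-in for VGGBlock: just enough to check the channel arithmetic
    return nn.Sequential(nn.Conv2d(cin, cout, 3, padding=1),
                         nn.BatchNorm2d(cout),
                         nn.ReLU(inplace=True))

nb = [64, 128]
x0_0 = torch.randn(1, nb[0], 64, 64)             # depth-0 feature (assumed size)
x1_0 = torch.randn(1, nb[1], 32, 32)             # depth-1 feature, half resolution
x0_1 = block_sketch(nb[0] + nb[1], nb[0])(torch.cat([x0_0, up(x1_0)], dim=1))
print(x0_1.shape)                                # torch.Size([1, 64, 64, 64])
# conv0_2 would then take cat([x0_0, x0_1, up(x1_1)]): nb[0]*2 + nb[1] input channels.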