Example no. 1
0
    def __init__(self, feature_scale=4, n_classes=3,
                 is_deconv=True, is_batchnorm=True):
        """Build a 3-level 3D U-Net with softmax class output.

        Args:
            feature_scale: divisor applied to the base channel widths
                [64, 128, 256, 512] to shrink the model (memory/speed knob).
            n_classes: number of output segmentation classes.
            is_deconv: passed to the up-sampling blocks (transposed conv
                vs. interpolation upsampling).
            is_batchnorm: enable batch normalisation inside the conv blocks.
        """
        super(UnetR3D, self).__init__()
        filters = [64, 128, 256, 512]
        filters = [x // feature_scale for x in filters]

        # downsampling: conv block followed by 2x max-pooling at each level
        self.conv1    = UnetConv3D(1, filters[0], is_batchnorm)
        self.maxpool1 = nn.MaxPool3d(kernel_size=2)

        self.conv2    = UnetConv3D(filters[0], filters[1], is_batchnorm)
        self.maxpool2 = nn.MaxPool3d(kernel_size=2)

        self.conv3    = UnetConv3D(filters[1], filters[2], is_batchnorm)
        self.maxpool3 = nn.MaxPool3d(kernel_size=2)

        self.center   = UnetConv3D(filters[2], filters[3], is_batchnorm)

        # upsampling path with skip concatenation
        self.up_concat3 = UnetUpConv3D(filters[3], filters[2], is_deconv)
        self.up_concat2 = UnetUpConv3D(filters[2], filters[1], is_deconv)
        self.up_concat1 = UnetUpConv3D(filters[1], filters[0], is_deconv)

        # final 1x1x1 conv maps to class logits; softmax over the channel dim
        self.final = nn.Conv3d(filters[0], n_classes, 1)
        self.final2 = nn.Softmax(dim=1)

        # initialise weights
        # BUG FIX: the original tested isinstance against nn.Conv2d /
        # nn.BatchNorm2d, which never match in this all-3D network, so the
        # Kaiming initialisation was silently skipped. Check 3D types instead.
        for m in self.modules():
            if isinstance(m, (nn.Conv3d, nn.BatchNorm3d)):
                m.apply(weights_init_kaiming)
Example no. 2
0
    def __init__(self, feature_scale=1, trans_feature=32, is_batchnorm=True):
        """Build a 3D U-Net backbone with residual down-blocks and GCN heads.

        Args:
            feature_scale: divisor applied to the base channel widths
                [64, 128, 256, 512, 1024] to shrink the model.
            trans_feature: channel width every GCN head projects to, and the
                working width of the decoder ConBR blocks.
            is_batchnorm: enable batch normalisation in the conv blocks.
        """
        super(Unet3D_glob, self).__init__()
        layer_num = [2, 3, 3, 2]
        feature_num = [64, 128, 256, 512, 1024]
        feature_num = [x // feature_scale for x in feature_num]
        self.trans_feature = trans_feature

        # downsampling: stem conv, then four stride-2 residual stages
        self.conv1 = UnetConv3D(1, feature_num[0],
                                is_batchnorm)  ## O : feature_num[0] * 64*64*32
        self.layer1 = ResBlock(layer_num[0],
                               feature_num[0],
                               feature_num[0] // 2,
                               feature_num[1],
                               stride=2,
                               padding=1)  ## feature_num[1] * 32*32*16
        self.layer2 = ResBlock(layer_num[1],
                               feature_num[1],
                               feature_num[1] // 2,
                               feature_num[2],
                               stride=2,
                               padding=1)  ## feature_num[2] * 16*16*8
        self.layer3 = ResBlock(layer_num[2],
                               feature_num[2],
                               feature_num[2] // 2,
                               feature_num[3],
                               stride=2,
                               padding=1)  ## feature_num[3] * 8*8*4
        self.layer4 = ResBlock(layer_num[3],
                               feature_num[3],
                               feature_num[3] // 2,
                               feature_num[4],
                               stride=2,
                               padding=1)  ## feature_num[4] * 4*4*2

        # one GCN head per scale; kernel extent k shrinks with spatial size
        self.gcn1 = GCN(feature_num[0], self.trans_feature, k=(21, 21, 13))
        self.gcn2 = GCN(feature_num[1], self.trans_feature,
                        k=(11, 11, 7))  #gcn_i after layer-1
        self.gcn3 = GCN(feature_num[2], self.trans_feature, k=(7, 7, 5))
        self.gcn4 = GCN(feature_num[3], self.trans_feature, k=(3, 3, 3))
        self.gcn5 = GCN(feature_num[4], self.trans_feature, k=(3, 3, 1))

        # decoder fusion blocks (each consumes two trans_feature-wide inputs)
        self.ConBR1 = ConBR(2 * self.trans_feature, self.trans_feature)
        self.ConBR2 = ConBR(2 * self.trans_feature, self.trans_feature)
        self.ConBR3 = ConBR(2 * self.trans_feature, self.trans_feature)
        self.ConBR4 = ConBR(2 * self.trans_feature, self.trans_feature)

        # final conv (without any concat)
        self.ConBRF = ConBR(self.trans_feature, self.trans_feature)

        self.final = UnetConv3D(self.trans_feature, self.trans_feature,
                                is_batchnorm)
        self.final2 = nn.Conv3d(self.trans_feature, 1, 1)

        # initialise weights
        # BUG FIX: the original tested isinstance against nn.Conv2d /
        # nn.BatchNorm2d, which never match in this all-3D network, so the
        # Kaiming initialisation was silently skipped. Check 3D types instead.
        for m in self.modules():
            if isinstance(m, (nn.Conv3d, nn.BatchNorm3d)):
                m.apply(weights_init_kaiming)
Example no. 3
0
    def __init__(self, in_size, out_size, is_batchnorm=True):
        """Upsample-and-concat decoder block.

        Trilinearly upsamples the coarse feature map by 2x in every
        dimension, then convolves the concatenation of skip features
        (in_size channels) and upsampled features (out_size channels)
        down to out_size channels.
        """
        super(UnetUp3_CT, self).__init__()
        self.conv = UnetConv3D(in_size + out_size,
                               out_size,
                               is_batchnorm,
                               kernel_size=(3, 3, 3),
                               padding_size=(1, 1, 1))
        self.up = nn.Upsample(scale_factor=(2, 2, 2), mode='trilinear')

        # initialise every direct child except the UnetConv3D block,
        # which performs its own initialisation
        for child in self.children():
            if 'UnetConv3D' in child.__class__.__name__:
                continue
            child.apply(weights_init_kaiming)
Example no. 4
0
    def __init__(self, feature_scale=1, trans_feature=24):
        """Build a 3D U-Net backbone with residual stages, GCN heads and a
        3-class softmax segmentation head.

        Args:
            feature_scale: divisor applied to the base channel widths
                [64, 128, 128, 128, 128] to shrink the model.
            trans_feature: channel width every per-scale head projects to,
                and the working width of the decoder ConBR blocks.
        """
        super(Unet3D_glob1109, self).__init__()
        layer_num = [2, 2, 2, 2]
        feature_num = [64, 128, 128, 128, 128]
        feature_num = [x // feature_scale for x in feature_num]
        self.trans_feature = trans_feature

        # downsampling: stem conv, then four stride-2 residual stages
        self.conv1 = UnetConv3D(
            1, feature_num[0],
            is_batchnorm=True)  ## O : feature_num[0] * 128*128*64
        self.layer1 = ResBlock(layer_num[0],
                               feature_num[0],
                               feature_num[0] // 2,
                               feature_num[1],
                               stride=2,
                               padding=1)  ## feature_num[1] * 64*64*32
        self.layer2 = ResBlock(layer_num[1],
                               feature_num[1],
                               feature_num[1] // 2,
                               feature_num[2],
                               stride=2,
                               padding=1)  ## feature_num[2] * 32*32*16
        self.layer3 = ResBlock(layer_num[2],
                               feature_num[2],
                               feature_num[2] // 2,
                               feature_num[3],
                               stride=2,
                               padding=1)  ## feature_num[3] * 16*16*8
        self.layer4 = ResBlock(layer_num[3],
                               feature_num[3],
                               feature_num[3] // 2,
                               feature_num[4],
                               stride=2,
                               padding=1)  ## feature_num[4] * 8*8*4

        # per-scale heads: a plain 1x1x1 projection at full resolution,
        # GCN heads (kernel extent k) at the coarser scales
        self.gcn1 = nn.Conv3d(feature_num[0], self.trans_feature, 1)
        self.gcn2 = GCN(feature_num[1], self.trans_feature, k=(7, 7, 7))
        self.gcn3 = GCN(feature_num[2], self.trans_feature,
                        k=(7, 7, 7))  #gcn_i after layer-1
        self.gcn4 = GCN(feature_num[3], self.trans_feature, k=(7, 7, 7))
        self.gcn5 = GCN(feature_num[4], self.trans_feature, k=(3, 3, 3))

        # decoder fusion blocks
        self.ConBR1 = ConBR(self.trans_feature, self.trans_feature)
        self.ConBR2 = ConBR(self.trans_feature, self.trans_feature)
        self.ConBR3 = ConBR(self.trans_feature, self.trans_feature)
        self.ConBR4 = ConBR(self.trans_feature, self.trans_feature)

        # final conv (without any concat): 3x3x3 conv + BN + ReLU + dropout,
        # then 1x1x1 conv to 3 classes with softmax over the channel dim
        self.final = nn.Sequential(
            nn.Conv3d(self.trans_feature,
                      self.trans_feature // 2,
                      3,
                      padding=1,
                      bias=False), nn.BatchNorm3d(self.trans_feature // 2),
            nn.ReLU(inplace=True), nn.Dropout(.1),
            nn.Conv3d(self.trans_feature // 2, 3, 1), nn.Softmax(dim=1))

        # initialise weights
        # BUG FIX: the original tested isinstance against nn.Conv2d /
        # nn.BatchNorm2d, which never match in this all-3D network, so the
        # Kaiming initialisation was silently skipped. Check 3D types instead.
        for m in self.modules():
            if isinstance(m, (nn.Conv3d, nn.BatchNorm3d)):
                m.apply(weights_init_kaiming)
Example no. 5
0
    def __init__(self, feature_scale=1, trans_feature=64, is_batchnorm=True):
        """Build a 3D U-Net backbone with residual stages, GCN heads and
        transposed-conv upsampling to a single-channel output.

        Args:
            feature_scale: divisor applied to the base channel widths
                [64, 128, 256, 512, 1024] to shrink the model.
            trans_feature: channel width every GCN head projects to, and the
                working width of the transposed-conv decoder.
            is_batchnorm: enable batch normalisation in the stem conv block.
        """
        super(Unet3D_glob2, self).__init__()
        layer_num = [3, 3, 4, 3]
        feature_num = [64, 128, 256, 512, 1024]
        feature_num = [x // feature_scale for x in feature_num]
        self.trans_feature = trans_feature

        # downsampling: stem conv, then four stride-2 residual stages
        self.conv1 = UnetConv3D(1, feature_num[0],
                                is_batchnorm)  ## O : feature_num[0] * 64*64*32
        self.layer1 = ResBlock(layer_num[0],
                               feature_num[0],
                               feature_num[0] // 2,
                               feature_num[1],
                               stride=2,
                               padding=1)  ## feature_num[1] * 32*32*16
        self.layer2 = ResBlock(layer_num[1],
                               feature_num[1],
                               feature_num[1] // 2,
                               feature_num[2],
                               stride=2,
                               padding=1)  ## feature_num[2] * 16*16*8
        self.layer3 = ResBlock(layer_num[2],
                               feature_num[2],
                               feature_num[2] // 2,
                               feature_num[3],
                               stride=2,
                               padding=1)  ## feature_num[3] * 8*8*4
        self.layer4 = ResBlock(layer_num[3],
                               feature_num[3],
                               feature_num[3] // 2,
                               feature_num[4],
                               stride=2,
                               padding=1)  ## feature_num[4] * 4*4*2

        # one GCN head per downsampled scale; kernel extent k shrinks with size
        self.gcn1 = GCN(feature_num[1], self.trans_feature,
                        k=(7, 7, 5))  #gcn_i after layer-1
        self.gcn2 = GCN(feature_num[2], self.trans_feature, k=(7, 7, 5))
        self.gcn3 = GCN(feature_num[3], self.trans_feature, k=(5, 5, 3))
        self.gcn4 = GCN(feature_num[4], self.trans_feature, k=(3, 3, 1))

        # decoder: each stage doubles the spatial resolution of the
        # concatenation of two trans_feature-wide maps (2x upsampling)
        self.up1 = nn.ConvTranspose3d(2 * self.trans_feature,
                                      self.trans_feature,
                                      kernel_size=(4, 4, 4),
                                      stride=(2, 2, 2),
                                      padding=(1, 1, 1))
        self.up2 = nn.ConvTranspose3d(2 * self.trans_feature,
                                      self.trans_feature,
                                      kernel_size=(4, 4, 4),
                                      stride=(2, 2, 2),
                                      padding=(1, 1, 1))
        self.up3 = nn.ConvTranspose3d(2 * self.trans_feature,
                                      self.trans_feature,
                                      kernel_size=(4, 4, 4),
                                      stride=(2, 2, 2),
                                      padding=(1, 1, 1))

        # final conv (without any concat)
        self.BR = ConBR(self.trans_feature, self.trans_feature)
        self.final = nn.Conv3d(self.trans_feature, 1, 1)

        # initialise weights
        # BUG FIX: the original tested isinstance against nn.Conv2d /
        # nn.BatchNorm2d, which never match in this all-3D network, so the
        # Kaiming initialisation was silently skipped. Check 3D types instead.
        for m in self.modules():
            if isinstance(m, (nn.Conv3d, nn.BatchNorm3d)):
                m.apply(weights_init_kaiming)