Example no. 1
    def __init__(self, img_ch=1, output_ch=1):
        super(AttU_Net, self).__init__()

        n1 = 16
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        # encoder: four 2x max-pooling stages between the five convolution blocks
        self.Maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2)

        self.Conv1 = conv_block2(img_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])
        self.do1 = nn.Dropout3d(0.5)

        # decoder: upsample, attention-gate the encoder skip connection, then fuse
        self.Up5 = up_conv(filters[4], filters[3])
        self.Att5 = Attention_block(F_g=filters[3],
                                    F_l=filters[3],
                                    F_int=filters[2])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Att4 = Attention_block(F_g=filters[2],
                                    F_l=filters[2],
                                    F_int=filters[1])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Att3 = Attention_block(F_g=filters[1],
                                    F_l=filters[1],
                                    F_int=filters[0])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Att2 = Attention_block(F_g=filters[0], F_l=filters[0], F_int=32)
        self.Up_conv2 = conv_block(filters[1], filters[0])

        # 1x1x1 convolution producing the `output_ch`-channel segmentation map
        self.Conv = nn.Conv3d(filters[0],
                              output_ch,
                              kernel_size=1,
                              stride=1,
                              padding=0)

        # adaptive average pooling layers and a fully connected head producing two-class log-probabilities
        self.extra_feature1 = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)))
        self.extra_feature2 = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)))
        self.extra_feature3 = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)))
        self.extra_feature4 = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)))
        self.extra_feature5 = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)))
        self.extra_feature6 = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)))
        self.extra_feature7 = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)))
        self.extra_feature8 = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)))
        self.fc1 = nn.Sequential(nn.Linear(688, 256), nn.Dropout(0.5))  # 688 = size of the concatenated pooled features (set by the forward pass, not shown here)
        self.fc2 = nn.Sequential(nn.Linear(256, 2), nn.LogSoftmax(dim=1))

        # ASPP branch with dilation rates 1, 6, 12 and 18
        rates = (1, 6, 12, 18)

        self.aspp1 = ASPP_module(16, 16, rate=rates[0])
        self.aspp2 = ASPP_module(16, 16, rate=rates[1])
        self.aspp3 = ASPP_module(16, 16, rate=rates[2])
        self.aspp4 = ASPP_module(16, 16, rate=rates[3])

        self.aspp_conv = nn.Conv3d(64, 64, 1)
        self.aspp_gn = nn.GroupNorm(32, 64)
        self.Out = nn.Conv3d(64, output_ch, kernel_size=1, stride=1, padding=0)
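
conv_block, up_conv, Attention_block and ASPP_module are helper modules defined elsewhere and are not part of this snippet. For reference, a plausible 3D version of the standard attention gate (in the spirit of Attention U-Net) is sketched below; the layer names W_g, W_x and psi are illustrative assumptions, not necessarily the implementation used by this example.

import torch
import torch.nn as nn

class Attention_block(nn.Module):
    # illustrative 3D attention gate: gate the encoder skip connection with the decoder features
    def __init__(self, F_g, F_l, F_int):
        super().__init__()
        # project gating signal and skip connection to F_int channels
        self.W_g = nn.Sequential(nn.Conv3d(F_g, F_int, 1), nn.BatchNorm3d(F_int))
        self.W_x = nn.Sequential(nn.Conv3d(F_l, F_int, 1), nn.BatchNorm3d(F_int))
        # collapse to a single-channel attention map in [0, 1]
        self.psi = nn.Sequential(nn.Conv3d(F_int, 1, 1), nn.BatchNorm3d(1), nn.Sigmoid())
        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        att = self.psi(self.relu(self.W_g(g) + self.W_x(x)))
        return x * att  # re-weight the skip connection

# smoke test on dummy feature maps
g = torch.randn(1, 128, 8, 8, 8)   # decoder (gating) features
x = torch.randn(1, 128, 8, 8, 8)   # encoder (skip) features
print(Attention_block(128, 128, 64)(g, x).shape)  # torch.Size([1, 128, 8, 8, 8])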
Example no. 2
    # constructor of a small Dropout3d wrapper module
    def __init__(self, doRate):
        super(Dropout3D_, self).__init__()
        self.do = nn.Dropout3d(doRate)

    # constructor of MRConvNet: two 3x3x3 convolutions with batch norm and dropout in between
    def __init__(self, nChans=[16, 1], kernel_size=3):
        super(MRConvNet, self).__init__()
        self.conv1 = nn.Conv3d(1, nChans[0], kernel_size, padding=1)
        self.bnorm = nn.BatchNorm3d(nChans[0])
        self.drop1 = nn.Dropout3d(p=0.3)
        self.conv2 = nn.Conv3d(nChans[0], nChans[1], kernel_size, padding=1)
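
The forward pass of MRConvNet is not included in the snippet. A self-contained sketch, assuming the layers are applied in declaration order (conv1 -> bnorm -> ReLU -> drop1 -> conv2); with a 3x3x3 kernel and padding=1 the spatial size is preserved:

import torch
import torch.nn as nn
import torch.nn.functional as F

class MRConvNet(nn.Module):
    # same layers as above; the forward order is an assumption
    def __init__(self, nChans=(16, 1), kernel_size=3):
        super().__init__()
        self.conv1 = nn.Conv3d(1, nChans[0], kernel_size, padding=1)
        self.bnorm = nn.BatchNorm3d(nChans[0])
        self.drop1 = nn.Dropout3d(p=0.3)
        self.conv2 = nn.Conv3d(nChans[0], nChans[1], kernel_size, padding=1)

    def forward(self, x):
        x = self.drop1(F.relu(self.bnorm(self.conv1(x))))
        return self.conv2(x)

x = torch.randn(2, 1, 32, 32, 32)   # (batch, channel, D, H, W)
print(MRConvNet()(x).shape)          # torch.Size([2, 1, 32, 32, 32])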
Example no. 4
    def __init__(self, column_units):
        super(Model, self).__init__()
        self.block1 = nn.Sequential(
            nn.Conv3d(3,
                      32,
                      kernel_size=(3, 5, 5),
                      stride=(1, 2, 2),
                      dilation=(1, 1, 1),
                      padding=(1, 2, 2)),
            nn.BatchNorm3d(32),
            nn.ReLU(inplace=True),
            nn.Dropout3d(p=0.2),
        )

        self.block2 = nn.Sequential(
            nn.Conv3d(32,
                      64,
                      kernel_size=(3, 3, 3),
                      stride=1,
                      dilation=(1, 1, 1),
                      padding=(1, 1, 1)),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
            nn.Conv3d(64,
                      128,
                      kernel_size=(3, 3, 3),
                      stride=(1, 2, 2),
                      dilation=(1, 1, 1),
                      padding=(1, 1, 1)),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.AvgPool3d(kernel_size=(3, 1, 1),
                         stride=(2, 1, 1),
                         padding=(1, 0, 0)),
            nn.Dropout3d(p=0.2),
        )

        self.block3 = nn.Sequential(
            nn.Conv3d(128,
                      128,
                      kernel_size=(3, 3, 3),
                      stride=1,
                      dilation=(1, 1, 1),
                      padding=(1, 1, 1)),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.Conv3d(128,
                      128,
                      kernel_size=(3, 3, 3),
                      stride=1,
                      dilation=(1, 1, 1),
                      padding=(1, 1, 1)),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.Conv3d(128,
                      256,
                      kernel_size=(3, 3, 3),
                      stride=(1, 2, 2),
                      dilation=(1, 1, 1),
                      padding=(1, 1, 1)),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            nn.Dropout3d(p=0.2),
        )

        self.block4 = nn.Sequential(
            nn.Conv3d(256,
                      256,
                      kernel_size=(3, 3, 3),
                      stride=1,
                      dilation=(1, 1, 1),
                      padding=(1, 1, 1)),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            nn.Conv3d(256,
                      256,
                      kernel_size=(3, 3, 3),
                      stride=1,
                      dilation=(1, 1, 1),
                      padding=(1, 1, 1)),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            nn.Conv3d(256,
                      512,
                      kernel_size=(3, 3, 3),
                      stride=(1, 2, 2),
                      dilation=(1, 1, 1),
                      padding=(1, 1, 1)),
            nn.BatchNorm3d(512),
            nn.ReLU(inplace=True),
            nn.Dropout3d(p=0.2),
        )

        self.block5 = nn.Sequential(
            nn.Conv3d(512,
                      512,
                      kernel_size=(3, 3, 3),
                      stride=1,
                      dilation=(1, 1, 1),
                      padding=(1, 1, 1)),
            nn.BatchNorm3d(512),
            nn.ReLU(inplace=True),
            nn.Conv3d(512,
                      512,
                      kernel_size=(3, 3, 3),
                      stride=(1, 2, 2),
                      dilation=(1, 1, 1),
                      padding=(1, 1, 1)),
            nn.BatchNorm3d(512),
            nn.ReLU(inplace=True),
        )
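
Only the five feature blocks of Model are shown; the rest of the class (including how column_units is used) is omitted. The sketch below rebuilds the first two blocks standalone to show how the (1, 2, 2) strides and the temporal average pool change the tensor shape on a hypothetical 16-frame RGB clip:

import torch
import torch.nn as nn

block1 = nn.Sequential(
    nn.Conv3d(3, 32, kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2)),
    nn.BatchNorm3d(32), nn.ReLU(inplace=True), nn.Dropout3d(p=0.2))
block2 = nn.Sequential(
    nn.Conv3d(32, 64, kernel_size=3, stride=1, padding=1),
    nn.BatchNorm3d(64), nn.ReLU(inplace=True),
    nn.Conv3d(64, 128, kernel_size=3, stride=(1, 2, 2), padding=1),
    nn.BatchNorm3d(128), nn.ReLU(inplace=True),
    nn.AvgPool3d(kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0)),
    nn.Dropout3d(p=0.2))

x = torch.randn(1, 3, 16, 112, 112)  # hypothetical clip: (batch, RGB, frames, H, W)
y1 = block1(x)
print(y1.shape)   # torch.Size([1, 32, 16, 56, 56]): spatial /2, temporal size unchanged
y2 = block2(y1)
print(y2.shape)   # torch.Size([1, 128, 8, 28, 28]): spatial /2 again, temporal /2 via the pool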
Example no. 5
    def __init__(self, in_channels, n_classes, base_n_filter=8):
        super(Modified3DUNet, self).__init__()
        self.in_channels = in_channels
        self.n_classes = n_classes
        self.base_n_filter = base_n_filter

        self.lrelu = nn.LeakyReLU()
        self.dropout3d = nn.Dropout3d(p=0.6)
        self.upsacle = nn.Upsample(scale_factor=2, mode='nearest')
        self.softmax = nn.Softmax(dim=1)

        # Level 1 context pathway
        self.conv3d_c1_1 = nn.Conv3d(self.in_channels, self.base_n_filter, kernel_size=3, stride=1, padding=1,
                                     bias=False)
        self.conv3d_c1_2 = nn.Conv3d(self.base_n_filter, self.base_n_filter, kernel_size=3, stride=1, padding=1,
                                     bias=False)
        self.lrelu_conv_c1 = self.lrelu_conv(self.base_n_filter, self.base_n_filter)
        self.inorm3d_c1 = nn.InstanceNorm3d(self.base_n_filter)

        # Level 2 context pathway
        self.conv3d_c2 = nn.Conv3d(self.base_n_filter, self.base_n_filter * 2, kernel_size=3, stride=2, padding=1,
                                   bias=False)
        self.norm_lrelu_conv_c2 = self.norm_lrelu_conv(self.base_n_filter * 2, self.base_n_filter * 2)
        self.inorm3d_c2 = nn.InstanceNorm3d(self.base_n_filter * 2)

        # Level 3 context pathway
        self.conv3d_c3 = nn.Conv3d(self.base_n_filter * 2, self.base_n_filter * 4, kernel_size=3, stride=2, padding=1,
                                   bias=False)
        self.norm_lrelu_conv_c3 = self.norm_lrelu_conv(self.base_n_filter * 4, self.base_n_filter * 4)
        self.inorm3d_c3 = nn.InstanceNorm3d(self.base_n_filter * 4)

        # Level 4 context pathway
        self.conv3d_c4 = nn.Conv3d(self.base_n_filter * 4, self.base_n_filter * 8, kernel_size=3, stride=2, padding=1,
                                   bias=False)
        self.norm_lrelu_conv_c4 = self.norm_lrelu_conv(self.base_n_filter * 8, self.base_n_filter * 8)
        self.inorm3d_c4 = nn.InstanceNorm3d(self.base_n_filter * 8)

        # Level 5 context pathway, level 0 localization pathway
        self.conv3d_c5 = nn.Conv3d(self.base_n_filter * 8, self.base_n_filter * 16, kernel_size=3, stride=2, padding=1,
                                   bias=False)
        self.norm_lrelu_conv_c5 = self.norm_lrelu_conv(self.base_n_filter * 16, self.base_n_filter * 16)
        self.norm_lrelu_upscale_conv_norm_lrelu_l0 = self.norm_lrelu_upscale_conv_norm_lrelu(self.base_n_filter * 16,
                                                                                             self.base_n_filter * 8)

        self.conv3d_l0 = nn.Conv3d(self.base_n_filter * 8, self.base_n_filter * 8, kernel_size=1, stride=1, padding=0,
                                   bias=False)
        self.inorm3d_l0 = nn.InstanceNorm3d(self.base_n_filter * 8)

        # Level 1 localization pathway
        self.conv_norm_lrelu_l1 = self.conv_norm_lrelu(self.base_n_filter * 16, self.base_n_filter * 16)
        self.conv3d_l1 = nn.Conv3d(self.base_n_filter * 16, self.base_n_filter * 8, kernel_size=1, stride=1, padding=0,
                                   bias=False)
        self.norm_lrelu_upscale_conv_norm_lrelu_l1 = self.norm_lrelu_upscale_conv_norm_lrelu(self.base_n_filter * 8,
                                                                                             self.base_n_filter * 4)

        # Level 2 localization pathway
        self.conv_norm_lrelu_l2 = self.conv_norm_lrelu(self.base_n_filter * 8, self.base_n_filter * 8)
        self.conv3d_l2 = nn.Conv3d(self.base_n_filter * 8, self.base_n_filter * 4, kernel_size=1, stride=1, padding=0,
                                   bias=False)
        self.norm_lrelu_upscale_conv_norm_lrelu_l2 = self.norm_lrelu_upscale_conv_norm_lrelu(self.base_n_filter * 4,
                                                                                             self.base_n_filter * 2)

        # Level 3 localization pathway
        self.conv_norm_lrelu_l3 = self.conv_norm_lrelu(self.base_n_filter * 4, self.base_n_filter * 4)
        self.conv3d_l3 = nn.Conv3d(self.base_n_filter * 4, self.base_n_filter * 2, kernel_size=1, stride=1, padding=0,
                                   bias=False)
        self.norm_lrelu_upscale_conv_norm_lrelu_l3 = self.norm_lrelu_upscale_conv_norm_lrelu(self.base_n_filter * 2,
                                                                                             self.base_n_filter)

        # Level 4 localization pathway
        self.conv_norm_lrelu_l4 = self.conv_norm_lrelu(self.base_n_filter * 2, self.base_n_filter * 2)
        self.conv3d_l4 = nn.Conv3d(self.base_n_filter * 2, self.n_classes, kernel_size=1, stride=1, padding=0,
                                   bias=False)

        self.ds2_1x1_conv3d = nn.Conv3d(self.base_n_filter * 8, self.n_classes, kernel_size=1, stride=1, padding=0,
                                        bias=False)
        self.ds3_1x1_conv3d = nn.Conv3d(self.base_n_filter * 4, self.n_classes, kernel_size=1, stride=1, padding=0,
                                        bias=False)
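
lrelu_conv, norm_lrelu_conv, conv_norm_lrelu and norm_lrelu_upscale_conv_norm_lrelu are methods defined elsewhere in Modified3DUNet. Judging by their names, they are probably small nn.Sequential factories along the following lines (shown as free functions for brevity; an assumption, not the original code):

import torch.nn as nn

def lrelu_conv(feat_in, feat_out):
    # LeakyReLU -> 3x3x3 convolution
    return nn.Sequential(
        nn.LeakyReLU(),
        nn.Conv3d(feat_in, feat_out, kernel_size=3, stride=1, padding=1, bias=False))

def norm_lrelu_conv(feat_in, feat_out):
    # InstanceNorm -> LeakyReLU -> 3x3x3 convolution
    return nn.Sequential(
        nn.InstanceNorm3d(feat_in),
        nn.LeakyReLU(),
        nn.Conv3d(feat_in, feat_out, kernel_size=3, stride=1, padding=1, bias=False))

def conv_norm_lrelu(feat_in, feat_out):
    # 3x3x3 convolution -> InstanceNorm -> LeakyReLU
    return nn.Sequential(
        nn.Conv3d(feat_in, feat_out, kernel_size=3, stride=1, padding=1, bias=False),
        nn.InstanceNorm3d(feat_out),
        nn.LeakyReLU())

def norm_lrelu_upscale_conv_norm_lrelu(feat_in, feat_out):
    # InstanceNorm -> LeakyReLU -> 2x nearest upsampling -> 3x3x3 conv -> InstanceNorm -> LeakyReLU
    return nn.Sequential(
        nn.InstanceNorm3d(feat_in),
        nn.LeakyReLU(),
        nn.Upsample(scale_factor=2, mode='nearest'),
        nn.Conv3d(feat_in, feat_out, kernel_size=3, stride=1, padding=1, bias=False),
        nn.InstanceNorm3d(feat_out),
        nn.LeakyReLU())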
Example no. 6
    def __init__(self):
        super(Net, self).__init__()
        # The first few layers consume the most memory, so plain convolutions are used here to save memory.
        # Call these layers preBlock, i.e. the layers before the residual blocks that follow.
        self.preBlock = nn.Sequential(
            nn.Conv3d(1, 24, kernel_size=3, padding=1), nn.BatchNorm3d(24),
            nn.ReLU(inplace=True), nn.Conv3d(24, 24, kernel_size=3, padding=1),
            nn.BatchNorm3d(24), nn.ReLU(inplace=True))

        # Each pooling stage downsamples the feature map by a factor of 2.
        # Four groups of residual blocks on the downsampling path; the first block of each group changes the channel width.
        num_blocks_forw = [2, 2, 3, 3]  # number of PostRes blocks per downsampling (forw) group
        num_blocks_back = [3, 3]  # number of PostRes blocks per upsampling (back) group
        self.featureNum_forw = [24, 32, 64, 64, 64]  # channel widths along the downsampling path
        self.featureNum_back = [128, 64, 64]  # channel widths along the upsampling path
        for i in range(len(num_blocks_forw)):
            blocks = []
            for j in range(num_blocks_forw[i]):
                if j == 0:
                    blocks.append(
                        PostRes(self.featureNum_forw[i],
                                self.featureNum_forw[i + 1]))
                else:
                    blocks.append(
                        PostRes(self.featureNum_forw[i + 1],
                                self.featureNum_forw[i + 1]))
            setattr(self, 'forw' + str(i + 1), nn.Sequential(*blocks))

        for i in range(len(num_blocks_back)):
            blocks = []
            for j in range(num_blocks_back[i]):
                if j == 0:
                    if i == 0:
                        addition = 3
                    else:
                        addition = 0
                    blocks.append(
                        PostRes(
                            self.featureNum_back[i + 1] +
                            self.featureNum_forw[i + 2] + addition,
                            self.featureNum_back[i]))
                else:
                    blocks.append(
                        PostRes(self.featureNum_back[i],
                                self.featureNum_back[i]))
            setattr(self, 'back' + str(i + 2), nn.Sequential(*blocks))

        self.maxpool1 = nn.MaxPool3d(kernel_size=2,
                                     stride=2,
                                     return_indices=True)
        self.maxpool2 = nn.MaxPool3d(kernel_size=2,
                                     stride=2,
                                     return_indices=True)
        self.maxpool3 = nn.MaxPool3d(kernel_size=2,
                                     stride=2,
                                     return_indices=True)
        self.maxpool4 = nn.MaxPool3d(kernel_size=2,
                                     stride=2,
                                     return_indices=True)
        self.unmaxpool1 = nn.MaxUnpool3d(kernel_size=2, stride=2)
        self.unmaxpool2 = nn.MaxUnpool3d(kernel_size=2, stride=2)

        self.path1 = nn.Sequential(
            nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
            nn.BatchNorm3d(64), nn.ReLU(inplace=True))
        self.path2 = nn.Sequential(
            nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
            nn.BatchNorm3d(64), nn.ReLU(inplace=True))
        self.drop = nn.Dropout3d(p=0.5, inplace=False)
        self.output = nn.Sequential(
            nn.Conv3d(self.featureNum_back[0], 64, kernel_size=1),
            nn.ReLU(),
            # nn.Dropout3d(p = 0.3),
            nn.Conv3d(64, 5 * len(config['anchors']), kernel_size=1))
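
PostRes, used in both loops above, is not shown. A typical 3D residual block matching the (in_channels, out_channels) call signature could look like the sketch below; the 1x1x1 projection on the shortcut for n_in != n_out is an assumption:

import torch.nn as nn
import torch.nn.functional as F

class PostRes(nn.Module):
    # illustrative residual block: conv-bn-relu-conv-bn plus a (possibly projected) shortcut
    def __init__(self, n_in, n_out, stride=1):
        super().__init__()
        self.conv1 = nn.Conv3d(n_in, n_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm3d(n_out)
        self.conv2 = nn.Conv3d(n_out, n_out, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm3d(n_out)
        if stride != 1 or n_in != n_out:
            # project the shortcut when the shape changes
            self.shortcut = nn.Sequential(
                nn.Conv3d(n_in, n_out, kernel_size=1, stride=stride),
                nn.BatchNorm3d(n_out))
        else:
            self.shortcut = None

    def forward(self, x):
        residual = x if self.shortcut is None else self.shortcut(x)
        out = self.bn2(self.conv2(F.relu(self.bn1(self.conv1(x)))))
        return F.relu(out + residual)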
Example no. 7
    def __init__(
        self,
        maxpool,
        in_channels,
        out_channels,
        kernel_sizes,
        strides,
        dilatations,
        padding,
        batchnorm,
        n_classes,
        is_bayesian,
        max_fvc,
        n_kernels,
        random_node='output',
        activation=torch.nn.ReLU,
        n_res=3,
        gated=True,
        has_dense=True,
        resblocks=False,
    ):
        super().__init__()
        self.max_fvc = max_fvc
        if torch.cuda.is_available():
            device = 'cuda'
        else:
            device = 'cpu'
        self.maxpool = nn.MaxPool3d(maxpool, return_indices=False)

        self.is_bayesian = is_bayesian
        if is_bayesian:
            self.GaussianSample = GaussianUnitSample(37, 37)
        self.GaussianBlock = GaussianBlock(1, 1, n_kernels=n_kernels)
        self.device = device
        self.conv_layers = nn.ModuleList()
        self.deconv_layers = nn.ModuleList()
        self.bns = nn.ModuleList()
        self.dropout = nn.ModuleList()
        self.resconv = nn.ModuleList()
        self.activation = activation()

        self.n_res = n_res

        self.resblocks = resblocks
        self.has_dense = has_dense
        self.batchnorm = batchnorm
        self.a_dim = None
        for i, (ins, outs, ksize, stride, dilats, pad) in enumerate(
                zip(in_channels, out_channels, kernel_sizes, strides,
                    dilatations, padding)):
            if not gated:
                self.conv_layers += [
                    torch.nn.Conv3d(
                        in_channels=ins,
                        out_channels=outs,
                        kernel_size=ksize,
                        stride=stride,
                        padding=pad,
                        dilation=dilats,
                    )
                ]
            else:
                self.conv_layers += [
                    GatedConv3d(input_channels=ins,
                                output_channels=outs,
                                kernel_size=ksize,
                                stride=stride,
                                padding=pad,
                                dilation=dilats,
                                activation=nn.Tanh())
                ]
            if resblocks and i != 0:
                for _ in range(n_res):
                    self.resconv += [ResBlock3D(ins, outs, activation, device)]
            self.bns += [nn.BatchNorm3d(num_features=outs)]
        self.dropout3d = nn.Dropout3d(0.5)
        self.dense1 = torch.nn.Linear(in_features=out_channels[-1],
                                      out_features=32)
        self.dense1_bn = nn.BatchNorm1d(num_features=32)
        self.dense2 = torch.nn.Linear(
            in_features=32 + 5,
            out_features=n_classes)  # 5 parameters added here
        self.dense2_bn = nn.BatchNorm1d(num_features=n_classes)
        self.dropout = nn.Dropout(0.5)  # overwrites the empty ModuleList assigned to self.dropout above
        self.log_softmax = torch.nn.functional.log_softmax
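
GatedConv3d, ResBlock3D, GaussianBlock and GaussianUnitSample come from the surrounding project and are not reproduced here. A common formulation of a gated 3D convolution that matches the constructor arguments used above is sketched below as an assumption:

import torch
import torch.nn as nn

class GatedConv3d(nn.Module):
    # illustrative gated convolution: output = activation(conv_f(x)) * sigmoid(conv_g(x))
    def __init__(self, input_channels, output_channels, kernel_size,
                 stride, padding, dilation, activation=None):
        super().__init__()
        self.activation = activation
        self.conv_f = nn.Conv3d(input_channels, output_channels, kernel_size,
                                stride=stride, padding=padding, dilation=dilation)
        self.conv_g = nn.Conv3d(input_channels, output_channels, kernel_size,
                                stride=stride, padding=padding, dilation=dilation)

    def forward(self, x):
        h = self.conv_f(x)
        if self.activation is not None:
            h = self.activation(h)
        return h * torch.sigmoid(self.conv_g(x))

x = torch.randn(2, 1, 16, 16, 16)
layer = GatedConv3d(1, 8, kernel_size=3, stride=1, padding=1, dilation=1, activation=nn.Tanh())
print(layer(x).shape)  # torch.Size([2, 8, 16, 16, 16])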
Example no. 8
    def __init__(self,
                 in_channel=1,
                 out_channel=3,
                 filters=[8, 12, 16, 20, 24],
                 act='sigmoid',
                 aux=False):
        #def __init__(self, in_channel=1, out_channel=3, filters=[28, 36, 48, 64, 80]):
        super().__init__()

        # encoding path
        self.layer1_E = nn.Sequential(
            conv3d_bn_elu(in_planes=in_channel,
                          out_planes=filters[0],
                          kernel_size=(1, 5, 5),
                          stride=1,
                          padding=(0, 2, 2)),
            conv3d_bn_elu(in_planes=filters[0],
                          out_planes=filters[0],
                          kernel_size=(1, 3, 3),
                          stride=1,
                          padding=(0, 1, 1)),
            residual_block_2d(filters[0], filters[0], projection=False))
        self.layer2_E = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[0],
                          out_planes=filters[1],
                          kernel_size=(1, 3, 3),
                          stride=1,
                          padding=(0, 1, 1)),
            residual_block_3d(filters[1], filters[1], projection=False))
        self.layer3_E = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[1],
                          out_planes=filters[2],
                          kernel_size=(1, 3, 3),
                          stride=1,
                          padding=(0, 1, 1)),
            residual_block_3d(filters[2], filters[2], projection=False))
        self.layer4_E = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[2],
                          out_planes=filters[3],
                          kernel_size=(1, 3, 3),
                          stride=1,
                          padding=(0, 1, 1)),
            residual_block_3d(filters[3], filters[3], projection=False))

        # center block
        self.center = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[3],
                          out_planes=filters[4],
                          kernel_size=(1, 3, 3),
                          stride=1,
                          padding=(0, 1, 1)),
            residual_block_3d(filters[4], filters[4], projection=True))

        # decoding path
        self.layer1_D = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[0],
                          out_planes=filters[0],
                          kernel_size=(1, 3, 3),
                          stride=1,
                          padding=(0, 1, 1)),
            residual_block_2d(filters[0], filters[0], projection=False),
            conv3d_bn_non(in_planes=filters[0],
                          out_planes=out_channel,
                          kernel_size=(1, 5, 5),
                          stride=1,
                          padding=(0, 2, 2)))
        self.layer2_D = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[1],
                          out_planes=filters[1],
                          kernel_size=(1, 3, 3),
                          stride=1,
                          padding=(0, 1, 1)),
            residual_block_3d(filters[1], filters[1], projection=False))
        self.layer3_D = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[2],
                          out_planes=filters[2],
                          kernel_size=(1, 3, 3),
                          stride=1,
                          padding=(0, 1, 1)),
            residual_block_3d(filters[2], filters[2], projection=False))
        self.layer4_D = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[3],
                          out_planes=filters[3],
                          kernel_size=(1, 3, 3),
                          stride=1,
                          padding=(0, 1, 1)),
            residual_block_3d(filters[3], filters[3], projection=False))

        # pooling & upsample
        self.down = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
        self.up = nn.Upsample(scale_factor=(1, 2, 2),
                              mode='trilinear',
                              align_corners=False)

        # conv + upsample
        self.conv1 = conv3d_bn_elu(filters[1],
                                   filters[0],
                                   kernel_size=(1, 1, 1),
                                   padding=(0, 0, 0))
        self.conv2 = conv3d_bn_elu(filters[2],
                                   filters[1],
                                   kernel_size=(1, 1, 1),
                                   padding=(0, 0, 0))
        self.conv3 = conv3d_bn_elu(filters[3],
                                   filters[2],
                                   kernel_size=(1, 1, 1),
                                   padding=(0, 0, 0))
        self.conv4 = conv3d_bn_elu(filters[4],
                                   filters[3],
                                   kernel_size=(1, 1, 1),
                                   padding=(0, 0, 0))

        if act == 'tanh':
            self.act = nn.Tanh()
        else:
            self.act = nn.Sigmoid()

        self.aux = aux

        if self.aux == 1:
            self.dsn = nn.Sequential(
                conv3d_bn_elu(filters[2],
                              filters[2] // 2,
                              kernel_size=(1, 3, 3),
                              stride=1,
                              padding=(0, 1, 1)), nn.Dropout3d(0.1),
                nn.Conv3d(filters[2] // 2,
                          out_channel,
                          kernel_size=(1, 1, 1),
                          stride=1,
                          padding=(0, 0, 0),
                          bias=True))
        else:
            self.dsn = None
        # weight initialization
        ortho_init(self)
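
conv3d_bn_elu and conv3d_bn_non are helper factories from the same codebase. Going by their names, they most likely build Conv3d + BatchNorm3d followed by an ELU (or by no activation); a minimal sketch under that assumption:

import torch.nn as nn

def conv3d_bn_elu(in_planes, out_planes, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1)):
    # convolution -> batch norm -> ELU (assumed composition)
    return nn.Sequential(
        nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                  stride=stride, padding=padding, bias=False),
        nn.BatchNorm3d(out_planes),
        nn.ELU(inplace=True))

def conv3d_bn_non(in_planes, out_planes, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1)):
    # the same block without the non-linearity, used here for the final projection
    return nn.Sequential(
        nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                  stride=stride, padding=padding, bias=False),
        nn.BatchNorm3d(out_planes))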
Example no. 9
    def __init__(self,
                 n_inp=1,
                 feats=[32, 64, 64, 128, 128, 256, 256],
                 blocks=[2, 2, 2, 2, 2, 2, 2],
                 n_pred_p=[1, 1, 1],
                 n_pred_d=[4, 4, 4],
                 L_output=[0, 1, 2],
                 abn=1,
                 dropout_ratio=0.0):
        super().__init__()

        self.relu = nn.ReLU(inplace=True)
        self.pixel_shuffle = nn.PixelShuffle(2)
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.n_inp = n_inp
        if abn == 0:
            abnblock = ABN
        elif abn == 1:
            abnblock = InPlaceABN
        else:
            abnblock = InPlaceABNSync

        Conv3d_ABN.define_abn(abnblock)
        Bottleneck_ABN.define_abn(abnblock)

        self.strides = np.array([[1, 1, 1], [2, 2, 2], [4, 4, 4], [8, 8, 8],
                                 [16, 16, 16]])
        for i in range(6):
            if i == 0:
                setattr(self, 'l' + str(i),
                        build_layer(n_inp, feats[i], blocks[i]))
                setattr(self, 'r' + str(i),
                        build_layer(feats[i], feats[i], blocks[i], skip=True))
            else:
                setattr(
                    self, 'l' + str(i),
                    build_layer(feats[i - 1], feats[i], blocks[i], stride=2))
                setattr(
                    self, 'rnnlz' + str(i),
                    nn.LSTM(feats[i], feats[i] // 2, 1, bidirectional=True))
                setattr(self, 'rnnlz_bn' + str(i), abnblock(feats[i]))
                setattr(
                    self, 'rnnlx' + str(i),
                    nn.LSTM(feats[i], feats[i] // 2, 1, bidirectional=True))
                setattr(self, 'rnnlx_bn' + str(i), abnblock(feats[i]))
                setattr(
                    self, 'rnnly' + str(i),
                    nn.LSTM(feats[i], feats[i] // 2, 1, bidirectional=True))
                setattr(self, 'rnnly_bn' + str(i), abnblock(feats[i]))
                setattr(self, 'r' + str(i),
                        build_layer(feats[i], feats[i], blocks[i], skip=True))
                setattr(self, 'rt' + str(i),
                        build_layer(feats[i], feats[i - 1], blocks[i]))

        self.r0 = Conv3d_ABN(feats[0], feats[0], kernel_size=3, padding=1)  # replaces the 'r0' layer registered in the loop above
        self.out_layer = nn.Conv3d(feats[0], 1, kernel_size=1, stride=1)
        self.drop_out = nn.Dropout3d(p=dropout_ratio)
        for c_out1, c_out2, out in zip(n_pred_p, n_pred_d, L_output):
            #outp = build_layer(feats[out], feats[out] // 4, 0, skip=False)
            outp = nn.Conv3d(feats[out], feats[out], kernel_size=3, padding=1)
            setattr(self, 'before_out' + str(out) + 'p', outp)
            #outd = build_layer(feats[out], feats[out] // 4, 0, skip=False)
            outd = nn.Conv3d(feats[out], feats[out], kernel_size=3, padding=1)
            setattr(self, 'before_out' + str(out) + 'd', outd)

        for c_out1, c_out2, out in zip(n_pred_p, n_pred_d, L_output):
            setattr(self, 'out' + str(out) + 'p',
                    nn.Conv3d(feats[out], c_out1, kernel_size=3, padding=1))
            outd = nn.Conv3d(feats[out], c_out2, kernel_size=3, padding=1)
            #outd.weight.data.fill_(0.0)
            outd.bias.data.fill_(0.0)
            setattr(self, 'out' + str(out) + 'd', outd)

        self.L_output = L_output
        self.strides = self.strides[np.array(L_output)]
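
Because the per-level layers are registered with setattr inside the loop, the forward pass (not shown) presumably retrieves them with getattr. A small self-contained illustration of that registration pattern, with hypothetical layer names:

import torch
import torch.nn as nn

class TinyPyramid(nn.Module):
    # illustrative only: register per-level layers with setattr, retrieve them with getattr
    def __init__(self, feats=(8, 16, 32)):
        super().__init__()
        self.n_levels = len(feats)
        for i, f in enumerate(feats):
            in_ch = 1 if i == 0 else feats[i - 1]
            setattr(self, 'l' + str(i),
                    nn.Conv3d(in_ch, f, kernel_size=3, stride=1 if i == 0 else 2, padding=1))

    def forward(self, x):
        for i in range(self.n_levels):
            x = torch.relu(getattr(self, 'l' + str(i))(x))
        return x

print(TinyPyramid()(torch.randn(1, 1, 16, 16, 16)).shape)  # torch.Size([1, 32, 4, 4, 4])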
Example no. 10
    def __init__(
            self,
            n_classes=2,
            in_channels=3,
            norm_type='GN_8',  # default group norm with 8 groups
    ):
        super(RSANet, self).__init__()

        filters = [32, 64, 128, 256, 512]

        self.init_conv = nn.Conv3d(in_channels,
                                   filters[0],
                                   3,
                                   1,
                                   1,
                                   bias=False)
        self.dropout = nn.Dropout3d(0.2)

        self.conv1 = unetConvBlock(filters[0],
                                   filters[0],
                                   norm_type,
                                   num_convs=2)
        self.down1 = unetDownSample(filters[0],
                                    down_type='conv',
                                    norm_type=norm_type)

        self.conv2 = unetConvBlock(filters[0],
                                   filters[1],
                                   norm_type,
                                   num_convs=2)
        self.down2 = unetDownSample(filters[1],
                                    down_type='conv',
                                    norm_type=norm_type)

        self.conv3 = unetConvBlock(filters[1],
                                   filters[2],
                                   norm_type,
                                   num_convs=2)
        self.down3 = unetDownSample(filters[2],
                                    down_type='conv',
                                    norm_type=norm_type)

        self.conv4 = unetConvBlock(filters[2],
                                   filters[3],
                                   norm_type,
                                   num_convs=2)
        self.down4 = unetDownSample(filters[3],
                                    down_type='conv',
                                    norm_type=norm_type)

        self.center = unetConvBlock(filters[3],
                                    filters[4],
                                    norm_type,
                                    num_convs=2)

        self.up_concat4 = unetUpPadCatConv(filters[3], filters[4], False,
                                           norm_type)
        self.up_concat3 = unetUpPadCatConv(filters[2], filters[3], False,
                                           norm_type)
        self.up_concat2 = unetUpPadCatConv(filters[1], filters[2], False,
                                           norm_type)
        self.up_concat1 = unetUpPadCatConv(filters[0], filters[1], False,
                                           norm_type)

        self.final = nn.Sequential(
            nn.Conv3d(filters[0], filters[0], 3, 1, 1, bias=False),
            nn.Conv3d(filters[0], n_classes, 1, bias=False),
        )

        # --------- Recurrent slice-wise attention (RSA) module --------- #
        self.rsa_block = rsaBlock(filters[4])
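
unetConvBlock, unetDownSample, unetUpPadCatConv and rsaBlock are project-specific modules. The norm_type string ('GN_8' = group norm with 8 groups) suggests a small dispatch helper along these lines; this is an assumption, not the original implementation:

import torch.nn as nn

def make_norm(norm_type, num_channels):
    # e.g. 'GN_8' -> GroupNorm with 8 groups; 'BN' -> BatchNorm3d; 'IN' -> InstanceNorm3d
    if norm_type.startswith('GN'):
        num_groups = int(norm_type.split('_')[1])
        return nn.GroupNorm(num_groups, num_channels)
    if norm_type == 'BN':
        return nn.BatchNorm3d(num_channels)
    if norm_type == 'IN':
        return nn.InstanceNorm3d(num_channels)
    raise ValueError('unknown norm_type: ' + norm_type)

print(make_norm('GN_8', 32))  # GroupNorm(8, 32, eps=1e-05, affine=True)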
Example no. 11
    def __init__(self,
                 input_channels,
                 feature_channels,
                 n_stack,
                 n_joint,
                 downsample=1,
                 configs=None):
        super(V2V_HG, self).__init__()
        self.input_channels = input_channels
        self.n_stack = n_stack
        self.n_joint = n_joint
        self.configs = configs

        # fix feature_channels to n_joint (=22) as in SkelVolNet
        # feature_channels = n_joint
        self.feature_channels = feature_channels
        if configs['HG_type'] == 'double':
            HG = HG_double
        elif configs['HG_type'] == 'double2':
            HG = HG_double  # 'double2' also maps to HG_double as written
        elif configs['HG_type'] == 'double2_attention':
            HG = HG_double2_attention

        if downsample > 1:
            self.front_layers = nn.Sequential(
                Basic3DBlock(input_channels, feature_channels, 5),
                Res3DBlock(feature_channels, feature_channels),
                Pool3DBlock(int(downsample), feature_channels))
        else:
            self.front_layers = nn.Sequential(
                Basic3DBlock(input_channels, feature_channels, 5),
                Res3DBlock(feature_channels, feature_channels))
        self.hg_1 = HG(input_channels=feature_channels,
                       output_channels=feature_channels,
                       N=88 // downsample)
        self.joint_output_1 = nn.Sequential(
            Res3DBlock(feature_channels, feature_channels // 2),
            Basic3DBlock(feature_channels // 2, feature_channels // 2, 1),
            nn.Dropout3d(p=0.2),
            nn.Conv3d(feature_channels // 2,
                      n_joint,
                      kernel_size=1,
                      stride=1,
                      padding=0))

        if n_stack > 1:
            self.hg_list = nn.ModuleList([
                HG(input_channels=feature_channels + n_joint,
                   output_channels=feature_channels,
                   N=88 // downsample) for i in range(1, n_stack)
            ])
            self.joint_output_list = nn.ModuleList([
                nn.Sequential(
                    Res3DBlock(feature_channels, feature_channels // 2),
                    Basic3DBlock(feature_channels // 2, feature_channels // 2,
                                 1), nn.Dropout3d(p=0.2),
                    nn.Conv3d(feature_channels // 2,
                              n_joint,
                              kernel_size=1,
                              stride=1,
                              padding=0)) for i in range(1, n_stack)
            ])
        self._initialize_weights()
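
Basic3DBlock, Res3DBlock and Pool3DBlock follow V2V-PoseNet-style naming. Plausible minimal versions that match the call signatures used above (an assumption, not the original code):

import torch.nn as nn
import torch.nn.functional as F

class Basic3DBlock(nn.Module):
    # conv -> batch norm -> ReLU with 'same' padding (assumed)
    def __init__(self, in_planes, out_planes, kernel_size):
        super().__init__()
        self.block = nn.Sequential(
            nn.Conv3d(in_planes, out_planes, kernel_size,
                      stride=1, padding=(kernel_size - 1) // 2),
            nn.BatchNorm3d(out_planes),
            nn.ReLU(inplace=True))

    def forward(self, x):
        return self.block(x)

class Pool3DBlock(nn.Module):
    # plain max pooling; the channel argument seen in the call above is accepted but unused here
    def __init__(self, pool_size, channels=None):
        super().__init__()
        self.pool_size = pool_size

    def forward(self, x):
        return F.max_pool3d(x, kernel_size=self.pool_size, stride=self.pool_size)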