Example #1
    def __init__(self, pv):
        super(AlexNetExplicitTaco, self).__init__()
        self.conv1 = ConvTTN3d(in_channels=3,
                               out_channels=64,
                               kernel_size=11,
                               stride=(1, 3, 4),
                               padding=2,
                               project_variable=pv,
                               bias=True,
                               ksize=(0, 0),
                               fc_in=1,
                               hw=(150, 224))
        self.pool1 = MaxPool3d(kernel_size=3, stride=2)

        self.conv2 = ConvTTN3d(in_channels=64,
                               out_channels=192,
                               kernel_size=5,
                               padding=2,
                               project_variable=pv,
                               bias=True,
                               ksize=(0, 0),
                               fc_in=1,
                               hw=(23, 27))
        self.pool2 = MaxPool3d(kernel_size=3, stride=2)

        self.conv3 = ConvTTN3d(in_channels=192,
                               out_channels=384,
                               kernel_size=3,
                               padding=1,
                               project_variable=pv,
                               bias=True,
                               ksize=(0, 0),
                               fc_in=1,
                               hw=(11, 13))
        self.conv4 = ConvTTN3d(in_channels=384,
                               out_channels=256,
                               kernel_size=3,
                               padding=1,
                               project_variable=pv,
                               bias=True,
                               ksize=(0, 0),
                               fc_in=1,
                               hw=(11, 13))
        self.conv5 = ConvTTN3d(in_channels=256,
                               out_channels=256,
                               kernel_size=3,
                               padding=1,
                               project_variable=pv,
                               bias=True,
                               ksize=(0, 0),
                               fc_in=1,
                               hw=(11, 13))
        self.pool3 = MaxPool3d(kernel_size=3, stride=2)

        # self.pool4 = AdaptiveAvgPool3d(output_size=1)
        self.pool4 = AdaptiveAvgPool3d((1, 6, 6))

        self.fc1 = Linear(256 * 1 * 6 * 6, 4096)
        self.fc2 = Linear(4096, 4096)
        self.fc3 = Linear(4096, pv.label_size)
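Not from the original repo: a quick standalone shape check (the input size is hypothetical) showing how the MaxPool3d(kernel_size=3, stride=2) and AdaptiveAvgPool3d((1, 6, 6)) stages above line up with the 256 * 1 * 6 * 6 features that fc1 expects.

import torch
from torch.nn import MaxPool3d, AdaptiveAvgPool3d

x = torch.randn(2, 256, 4, 13, 13)          # hypothetical conv5 output
pool3 = MaxPool3d(kernel_size=3, stride=2)  # each dim -> floor((n - 3) / 2) + 1
pool4 = AdaptiveAvgPool3d((1, 6, 6))        # fixed output size regardless of input
y = pool4(pool3(x))
print(y.shape)              # torch.Size([2, 256, 1, 6, 6])
print(y.flatten(1).shape)   # torch.Size([2, 9216]) == (N, 256 * 1 * 6 * 6), matching fc1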
Example #2
    def __init__(self, num_channels=1, feat_channels=[64, 256, 256, 512, 1024], residual='conv'):
        # residual: conv for residual input x through 1*1 conv across every layer for downsampling, None for removal of residuals

        super(UNet, self).__init__()

        # Encoder downsamplers
        self.pool1 = MaxPool3d((2, 2, 2))
        self.pool2 = MaxPool3d((2, 2, 2))
        self.pool3 = MaxPool3d((2, 2, 2))
        self.pool4 = MaxPool3d((2, 2, 2))

        # Encoder convolutions
        self.conv_blk1 = Conv3D_Block(num_channels, feat_channels[0], residual=residual)
        self.conv_blk2 = Conv3D_Block(feat_channels[0], feat_channels[1], residual=residual)
        self.conv_blk3 = Conv3D_Block(feat_channels[1], feat_channels[2], residual=residual)
        self.conv_blk4 = Conv3D_Block(feat_channels[2], feat_channels[3], residual=residual)
        self.conv_blk5 = Conv3D_Block(feat_channels[3], feat_channels[4], residual=residual)

        # Decoder convolutions
        self.dec_conv_blk4 = Conv3D_Block(2 * feat_channels[3], feat_channels[3], residual=residual)
        self.dec_conv_blk3 = Conv3D_Block(2 * feat_channels[2], feat_channels[2], residual=residual)
        self.dec_conv_blk2 = Conv3D_Block(2 * feat_channels[1], feat_channels[1], residual=residual)
        self.dec_conv_blk1 = Conv3D_Block(2 * feat_channels[0], feat_channels[0], residual=residual)

        # Decoder upsamplers
        self.deconv_blk4 = Deconv3D_Block(feat_channels[4], feat_channels[3])
        self.deconv_blk3 = Deconv3D_Block(feat_channels[3], feat_channels[2])
        self.deconv_blk2 = Deconv3D_Block(feat_channels[2], feat_channels[1])
        self.deconv_blk1 = Deconv3D_Block(feat_channels[1], feat_channels[0])

        # Final 1*1 Conv Segmentation map
        self.one_conv = Conv3d(feat_channels[0], num_channels, kernel_size=1, stride=1, padding=0, bias=True)

        # Activation function
        self.sigmoid = Sigmoid()
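The snippet only registers modules; the forward pass is not shown. The factor of 2 in every dec_conv_blk is presumably explained by skip connections, i.e. the upsampled decoder features are concatenated with the matching encoder features along the channel axis. A minimal standalone illustration of that channel arithmetic (all shapes are hypothetical, not taken from the repo):

import torch

enc4 = torch.randn(1, 512, 8, 16, 16)    # hypothetical encoder output, feat_channels[3] = 512
up4 = torch.randn(1, 512, 8, 16, 16)     # hypothetical deconv_blk4 output, also 512 channels
dec4_in = torch.cat([up4, enc4], dim=1)  # -> (1, 1024, 8, 16, 16) = 2 * feat_channels[3]
print(dec4_in.shape)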
Example #3
    def __init__(self, in_channels, out_channels=1 ):
        super(SmallUnet, self).__init__()
        ############### ENCODER ##############################
        #### CONV 1 ####
        self.conv1 = ConvBlock3D(in_channels=in_channels, out_channels=16, kernel_size=3, stride=1, dilation=2,
                                 pooling=None, activation=ReLU(inplace=True), same_padding=True)

        self.pool1 = MaxPool3d(kernel_size=2)
        #### CONV 2 ####
        self.conv2 = ConvBlock3D(in_channels=16, out_channels=32, kernel_size=3, stride=1, dilation=2,
                                 pooling=None, activation=ReLU(inplace=True), same_padding=True)

        self.pool2 = MaxPool3d(kernel_size=2)
        ############### DECODER ############################
        #### UPCONV 1 ####
        self.conv3 = ConvBlock3D(in_channels=32, out_channels=32, kernel_size=3, stride=1, dilation=2,
                                 pooling=None, activation=ReLU(inplace=True), same_padding=True)

        self.convt1 = ConvTransposeBlock3D(in_channels=32, out_channels=16, kernel_size=2, stride=2,
                                           activation=ReLU(inplace=True))
        #### UPCONV 2 ####
        self.conv4 = ConvBlock3D(in_channels=16 + 32, out_channels=16, kernel_size=3, stride=1, dilation=1,
                                 activation=ReLU(inplace=True), pooling=None, same_padding=True)

        self.convt2 = ConvTransposeBlock3D(in_channels=16, out_channels=4, kernel_size=2, stride=2,
                                           activation=ReLU(inplace=True))
        #### FINAL LAYER ####

        #self.conv5 = ConvBlock3D(in_channels=4 + 16, out_channels=out_channels, same_padding=True, kernel_size=3,
        #                         pooling=None, activation=Softmax() if out_channels > 2 else Sigmoid())

        self.conv5 = ConvBlock3D(in_channels=4 + 16, out_channels=out_channels, same_padding=True, kernel_size=3,
                                 pooling=None, activation=None)
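The channel counts 16 + 32 (conv4) and 4 + 16 (conv5) suggest that the decoder concatenates each upsampled tensor with the corresponding encoder feature map. A plausible forward wiring inferred from those counts is sketched below; it is an assumption, and the repo's actual forward method may differ.

import torch

def forward(self, x):
    c1 = self.conv1(x)                         # 16 channels, full resolution
    c2 = self.conv2(self.pool1(c1))            # 32 channels, 1/2 resolution
    c3 = self.conv3(self.pool2(c2))            # 32 channels, 1/4 resolution
    u1 = self.convt1(c3)                       # 16 channels, back to 1/2 resolution
    c4 = self.conv4(torch.cat([u1, c2], 1))    # 16 + 32 in -> 16 out
    u2 = self.convt2(c4)                       # 4 channels, back to full resolution
    return self.conv5(torch.cat([u2, c1], 1))  # 4 + 16 in -> out_channels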
Example #4
    def __init__(self, in_channels=3, out_channels=5):
        super(VGG3D, self).__init__()

        self.conv1 = Sequential(
            Conv3d(in_channels, 64, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
            InstanceNorm3d(64, True), ReLU(True),
            Conv3d(64, 64, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
            InstanceNorm3d(64, True), ReLU(True),
            Conv3d(64, 64, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
            InstanceNorm3d(64, True), ReLU(True),
            MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)))
        self.conv2 = Sequential(
            Conv3d(64, 128, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
            InstanceNorm3d(128, True), ReLU(True),
            Conv3d(128, 128, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
            InstanceNorm3d(128, True), ReLU(True),
            Conv3d(128, 128, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
            InstanceNorm3d(128, True), ReLU(True),
            MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)))

        self.conv3 = Sequential(Conv3d(128, 256, kernel_size=3, padding=1),
                                InstanceNorm3d(256, True), ReLU(True),
                                Conv3d(256, 256, kernel_size=3, padding=1),
                                InstanceNorm3d(256, True), ReLU(True),
                                Conv3d(256, 256, kernel_size=3, padding=1),
                                InstanceNorm3d(256, True), ReLU(True),
                                MaxPool3d(2, stride=2))

        self.avgpool = AdaptiveAvgPool3d((1, 1, 1))
        self.fc = Linear(256, out_channels)
Example #5
    def __init__(self, num_channels=1, feat_channels=[4, 8, 16, 32, 64], residual='conv'):

        # residual: conv for residual input x through 1*1 conv across every layer for downsampling, None for removal of residuals

        super(UNet3D, self).__init__()

        layers = [2, 2, 2, 2]
        block = BasicBlock
        self.inplanes = 16
        self.dilation = 1
        self.groups = 1
        self.base_width = 64
        self.conv1 = nn.Conv2d(9, self.inplanes, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.layer1 = self._make_layer(block, 32, layers[0])
        self.layer2 = self._make_layer(block, 64, layers[1], stride=2,
                                       dilate=False)
        self.layer3 = self._make_layer(block, 128, layers[2], stride=2,
                                       dilate=False)

        # Encoder downsamplers
        self.pool1 = MaxPool3d(kernel_size=3, stride=2, padding=1)
        self.pool2 = MaxPool3d(kernel_size=3, stride=2, padding=1)
        self.pool3 = MaxPool3d(kernel_size=3, stride=2, padding=1)
        self.pool4 = MaxPool3d(kernel_size=3, stride=2, padding=1)

        # Encoder convolutions
        self.conv_blk1 = Conv3D_Block(num_channels, feat_channels[0], residual=residual)
        self.conv_blk2 = Conv3D_Block(feat_channels[0], feat_channels[1], residual=residual)
        self.conv_blk3 = Conv3D_Block(feat_channels[1], feat_channels[2], residual=residual)
        self.conv_blk4 = Conv3D_Block(feat_channels[2], feat_channels[3], residual=residual)
        self.conv_blk5 = Conv3D_Block(feat_channels[3], feat_channels[4], residual=residual)

        # Decoder convolutions
        self.dec_conv_blk4 = Conv3D_Block(2*feat_channels[3], feat_channels[3], residual=residual)
        self.dec_conv_blk3 = Conv3D_Block(2*feat_channels[2], feat_channels[2], residual=residual)
        self.dec_conv_blk2 = Conv3D_Block(2*feat_channels[1], feat_channels[1], residual=residual)
        self.dec_conv_blk1 = Conv3D_Block(2*feat_channels[0], feat_channels[0], residual=residual)

        # Decoder upsamplers
        self.deconv_blk4 = Deconv3D_Block(feat_channels[4], feat_channels[3])
        self.deconv_blk3 = Deconv3D_Block(feat_channels[3], feat_channels[2])
        self.deconv_blk2 = Deconv3D_Block(feat_channels[2], feat_channels[1])
        self.deconv_blk1 = Deconv3D_Block(feat_channels[1], feat_channels[0])

        # Final 1*1 Conv Segmentation map
        self.one_conv     = Conv3d(feat_channels[0], num_channels, kernel_size=1, stride=1, padding=0, bias=True)
        self.one_one_conv = Conv3d(8, num_channels, kernel_size=1, stride=1, padding=0, bias=True)

        # Activation function
        self.activation = Sigmoid()
Example #6
    def __init__(self, pv):
        super(VGG19BN_Explicit_3T, self).__init__()

        self.conv1 = ConvTTN3d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn1 = BatchNorm3d(64)
        self.conv2 = ConvTTN3d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn2 = BatchNorm3d(64)
        self.maxpool1 = MaxPool3d(kernel_size=2, padding=0, stride=2)

        self.conv3 = ConvTTN3d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn3 = BatchNorm3d(128)
        self.conv4 = ConvTTN3d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn4 = BatchNorm3d(128)
        self.maxpool2 = MaxPool3d(kernel_size=2, padding=0, stride=2)

        self.conv5 = ConvTTN3d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn5 = BatchNorm3d(256)
        self.conv6 = ConvTTN3d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn6 = BatchNorm3d(256)
        self.conv7 = ConvTTN3d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn7 = BatchNorm3d(256)
        self.conv8 = ConvTTN3d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn8 = BatchNorm3d(256)
        self.maxpool3 = MaxPool3d(kernel_size=2, padding=0, stride=2)

        self.conv9 = ConvTTN3d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn9 = BatchNorm3d(512)
        self.conv10 = ConvTTN3d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn10 = BatchNorm3d(512)
        self.conv11 = ConvTTN3d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn11 = BatchNorm3d(512)
        self.conv12 = ConvTTN3d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn12 = BatchNorm3d(512)
        self.maxpool4 = MaxPool3d(kernel_size=2, padding=0, stride=1)

        self.conv13 = ConvTTN3d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn13 = BatchNorm3d(512)
        self.conv14 = ConvTTN3d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn14 = BatchNorm3d(512)
        self.conv15 = ConvTTN3d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn15 = BatchNorm3d(512)
        self.conv16 = ConvTTN3d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, project_variable=pv, bias=True)
        self.bn16 = BatchNorm3d(512)
        self.maxpool5 = MaxPool3d(kernel_size=2, padding=0, stride=2)

        self.avgpool = AdaptiveAvgPool3d(output_size=(1, 7, 7))
        self.fc1 = Linear(25088, 4096)
        self.dropout1 = Dropout(p=0.5)
        self.fc2 = Linear(4096, 4096)
        self.dropout2 = Dropout(p=0.5)
        self.fc3 = Linear(4096, 27)
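Not part of the original code: a small shape check (the pre-pooling size is hypothetical) confirming that AdaptiveAvgPool3d(output_size=(1, 7, 7)) on 512-channel features yields exactly the 25088 = 512 * 1 * 7 * 7 inputs that fc1 expects.

import torch
from torch.nn import AdaptiveAvgPool3d

feat = torch.randn(2, 512, 3, 9, 9)                      # hypothetical output of maxpool5
pooled = AdaptiveAvgPool3d(output_size=(1, 7, 7))(feat)
print(pooled.flatten(1).shape)                           # torch.Size([2, 25088]) -> fc1 in_features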
Example #7
    def __define_timeception_layers(self, input_shape, n_layers, n_groups,
                                    expansion_factor, is_dilated):
        """
        Define layers inside the timeception layers.
        """

        n_channels_in = input_shape[1]

        # how many layers of timeception
        for i in range(n_layers):
            layer_num = i + 1

            # get details about grouping
            # get the number of channels per branch and the number of output channels after this timeception layer
            n_channels_per_branch, n_channels_out = self.__get_n_channels_per_branch(
                n_groups, expansion_factor, n_channels_in)

            # temporal conv per group
            # 3. define the convolutions applied to each group
            self.__define_grouped_convolutions(input_shape, n_groups,
                                               n_channels_per_branch,
                                               is_dilated, layer_num)

            # downsample over time
            layer_name = 'maxpool_tc%d' % (layer_num)
            layer = MaxPool3d(kernel_size=(2, 1, 1))
            layer._name = layer_name
            setattr(self, layer_name, layer)

            n_channels_in = n_channels_out
            input_shape[1] = n_channels_in

        return n_channels_in
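A standalone illustration, not code from the repo, of the setattr/getattr pattern used above: a layer registered under a dynamically built name can be fetched again by that name in the forward pass.

import torch
from torch.nn import Module, MaxPool3d

class Demo(Module):
    def __init__(self):
        super().__init__()
        setattr(self, 'maxpool_tc1', MaxPool3d(kernel_size=(2, 1, 1)))   # registered as a submodule

    def forward(self, x):
        return getattr(self, 'maxpool_tc1')(x)   # fetched back by the same name

print(Demo()(torch.randn(1, 1024, 8, 7, 7)).shape)   # torch.Size([1, 1024, 4, 7, 7]) -- time halved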
Example #8
    def __init__(self, pv):
        super(AlexNetExplicit3T, self).__init__()
        self.conv1 = classic_3TConv(in_channels=3,
                                    out_channels=64,
                                    kernel_size=11,
                                    stride=(1, 3, 4),
                                    padding=2,
                                    project_variable=pv,
                                    bias=True)
        self.pool1 = MaxPool3d(kernel_size=3, stride=2)

        self.conv2 = classic_3TConv(in_channels=64,
                                    out_channels=192,
                                    kernel_size=5,
                                    padding=2,
                                    project_variable=pv,
                                    bias=True)
        self.pool2 = MaxPool3d(kernel_size=3, stride=2)

        self.conv3 = classic_3TConv(in_channels=192,
                                    out_channels=384,
                                    kernel_size=3,
                                    padding=1,
                                    project_variable=pv,
                                    bias=True)
        self.conv4 = classic_3TConv(in_channels=384,
                                    out_channels=256,
                                    kernel_size=3,
                                    padding=1,
                                    project_variable=pv,
                                    bias=True)
        self.conv5 = classic_3TConv(in_channels=256,
                                    out_channels=256,
                                    kernel_size=3,
                                    padding=1,
                                    project_variable=pv,
                                    bias=True)
        self.pool3 = MaxPool3d(kernel_size=3, stride=2)

        # self.pool4 = AdaptiveAvgPool3d(output_size=1)
        self.pool4 = AdaptiveAvgPool3d((1, 6, 6))

        self.fc1 = Linear(256 * 1 * 6 * 6, 4096)
        self.fc2 = Linear(4096, 4096)
        self.fc3 = Linear(4096, pv.label_size)
Example #9
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding=0,
                 activation=ReLU(inplace=True), pooling=MaxPool3d(kernel_size=2), same_padding=False):

        super(ConvBlock3D, self).__init__()
        self.conv = Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
                           dilation=dilation, padding=padding)
        self.activation = activation
        self.pooling = pooling
        self.same_padding = same_padding
Example #10
    def __init__(self):
        super(ConvFrontend, self).__init__()

        self.conv = Conv3d(1,
                           64, (5, 7, 7),
                           stride=(1, 2, 2),
                           padding=(2, 3, 3))
        self.norm = BatchNorm3d(64)
        self.pool = MaxPool3d((1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
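A standalone shape check, not from the original repo (the clip size is hypothetical): the front-end keeps the temporal length and reduces each spatial side by a factor of 4 (2 from the strided conv, 2 from the pool).

import torch
from torch.nn import Conv3d, BatchNorm3d, MaxPool3d

x = torch.randn(2, 1, 16, 112, 112)   # hypothetical grayscale clip: (N, C, T, H, W)
conv = Conv3d(1, 64, (5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3))
norm = BatchNorm3d(64)
pool = MaxPool3d((1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
print(pool(norm(conv(x))).shape)      # torch.Size([2, 64, 16, 28, 28])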
Example #11
    def __init__(self, d_model):
        super(Convolutional_Feature_Extractor, self).__init__()
        #(N,Cin,D,H,W)
        self.conv3d1 = Conv3d(in_channels=3,
                              out_channels=32,
                              kernel_size=(5, 5, 5),
                              padding=(2, 2, 2)
                              # ,stride=(1,2,2)
                              )
        self.mp3d1 = MaxPool3d(kernel_size=(1, 2, 2))
        self.bn1 = BatchNorm3d(32)
        self.basicblock1 = BasicBlock3D(inplanes=32, outplanes=32)

        self.conv3d2 = Conv3d(in_channels=32,
                              out_channels=64,
                              kernel_size=(5, 5, 5),
                              padding=(2, 2, 2)
                              # ,stride=(1,2,2)
                              )
        self.mp3d2 = MaxPool3d(kernel_size=(1, 2, 2),
                               stride=(1, 2, 2))  #(N,Cin,D,H',W')
        self.bn2 = BatchNorm3d(64)
        self.basicblock2 = BasicBlock3D(inplanes=64, outplanes=64)

        self.conv3d3 = Conv3d(in_channels=64,
                              out_channels=96,
                              kernel_size=(1, 5, 5),
                              padding=(0, 2, 2)
                              # ,stride=(1,2,2)
                              )
        self.mp3d3 = MaxPool3d(kernel_size=(1, 2, 2),
                               stride=(1, 2, 2))  # (N,Cin,D,H',W')
        self.bn3 = BatchNorm3d(96)
        self.basicblock3 = BasicBlock3D(inplanes=96, outplanes=96)

        self.gap = AdaptiveAvgPool2d((1, 1))
        #self.linear=Linear(in_features=96*72,out_features=d_model)
        self.linear = Linear(in_features=96, out_features=d_model)
        self.bn = BatchNorm1d(d_model)
Example #12
 def __init__(
     self, features_out: int, depth: int, kernel_size: int = 3, normalization: bool = True, is_input: bool = False
 ):
     super().__init__()
     norm_first = not is_input
     inch, ouch = self._in_out_channels(depth, features_out)
     self.conv0 = ConvUnit(
         in_channels=inch[0], out_channels=ouch[0], normalization=norm_first, kernel_size=kernel_size
     )
     self.conv1 = ConvUnit(
         in_channels=inch[1], out_channels=ouch[1], normalization=normalization, kernel_size=kernel_size
     )
     self.pool = MaxPool3d(kernel_size=2, stride=2)
Example #13
 def __init__(self, pv):
     super(ResNet18, self).__init__()
     self.conv1_relu = ConvolutionBlock(3, 64, pv)
     self.maxpool = MaxPool3d(kernel_size=3,
                              padding=1,
                              stride=2,
                              dilation=1)
     self.res2a_relu = ResidualBlock(64, 64, pv)
     self.res2b_relu = ResidualBlock(64, 64, pv)
     self.res3a_relu = ResidualBlockB(64, 128, pv)
     self.res3b_relu = ResidualBlock(128, 128, pv)
     self.res4a_relu = ResidualBlockB(128, 256, pv)
     self.res4b_relu = ResidualBlock(256, 256, pv)
     self.res5a_relu = ResidualBlockB(256, 512, pv)
     self.res5b_relu = ResidualBlock(512, 512, pv)
     self.avgpool = AdaptiveAvgPool3d(output_size=1)
     self.fc = torch.nn.Linear(512, 27)
Example #14
    def __define_timeception_layers(self, input_shape, n_layers, n_groups,
                                    expansion_factor, is_dilated):
        '''
        Define layers inside the timeception layers (the internal structure of each timeception layer).
        :param input_shape: (32, 1024, 128, 7, 7); 1024 is the number of input channels
        :param n_layers: 2
        :param n_groups: 8
        :param expansion_factor: 1.25
        :param is_dilated: True
        :return: n_channels_in: the number of output channels of this timeception layer,
                 which becomes the number of input channels of the next timeception layer
        '''
        n_channels_in = input_shape[1]

        # how many layers of timeception
        for i in range(n_layers):
            # i is the layer index
            layer_num = i + 1

            # get details about grouping
            n_channels_per_branch, n_channels_out = self.__get_n_channels_per_branch(
                n_groups, expansion_factor, n_channels_in)

            # temporal conv per group: this is where the operations used inside timeception are defined
            self.__define_grouped_convolutions(input_shape, n_groups,
                                               n_channels_per_branch,
                                               is_dilated, layer_num)

            # downsample over time: max pooling along the temporal axis, to satisfy the second subspace-balance principle
            layer_name = 'maxpool_tc%d' % (layer_num)
            layer = MaxPool3d(kernel_size=(2, 1, 1))
            layer._name = layer_name
            setattr(self, layer_name, layer)

            n_channels_in = n_channels_out
            input_shape[1] = n_channels_in  # number of input channels of the next layer, e.g. [1280, 1600, 2000, 2480]

        return n_channels_in
Example #15
    def __init__(self, thresh, radius, setup_params):
        super().__init__()
        self.thresh = thresh
        self.r = radius
        self.device = setup_params['device']
        self.psize_xy = setup_params['pixel_size_rec']
        self.psize_z = setup_params['pixel_size_axial']
        self.zmin = setup_params['zmin']
        self.upsampling_shift = 0  # 2 due to floor(W/2) affected by upsampling factor of 4
        self.maxpool = MaxPool3d(kernel_size=2 * self.r + 1,
                                 stride=1,
                                 padding=self.r)
        self.pad = ConstantPad3d(self.r, 0.0)
        self.zero = torch.FloatTensor([0.0]).to(self.device)

        # construct the local average filters
        filt_vec = np.arange(-self.r, self.r + 1)
        yfilter, zfilter, xfilter = np.meshgrid(filt_vec, filt_vec, filt_vec)
        xfilter = torch.FloatTensor(xfilter).unsqueeze(0).unsqueeze(0)
        yfilter = torch.FloatTensor(yfilter).unsqueeze(0).unsqueeze(0)
        zfilter = torch.FloatTensor(zfilter).unsqueeze(0).unsqueeze(0)
        sfilter = torch.ones_like(xfilter)
        self.local_filter = torch.cat((sfilter, xfilter, yfilter, zfilter),
                                      0).to(self.device)
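This module looks like a local-maxima detector: MaxPool3d with stride 1 and padding r returns, at every voxel, the maximum over its (2r + 1)^3 neighbourhood, so comparing the pooled volume with the original marks the local maxima. A standalone illustration of that trick (the volume and threshold are hypothetical, not the class's actual forward):

import torch
from torch.nn import MaxPool3d

r = 2
maxpool = MaxPool3d(kernel_size=2 * r + 1, stride=1, padding=r)
vol = torch.randn(1, 1, 16, 32, 32)          # hypothetical recovered volume
mask = (vol == maxpool(vol)) & (vol > 0.5)   # local maxima above a threshold
print(mask.nonzero().shape)                  # (num_detections, 5) index tuples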
Example #16
    def __init__(self, pv):
        super(Googlenet3TConv_explicit, self).__init__()

        self.conv1 = ConvTTN3d(in_channels=3, out_channels=64, kernel_size=7, padding=3, stride=2, project_variable=pv, bias=False)
        self.bn1 = BatchNorm3d(64)
        self.maxpool1 = MaxPool3d(kernel_size=(1, 3, 3), padding=0, stride=(1, 2, 2))
        self.conv2 = Conv3d(in_channels=64, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn2 = BatchNorm3d(64)
        self.conv3 = ConvTTN3d(in_channels=64, out_channels=192, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn3 = BatchNorm3d(192)
        self.maxpool2 = MaxPool3d(kernel_size=(1, 3, 3), padding=0, stride=(1, 2, 2))

        # inception 3a
        self.conv4 = Conv3d(in_channels=192, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn4 = BatchNorm3d(64)
        self.conv5 = Conv3d(in_channels=192, out_channels=96, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn5 = BatchNorm3d(96)
        self.conv6 = ConvTTN3d(in_channels=96, out_channels=128, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn6 = BatchNorm3d(128)
        self.conv7 = Conv3d(in_channels=192, out_channels=16, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn7 = BatchNorm3d(16)
        self.conv8 = ConvTTN3d(in_channels=16, out_channels=32, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn8 = BatchNorm3d(32)
        self.maxpool3 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv9 = Conv3d(in_channels=192, out_channels=32, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn9 = BatchNorm3d(32)

        # inception 3b
        self.conv10 = Conv3d(in_channels=256, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn10 = BatchNorm3d(128)
        self.conv11 = Conv3d(in_channels=256, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn11 = BatchNorm3d(128)
        self.conv12 = ConvTTN3d(in_channels=128, out_channels=192, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn12 = BatchNorm3d(192)
        self.conv13 = Conv3d(in_channels=256, out_channels=32, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn13 = BatchNorm3d(32)
        self.conv14 = ConvTTN3d(in_channels=32, out_channels=96, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn14 = BatchNorm3d(96)
        self.maxpool4 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv15 = Conv3d(in_channels=256, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn15 = BatchNorm3d(64)

        self.maxpool5 = MaxPool3d(kernel_size=3, padding=0, stride=2)

        # inception 4a
        self.conv16 = Conv3d(in_channels=480, out_channels=192, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn16 = BatchNorm3d(192)
        self.conv17 = Conv3d(in_channels=480, out_channels=96, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn17 = BatchNorm3d(96)
        self.conv18 = ConvTTN3d(in_channels=96, out_channels=208, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn18 = BatchNorm3d(208)
        self.conv19 = Conv3d(in_channels=480, out_channels=16, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn19 = BatchNorm3d(16)
        self.conv20 = ConvTTN3d(in_channels=16, out_channels=48, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn20 = BatchNorm3d(48)
        self.maxpool6 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv21 = Conv3d(in_channels=480, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn21 = BatchNorm3d(64)

        # inception 4b
        self.conv22 = Conv3d(in_channels=512, out_channels=160, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn22 = BatchNorm3d(160)
        self.conv23 = Conv3d(in_channels=512, out_channels=112, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn23 = BatchNorm3d(112)
        self.conv24 = ConvTTN3d(in_channels=112, out_channels=224, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn24 = BatchNorm3d(224)
        self.conv25 = Conv3d(in_channels=512, out_channels=24, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn25 = BatchNorm3d(24)
        self.conv26 = ConvTTN3d(in_channels=24, out_channels=64, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn26 = BatchNorm3d(64)
        self.maxpool7 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv27 = Conv3d(in_channels=512, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn27 = BatchNorm3d(64)

        self.avgpool1 = AvgPool3d(kernel_size=5, padding=0, stride=3)
        self.conv28 = Conv3d(in_channels=512, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn28 = BatchNorm3d(128)
        # self.fc1 = Linear(in_features=2304, out_features=1024)
        self.fc1 = Linear(in_features=768, out_features=1024)  # 768
        self.dropout1 = Dropout3d(p=0.7)
        self.fc2 = Linear(in_features=1024, out_features=pv.label_size)

        # inception 4c
        self.conv29 = Conv3d(in_channels=512, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn29 = BatchNorm3d(128)
        self.conv30 = Conv3d(in_channels=512, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn30 = BatchNorm3d(128)
        self.conv31 = ConvTTN3d(in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn31 = BatchNorm3d(256)
        self.conv32 = Conv3d(in_channels=512, out_channels=24, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn32 = BatchNorm3d(24)
        self.conv33 = ConvTTN3d(in_channels=24, out_channels=64, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn33 = BatchNorm3d(64)
        self.maxpool8 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv34 = Conv3d(in_channels=512, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn34 = BatchNorm3d(64)

        # inception 4d
        self.conv35 = Conv3d(in_channels=512, out_channels=112, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn35 = BatchNorm3d(112)
        self.conv36 = Conv3d(in_channels=512, out_channels=144, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn36 = BatchNorm3d(144)
        self.conv37 = ConvTTN3d(in_channels=144, out_channels=288, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn37 = BatchNorm3d(288)
        self.conv38 = Conv3d(in_channels=512, out_channels=32, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn38 = BatchNorm3d(32)
        self.conv39 = ConvTTN3d(in_channels=32, out_channels=64, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn39 = BatchNorm3d(64)
        self.maxpool9 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv40 = Conv3d(in_channels=512, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn40 = BatchNorm3d(64)

        # inception 4e
        self.conv41 = Conv3d(in_channels=528, out_channels=256, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn41 = BatchNorm3d(256)
        self.conv42 = Conv3d(in_channels=528, out_channels=160, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn42 = BatchNorm3d(160)
        self.conv43 = ConvTTN3d(in_channels=160, out_channels=320, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn43 = BatchNorm3d(320)
        self.conv44 = Conv3d(in_channels=528, out_channels=32, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn44 = BatchNorm3d(32)
        self.conv45 = ConvTTN3d(in_channels=32, out_channels=128, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn45 = BatchNorm3d(128)
        self.maxpool10 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv46 = Conv3d(in_channels=528, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn46 = BatchNorm3d(128)

        self.avgpool2 = AvgPool3d(kernel_size=5, padding=0, stride=3)
        self.conv47 = Conv3d(in_channels=528, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn47 = BatchNorm3d(128)
        # self.fc3 = Linear(in_features=2304, out_features=1024)
        self.fc3 = Linear(in_features=768, out_features=1024)
        self.dropout2 = Dropout3d(p=0.7)
        self.fc4 = Linear(in_features=1024, out_features=pv.label_size)

        self.maxpool11 = MaxPool3d(kernel_size=3, padding=0, stride=2)

        # inception 5a
        self.conv48 = Conv3d(in_channels=832, out_channels=256, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn48 = BatchNorm3d(256)
        self.conv49 = Conv3d(in_channels=832, out_channels=160, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn49 = BatchNorm3d(160)
        self.conv50 = ConvTTN3d(in_channels=160, out_channels=320, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn50 = BatchNorm3d(320)
        self.conv51 = Conv3d(in_channels=832, out_channels=32, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn51 = BatchNorm3d(32)
        self.conv52 = ConvTTN3d(in_channels=32, out_channels=128, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn52 = BatchNorm3d(128)
        self.maxpool12 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv53 = Conv3d(in_channels=832, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn53 = BatchNorm3d(128)

        # inception 5b
        self.conv54 = Conv3d(in_channels=832, out_channels=384, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn54 = BatchNorm3d(384)
        self.conv55 = Conv3d(in_channels=832, out_channels=192, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn55 = BatchNorm3d(192)
        self.conv56 = ConvTTN3d(in_channels=192, out_channels=384, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn56 = BatchNorm3d(384)
        self.conv57 = Conv3d(in_channels=832, out_channels=48, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn57 = BatchNorm3d(48)
        self.conv58 = ConvTTN3d(in_channels=48, out_channels=128, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn58 = BatchNorm3d(128)
        self.maxpool13 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv59 = Conv3d(in_channels=832, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn59 = BatchNorm3d(128)

        self.avgpool3 = AdaptiveAvgPool3d(1)
        self.dropout3 = Dropout3d(p=0.4)
        self.fc5 = Linear(in_features=1024, out_features=pv.label_size)
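A quick channel-count check, not part of the original code: the four branches of inception 3a above emit 64, 128, 32 and 32 channels, and their concatenation explains why every 1x1 convolution of inception 3b takes in_channels=256 (likewise 128 + 192 + 96 + 64 = 480 for inception 4a).

import torch

b1 = torch.randn(1, 64, 8, 28, 28)    # conv4 branch (1x1)
b2 = torch.randn(1, 128, 8, 28, 28)   # conv5 -> conv6 branch
b3 = torch.randn(1, 32, 8, 28, 28)    # conv7 -> conv8 branch
b4 = torch.randn(1, 32, 8, 28, 28)    # maxpool3 -> conv9 branch
print(torch.cat([b1, b2, b3, b4], dim=1).shape[1])   # 256 (spatial sizes are hypothetical)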
Example #17
def Maxpool3D_Block():
    
    pool = MaxPool3d(kernel_size=2, stride=2, padding=0)
    
    return pool       
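Usage sketch, assuming the factory above is in scope: with kernel_size=2 and stride=2 every temporal and spatial dimension is halved.

import torch

pool = Maxpool3D_Block()            # MaxPool3d(kernel_size=2, stride=2, padding=0)
x = torch.randn(1, 8, 16, 16, 16)   # hypothetical feature map
print(pool(x).shape)                # torch.Size([1, 8, 8, 8, 8])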
Example #18
    def __define_temporal_convolutional_block(self, input_shape,
                                              n_channels_per_branch_out,
                                              kernel_sizes, dilation_rates,
                                              layer_num, group_num):
        '''
        Define 5 branches of convolutions that operate on the channels of each group
        (the concrete processing applied to every group of channels).
        :param input_shape: (32, 128, 128, 7, 7); the second entry is the number of input channels
        :param n_channels_per_branch_out: [32, 40, 50, 62]
        :param kernel_sizes: dilated: [3, 3, 3]; not dilated: [3, 5, 7]
        :param dilation_rates: dilated: [1, 2, 3]; not dilated: [1, 1, 1]
        :param layer_num: index of the current layer
        :param group_num: index of the current group
        :return: the temporal convolution block is defined (its layers are registered on self)
        '''

        n_channels_in = input_shape[1]

        dw_input_shape = list(input_shape)
        dw_input_shape[1] = n_channels_per_branch_out  #[32,40,50,62]

        # setattr() is the counterpart of getattr(): it sets an attribute value,
        # and the attribute does not need to exist beforehand
        '''
        Example:
        layer_name = 'conv_b1_g%d_tc%d' % (1, 1)
        layer_name
        Out[13]: 'conv_b1_g1_tc1', i.e. branch 1 of group 1 of timeception layer 1
        '''
        # branch 1: dimension reduction only and no temporal conv (kernel-size 1)
        layer_name = 'conv_b1_g%d_tc%d' % (
            group_num, layer_num)  # every group of every timeception layer uses the same kind of operation on a given branch
        layer = Conv3d(n_channels_in,
                       n_channels_per_branch_out,
                       kernel_size=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)
        layer_name = 'bn_b1_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNorm3d(n_channels_per_branch_out)
        layer._name = layer_name
        setattr(self, layer_name, layer)

        # branch 2: dimension reduction followed by depth-wise temp conv (kernel-size 3)
        # channel reduction
        layer_name = 'conv_b2_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3d(n_channels_in,
                       n_channels_per_branch_out,
                       kernel_size=(1, 1, 1))
        # layer = Conv3d(n_channels_in = 128, n_channels_per_branch_out = 32, kernel_size=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)

        # depth-wise temporal convolution
        layer_name = 'convdw_b2_g%d_tc%d' % (group_num, layer_num)
        layer = DepthwiseConv1DLayer(dw_input_shape, kernel_sizes[0],
                                     dilation_rates[0], layer_name)
        # layer = DepthwiseConv1DLayer(dw_input_shape=(32, 32, 128, 7, 7) , kernel_sizes[0], dilation_rates[0], layer_name)
        setattr(self, layer_name, layer)

        # batch normalization
        layer_name = 'bn_b2_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNorm3d(n_channels_per_branch_out)
        layer._name = layer_name
        setattr(self, layer_name, layer)

        # branch 3: dimension reduction followed by depth-wise temp conv (kernel-size 5)
        layer_name = 'conv_b3_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3d(n_channels_in,
                       n_channels_per_branch_out,
                       kernel_size=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)

        layer_name = 'convdw_b3_g%d_tc%d' % (group_num, layer_num)
        layer = DepthwiseConv1DLayer(dw_input_shape, kernel_sizes[1],
                                     dilation_rates[1], layer_name)
        setattr(self, layer_name, layer)

        layer_name = 'bn_b3_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNorm3d(n_channels_per_branch_out)
        layer._name = layer_name
        setattr(self, layer_name, layer)

        # branch 4: dimension reduction followed by depth-wise temp conv (kernel-size 7)
        layer_name = 'conv_b4_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3d(n_channels_in,
                       n_channels_per_branch_out,
                       kernel_size=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)

        layer_name = 'convdw_b4_g%d_tc%d' % (group_num, layer_num)
        layer = DepthwiseConv1DLayer(dw_input_shape, kernel_sizes[2],
                                     dilation_rates[2], layer_name)
        setattr(self, layer_name, layer)

        layer_name = 'bn_b4_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNorm3d(n_channels_per_branch_out)
        layer._name = layer_name
        setattr(self, layer_name, layer)

        # branch 5: dimension reduction followed by temporal max pooling
        layer_name = 'conv_b5_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3d(n_channels_in,
                       n_channels_per_branch_out,
                       kernel_size=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)

        layer_name = 'maxpool_b5_g%d_tc%d' % (group_num, layer_num)
        layer = MaxPool3d(kernel_size=(2, 1, 1), stride=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)

        layer_name = 'padding_b5_g%d_tc%d' % (group_num, layer_num)
        layer = torch.nn.ReplicationPad3d(
            (0, 0, 0, 0, 1, 0))  # left, right, top, bottom, front, back
        layer._name = layer_name
        setattr(self, layer_name, layer)

        layer_name = 'bn_b5_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNorm3d(n_channels_per_branch_out)
        layer._name = layer_name
        setattr(self, layer_name, layer)
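Branch 5 pairs MaxPool3d((2, 1, 1), stride=(1, 1, 1)) with ReplicationPad3d((0, 0, 0, 0, 1, 0)). A standalone shape check (sizes hypothetical; the pad/pool order in the real forward is not shown here) verifies that the padding restores the frame lost by the stride-1 temporal pooling.

import torch
from torch.nn import MaxPool3d, ReplicationPad3d

x = torch.randn(1, 32, 8, 7, 7)              # hypothetical group features
pad = ReplicationPad3d((0, 0, 0, 0, 1, 0))   # replicate one frame at the front
pool = MaxPool3d(kernel_size=(2, 1, 1), stride=(1, 1, 1))
print(pool(pad(x)).shape)                    # torch.Size([1, 32, 8, 7, 7]) -- temporal length preserved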
Example #19
    def build_model(self):

        num_featmaps_lay1 = self.num_featmaps_base
        self.convolution_downlay1_1 = Conv3d(self.num_channels_in,
                                             num_featmaps_lay1,
                                             kernel_size=3,
                                             padding=1)
        self.convolution_downlay1_2 = Conv3d(num_featmaps_lay1,
                                             num_featmaps_lay1,
                                             kernel_size=3,
                                             padding=1)
        self.pooling_downlay1 = MaxPool3d(kernel_size=2, padding=0)

        num_featmaps_lay2 = 2 * num_featmaps_lay1
        self.convolution_downlay2_1 = Conv3d(num_featmaps_lay1,
                                             num_featmaps_lay2,
                                             kernel_size=3,
                                             padding=1)
        self.convolution_downlay2_2 = Conv3d(num_featmaps_lay2,
                                             num_featmaps_lay2,
                                             kernel_size=3,
                                             padding=1)
        self.pooling_downlay2 = MaxPool3d(kernel_size=2, padding=0)

        num_featmaps_lay3 = 2 * num_featmaps_lay2
        self.convolution_downlay3_1 = Conv3d(num_featmaps_lay2,
                                             num_featmaps_lay3,
                                             kernel_size=3,
                                             padding=1)
        self.convolution_downlay3_2 = Conv3d(num_featmaps_lay3,
                                             num_featmaps_lay3,
                                             kernel_size=3,
                                             padding=1)
        self.pooling_downlay3 = MaxPool3d(kernel_size=2, padding=0)

        num_featmaps_lay4 = 2 * num_featmaps_lay3
        self.convolution_downlay4_1 = Conv3d(num_featmaps_lay3,
                                             num_featmaps_lay4,
                                             kernel_size=3,
                                             padding=1)
        self.convolution_downlay4_2 = Conv3d(num_featmaps_lay4,
                                             num_featmaps_lay4,
                                             kernel_size=3,
                                             padding=1)
        self.pooling_downlay4 = MaxPool3d(kernel_size=2, padding=0)

        num_featmaps_lay5 = 2 * num_featmaps_lay4
        self.convolution_downlay5_1 = Conv3d(num_featmaps_lay4,
                                             num_featmaps_lay5,
                                             kernel_size=3,
                                             padding=1)
        self.convolution_downlay5_2 = Conv3d(num_featmaps_lay5,
                                             num_featmaps_lay5,
                                             kernel_size=3,
                                             padding=1)
        self.upsample_uplay5 = Upsample(scale_factor=2, mode='nearest')

        num_featmaps_lay4pl5 = num_featmaps_lay4 + num_featmaps_lay5
        self.convolution_uplay4_1 = Conv3d(num_featmaps_lay4pl5,
                                           num_featmaps_lay4,
                                           kernel_size=3,
                                           padding=1)
        self.convolution_uplay4_2 = Conv3d(num_featmaps_lay4,
                                           num_featmaps_lay4,
                                           kernel_size=3,
                                           padding=1)
        self.upsample_uplay4 = Upsample(scale_factor=2, mode='nearest')

        num_featmaps_lay3pl4 = num_featmaps_lay3 + num_featmaps_lay4
        self.convolution_uplay3_1 = Conv3d(num_featmaps_lay3pl4,
                                           num_featmaps_lay3,
                                           kernel_size=3,
                                           padding=1)
        self.convolution_uplay3_2 = Conv3d(num_featmaps_lay3,
                                           num_featmaps_lay3,
                                           kernel_size=3,
                                           padding=1)
        self.upsample_uplay3 = Upsample(scale_factor=2, mode='nearest')

        num_featmaps_lay2pl3 = num_featmaps_lay2 + num_featmaps_lay3
        self.convolution_uplay2_1 = Conv3d(num_featmaps_lay2pl3,
                                           num_featmaps_lay2,
                                           kernel_size=3,
                                           padding=1)
        self.convolution_uplay2_2 = Conv3d(num_featmaps_lay2,
                                           num_featmaps_lay2,
                                           kernel_size=3,
                                           padding=1)
        self.upsample_uplay2 = Upsample(scale_factor=2, mode='nearest')

        num_featmaps_lay1pl2 = num_featmaps_lay1 + num_featmaps_lay2
        self.convolution_uplay1_1 = Conv3d(num_featmaps_lay1pl2,
                                           num_featmaps_lay1,
                                           kernel_size=3,
                                           padding=1)
        self.convolution_uplay1_2 = Conv3d(num_featmaps_lay1,
                                           num_featmaps_lay1,
                                           kernel_size=3,
                                           padding=1)

        self.classification_layer = Conv3d(num_featmaps_lay1,
                                           self.num_classes_out,
                                           kernel_size=1,
                                           padding=0)
        self.activation_layer = Sigmoid()
Example #20
    def build_model(self):

        num_featmaps_lay1 = self.num_featmaps_base
        self.convolution_downlay1_1 = Conv3d(self.num_channels_in,
                                             num_featmaps_lay1,
                                             kernel_size=3,
                                             padding=1)
        self.activation_downlay1_1 = ReLU(inplace=True)
        # self.batchnorm_downlay1_1 = BatchNorm3d(num_featmaps_lay1)
        self.convolution_downlay1_2 = Conv3d(num_featmaps_lay1,
                                             num_featmaps_lay1,
                                             kernel_size=3,
                                             padding=1)
        self.activation_downlay1_2 = ReLU(inplace=True)
        # self.batchnorm_downlay1_2 = BatchNorm3d(num_featmaps_lay1)
        # self.dropout_downlay1 = Dropout3d(p=self.dropout_rate)
        self.pooling_downlay1 = MaxPool3d(kernel_size=2, padding=0)

        num_featmaps_lay2 = 2 * num_featmaps_lay1
        self.convolution_downlay2_1 = Conv3d(num_featmaps_lay1,
                                             num_featmaps_lay2,
                                             kernel_size=3,
                                             padding=1)
        self.activation_downlay2_1 = ReLU(inplace=True)
        # self.batchnorm_downlay2_1 = BatchNorm3d(num_featmaps_lay2)
        self.convolution_downlay2_2 = Conv3d(num_featmaps_lay2,
                                             num_featmaps_lay2,
                                             kernel_size=3,
                                             padding=1)
        self.activation_downlay2_2 = ReLU(inplace=True)
        # self.batchnorm_downlay2_2 = BatchNorm3d(num_featmaps_lay2)
        # self.dropout_downlay2 = Dropout3d(p =self.dropout_rate)
        self.pooling_downlay2 = MaxPool3d(kernel_size=2, padding=0)

        num_featmaps_lay3 = 2 * num_featmaps_lay2
        self.convolution_downlay3_1 = Conv3d(num_featmaps_lay2,
                                             num_featmaps_lay3,
                                             kernel_size=3,
                                             padding=1)
        self.activation_downlay3_1 = ReLU(inplace=True)
        # self.batchnorm_downlay3_1 = BatchNorm3d(num_featmaps_lay3)
        self.convolution_downlay3_2 = Conv3d(num_featmaps_lay3,
                                             num_featmaps_lay3,
                                             kernel_size=3,
                                             padding=1)
        self.activation_downlay3_2 = ReLU(inplace=True)
        # self.batchnorm_downlay3_2 = BatchNorm3d(num_featmaps_lay3)
        # self.dropout_downlay3 = Dropout3d(p=self.dropout_rate)
        self.pooling_downlay3 = MaxPool3d(kernel_size=2, padding=0)

        num_featmaps_lay4 = 2 * num_featmaps_lay3
        self.convolution_downlay4_1 = Conv3d(num_featmaps_lay3,
                                             num_featmaps_lay4,
                                             kernel_size=3,
                                             padding=1)
        self.activation_downlay4_1 = ReLU(inplace=True)
        # self.batchnorm_downlay4_1 = BatchNorm3d(num_featmaps_lay4)
        self.convolution_downlay4_2 = Conv3d(num_featmaps_lay4,
                                             num_featmaps_lay4,
                                             kernel_size=3,
                                             padding=1)
        self.activation_downlay4_2 = ReLU(inplace=True)
        # self.batchnorm_downlay4_2 = BatchNorm3d(num_featmaps_lay4)
        # self.dropout_downlay4 = Dropout3d(p=self.dropout_rate)
        self.pooling_downlay4 = MaxPool3d(kernel_size=(1, 2, 2), padding=0)

        num_featmaps_lay5 = 2 * num_featmaps_lay4
        self.convolution_downlay5_1 = Conv3d(num_featmaps_lay4,
                                             num_featmaps_lay5,
                                             kernel_size=3,
                                             padding=1)
        self.activation_downlay5_1 = ReLU(inplace=True)
        # self.batchnorm_downlay5_1 = BatchNorm3d(num_featmaps_lay5)
        self.convolution_downlay5_2 = Conv3d(num_featmaps_lay5,
                                             num_featmaps_lay5,
                                             kernel_size=3,
                                             padding=1)
        self.activation_downlay5_2 = ReLU(inplace=True)
        # self.batchnorm_downlay5_2 = BatchNorm3d(num_featmaps_lay5)
        # self.dropout_downlay5 = Dropout3d(p=self.dropout_rate)
        self.upsample_downlay5 = Upsample(scale_factor=(1, 2, 2),
                                          mode='nearest')

        num_featmaps_lay4pl5 = num_featmaps_lay4 + num_featmaps_lay5
        self.convolution_uplay4_1 = Conv3d(num_featmaps_lay4pl5,
                                           num_featmaps_lay4,
                                           kernel_size=3,
                                           padding=1)
        self.activation_uplay4_1 = ReLU(inplace=True)
        # self.batchnorm_uplay4_1 = BatchNorm3d(num_featmaps_lay4)
        self.convolution_uplay4_2 = Conv3d(num_featmaps_lay4,
                                           num_featmaps_lay4,
                                           kernel_size=3,
                                           padding=1)
        self.activation_uplay4_2 = ReLU(inplace=True)
        # self.batchnorm_uplay4_2 = BatchNorm3d(num_featmaps_lay4)
        # self.dropout_uplay4 = Dropout3d(p=self.dropout_rate)
        self.upsample_uplay4 = Upsample(scale_factor=2, mode='nearest')

        num_featmaps_lay3pl4 = num_featmaps_lay3 + num_featmaps_lay4
        self.convolution_uplay3_1 = Conv3d(num_featmaps_lay3pl4,
                                           num_featmaps_lay3,
                                           kernel_size=3,
                                           padding=1)
        self.activation_uplay3_1 = ReLU(inplace=True)
        # self.batchnorm_uplay3_1 = BatchNorm3d(num_featmaps_lay3)
        self.convolution_uplay3_2 = Conv3d(num_featmaps_lay3,
                                           num_featmaps_lay3,
                                           kernel_size=3,
                                           padding=1)
        self.activation_uplay3_2 = ReLU(inplace=True)
        # self.batchnorm_uplay3_2 = BatchNorm3d(num_featmaps_lay3)
        # self.dropout_uplay3 = Dropout3d(p=self.dropout_rate)
        self.upsample_uplay3 = Upsample(scale_factor=2, mode='nearest')

        num_featmaps_lay2pl3 = num_featmaps_lay2 + num_featmaps_lay3
        self.convolution_uplay2_1 = Conv3d(num_featmaps_lay2pl3,
                                           num_featmaps_lay2,
                                           kernel_size=3,
                                           padding=1)
        self.activation_uplay2_1 = ReLU(inplace=True)
        # self.batchnorm_uplay2_1 = BatchNorm3d(num_featmaps_lay2)
        self.convolution_uplay2_2 = Conv3d(num_featmaps_lay2,
                                           num_featmaps_lay2,
                                           kernel_size=3,
                                           padding=1)
        self.activation_uplay2_2 = ReLU(inplace=True)
        # self.batchnorm_uplay2_2 = BatchNorm3d(num_featmaps_lay2)
        # self.dropout_uplay2 = Dropout3d(p=self.dropout_rate)
        self.upsample_uplay2 = Upsample(scale_factor=2, mode='nearest')

        num_featmaps_lay1pl2 = num_featmaps_lay1 + num_featmaps_lay2
        self.convolution_uplay1_1 = Conv3d(num_featmaps_lay1pl2,
                                           num_featmaps_lay1,
                                           kernel_size=3,
                                           padding=1)
        self.activation_uplay1_1 = ReLU(inplace=True)
        # self.batchnorm_uplay1_1 = BatchNorm3d(num_featmaps_lay1)
        self.convolution_uplay1_2 = Conv3d(num_featmaps_lay1,
                                           num_featmaps_lay1,
                                           kernel_size=3,
                                           padding=1)
        self.activation_uplay1_2 = ReLU(inplace=True)
        # self.batchnorm_uplay1_2 = BatchNorm3d(num_featmaps_lay1)
        # self.dropout_uplay1 = Dropout3d(p=self.dropout_rate)
        self.classification_layer = Conv3d(num_featmaps_lay1,
                                           self.num_classes_out,
                                           kernel_size=1,
                                           padding=0)

        self.activation_output = Sigmoid()
        def __init__(self, feat_channels=[32, 64, 128, 256, 512], residual='conv'):
            # residual: conv for residual input x through 1*1 conv across every layer for downsampling, None for removal of residuals

            super(UNet_David, self).__init__()

            class Conv3D_Block(Module):

                def __init__(self, inp_feat, out_feat, kernel=3, stride=1, padding=1, residual=None):

                    super(Conv3D_Block, self).__init__()

                    self.conv1 = Sequential(
                        Conv3d(inp_feat, out_feat, kernel_size=kernel,
                               stride=stride, padding=padding, bias=True),
                        BatchNorm3d(out_feat),
                        ReLU())

                    self.conv2 = Sequential(
                        Conv3d(out_feat, out_feat, kernel_size=kernel,
                               stride=stride, padding=padding, bias=True),
                        BatchNorm3d(out_feat),
                        ReLU())

                    self.residual = residual

                    if self.residual is not None:
                        self.residual_upsampler = Conv3d(inp_feat, out_feat, kernel_size=1, bias=False)

                def forward(self, x):

                    res = x

                    if not self.residual:
                        return self.conv2(self.conv1(x))
                    else:
                        return self.conv2(self.conv1(x)) + self.residual_upsampler(res)

            class Deconv3D_Block(Module):

                def __init__(self, inp_feat, out_feat, kernel=3, stride=2, padding=1):
                    super(Deconv3D_Block, self).__init__()

                    self.deconv = Sequential(
                        ConvTranspose3d(inp_feat, out_feat, kernel_size=(kernel, kernel, kernel),
                                        stride=(stride, stride, stride), padding=(padding, padding, padding),
                                        output_padding=1, bias=True),
                        ReLU())

                def forward(self, x):
                    return self.deconv(x)

            class ChannelPool3d(AvgPool1d):

                def __init__(self, kernel_size, stride, padding):
                    super(ChannelPool3d, self).__init__(kernel_size, stride, padding)
                    self.pool_1d = AvgPool1d(self.kernel_size, self.stride, self.padding, self.ceil_mode)

                def forward(self, inp):
                    n, c, d, w, h = inp.size()
                    inp = inp.view(n, c, d * w * h).permute(0, 2, 1)
                    pooled = self.pool_1d(inp)
                    c = int(c / self.kernel_size[0])
                    # Return the pooled tensor (not the input), restored to its 5D layout.
                    return pooled.permute(0, 2, 1).contiguous().view(n, c, d, w, h)

            # Encoder downsamplers
            self.pool1 = MaxPool3d((2, 2, 2))
            self.pool2 = MaxPool3d((2, 2, 2))
            self.pool3 = MaxPool3d((2, 2, 2))
            self.pool4 = MaxPool3d((2, 2, 2))

            # Encoder convolutions
            self.conv_blk1 = Conv3D_Block(opt.in_channels, feat_channels[0], residual=residual)
            self.conv_blk2 = Conv3D_Block(feat_channels[0], feat_channels[1], residual=residual)
            self.conv_blk3 = Conv3D_Block(feat_channels[1], feat_channels[2], residual=residual)
            self.conv_blk4 = Conv3D_Block(feat_channels[2], feat_channels[3], residual=residual)
            self.conv_blk5 = Conv3D_Block(feat_channels[3], feat_channels[4], residual=residual)

            # Decoder convolutions
            self.dec_conv_blk4 = Conv3D_Block(2 * feat_channels[3], feat_channels[3], residual=residual)
            self.dec_conv_blk3 = Conv3D_Block(2 * feat_channels[2], feat_channels[2], residual=residual)
            self.dec_conv_blk2 = Conv3D_Block(2 * feat_channels[1], feat_channels[1], residual=residual)
            self.dec_conv_blk1 = Conv3D_Block(2 * feat_channels[0], feat_channels[0], residual=residual)

            # Decoder upsamplers
            self.deconv_blk4 = Deconv3D_Block(feat_channels[4], feat_channels[3])
            self.deconv_blk3 = Deconv3D_Block(feat_channels[3], feat_channels[2])
            self.deconv_blk2 = Deconv3D_Block(feat_channels[2], feat_channels[1])
            self.deconv_blk1 = Deconv3D_Block(feat_channels[1], feat_channels[0])

            # Final 1*1 Conv Segmentation map
            self.one_conv = Conv3d(feat_channels[0], opt.out_channels, kernel_size=1, stride=1, padding=0, bias=True)
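
The forward pass of UNet_David is likewise not part of this fragment; the doubled input widths of the dec_conv blocks (2 * feat_channels[i]) again point to concatenated skip connections. A minimal sketch, assuming the standard U-Net wiring:

import torch

def forward(self, x):
    # Encoder: convolution block, keep the result for the skip connection, then downsample
    x1 = self.conv_blk1(x)
    x2 = self.conv_blk2(self.pool1(x1))
    x3 = self.conv_blk3(self.pool2(x2))
    x4 = self.conv_blk4(self.pool3(x3))
    base = self.conv_blk5(self.pool4(x4))

    # Decoder: upsample, concatenate the matching encoder features, convolve
    d4 = self.dec_conv_blk4(torch.cat([self.deconv_blk4(base), x4], dim=1))
    d3 = self.dec_conv_blk3(torch.cat([self.deconv_blk3(d4), x3], dim=1))
    d2 = self.dec_conv_blk2(torch.cat([self.deconv_blk2(d3), x2], dim=1))
    d1 = self.dec_conv_blk1(torch.cat([self.deconv_blk1(d2), x1], dim=1))

    return self.one_conv(d1)  # 1x1x1 conv down to opt.out_channels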
Ejemplo n.º 22
0
from torch import tensor, float32
from torch.nn import MaxPool3d, MaxUnpool3d
"""
二维的最大值池化逆转操作
*注意*:该操作是有损操作
"""
pool = MaxPool3d(kernel_size=2, stride=1, return_indices=True)
input = tensor(
    [[[[[1, 1, 2], [1, 2, 3], [2, 3, 4]], [[1, 2, 3], [2, 3, 4], [3, 4, 5]],
       [[2, 3, 4], [3, 4, 5], [4, 5, 6]]]]],
    dtype=float32)
output, indices = pool(input)
print(output)
# tensor([[[[[3., 4.],
#            [4., 5.]],
#
#           [[4., 5.],
#            [5., 6.]]]]])
print(indices)
# tensor([[[[[13, 14],
#            [16, 17]],
#
#           [[22, 23],
#            [25, 26]]]]])
unpool = MaxUnpool3d(kernel_size=2, stride=1)
unpool_out = unpool(output, indices)
print(unpool_out)
# tensor([[[[[0., 0., 0.],
#            [0., 0., 0.],
#            [0., 0., 0.]],
#
#           [[0., 0., 0.],
#            [0., 3., 4.],
#            [0., 4., 5.]],
#
#           [[0., 0., 0.],
#            [0., 4., 5.],
#            [0., 5., 6.]]]]])
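
As the docstring warns, max unpooling is lossy: only the pooled maxima are written back and every other position is filled with zeros, so the original input cannot be recovered. A quick check (an illustrative addition, not part of the original example):

print((unpool_out == input).all())
# tensor(False): the non-maximal entries of the original input are gone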
Ejemplo n.º 23
0
from torch import tensor, float32
from torch.nn import MaxPool3d
"""
@:param kernel_size (int) – 池化器窗口大小
@:param stride (int) – 池化器滑动步长. Default value is kernel_size
@:param padding (int) – 边缘0填充,default 0
@:param dilation (int)– 控制窗口内元素的跨度
@:param return_indices (bool) – if True, 则返回结果是最大值的索引. Useful for torch.nn.MaxUnpool1d later
@:param ceil_mode (bool) – when True, 向上取整
"""
pool = MaxPool3d(kernel_size=2, stride=1)
input = tensor(
    [[[[[0, 1, 2], [1, 2, 3], [2, 3, 4]], [[1, 2, 3], [2, 3, 4], [3, 4, 5]],
       [[2, 3, 4], [3, 4, 5], [4, 5, 6]]]]],
    dtype=float32)
output = pool(input)
print(output)
# tensor([[[[[3., 4.],
#            [4., 5.]],
#
#           [[4., 5.],
#            [5., 6.]]]]])
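
The 2x2x2 output above follows from the usual pooling size formula, D_out = floor((D_in + 2*padding - dilation*(kernel_size - 1) - 1) / stride + 1), applied to each spatial dimension. A small helper that reproduces it (added here for illustration only):

from math import floor

def pool_out_dim(d_in, kernel_size, stride, padding=0, dilation=1):
    # Output size MaxPool3d produces along one spatial dimension
    return floor((d_in + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)

print(pool_out_dim(3, kernel_size=2, stride=1))  # 2, matching each dimension of the output above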
        def __init__(self, input_features, input_dim):
            super().__init__()

            # First Convolutional Block
            self.conv11 = Conv3d(in_channels=input_features,
                                 out_channels=32,
                                 kernel_size=(1, 3, 3),
                                 stride=(1, 1, 1),
                                 padding=(0, 0, 0),
                                 bias=True)

            self.conv12 = Conv3d(in_channels=32,
                                 out_channels=32,
                                 kernel_size=(1, 3, 3),
                                 stride=(1, 1, 1),
                                 padding=0,
                                 bias=True)

            self.conv13 = Conv3d(in_channels=32,
                                 out_channels=32,
                                 kernel_size=(1, 3, 3),
                                 stride=(1, 1, 1),
                                 padding=0,
                                 bias=True)

            self.conv14 = Conv3d(in_channels=32,
                                 out_channels=32,
                                 kernel_size=(1, 3, 3),
                                 stride=(1, 1, 1),
                                 padding=0,
                                 bias=True)

            self.maxPool1 = MaxPool3d(kernel_size=(1, 2, 2),
                                      stride=(1, 2, 2),
                                      padding=0)

            self.batchN1 = BatchNorm3d(num_features=32)

            # Compute output dimensions
            dimconv11 = compute_conv_dim(im_dim=input_dim,
                                         padding=0,
                                         dilation=1,
                                         kernel_size=3,
                                         stride=1)
            dimconv12 = compute_conv_dim(im_dim=dimconv11,
                                         padding=0,
                                         dilation=1,
                                         kernel_size=3,
                                         stride=1)
            dimconv13 = compute_conv_dim(im_dim=dimconv12,
                                         padding=0,
                                         dilation=1,
                                         kernel_size=3,
                                         stride=1)
            dimconv14 = compute_conv_dim(im_dim=dimconv13,
                                         padding=0,
                                         dilation=1,
                                         kernel_size=3,
                                         stride=1)
            self.dim_max = compute_conv_dim(im_dim=dimconv14,
                                            padding=0,
                                            dilation=1,
                                            kernel_size=2,
                                            stride=2)

            # Second Convolutional Block
            self.conv21 = Conv3d(in_channels=32,
                                 out_channels=64,
                                 kernel_size=(1, 3, 3),
                                 stride=(1, 1, 1),
                                 padding=(0, 0, 0),
                                 bias=True)

            self.conv22 = Conv3d(in_channels=64,
                                 out_channels=64,
                                 kernel_size=(1, 3, 3),
                                 stride=(1, 1, 1),
                                 padding=0,
                                 bias=True)

            self.maxPool2 = MaxPool3d(kernel_size=(1, 2, 2),
                                      stride=(1, 2, 2),
                                      padding=0)

            self.batchN2 = BatchNorm3d(num_features=64)

            # Compute output dimensions:
            dimconv21 = compute_conv_dim(im_dim=self.dim_max,
                                         padding=0,
                                         dilation=1,
                                         kernel_size=3,
                                         stride=1)
            dimconv22 = compute_conv_dim(im_dim=dimconv21,
                                         padding=0,
                                         dilation=1,
                                         kernel_size=3,
                                         stride=1)

            self.dim_max2 = compute_conv_dim(im_dim=dimconv22,
                                             padding=0,
                                             dilation=1,
                                             kernel_size=2,
                                             stride=2)

            # Third block
            self.conv31 = Conv3d(in_channels=64,
                                 out_channels=128,
                                 kernel_size=(1, 3, 3),
                                 stride=(1, 1, 1),
                                 padding=(0, 0, 0),
                                 bias=True)

            self.maxPool3 = MaxPool3d(kernel_size=(1, 2, 2),
                                      stride=(1, 2, 2),
                                      padding=0)

            self.batchN3 = BatchNorm3d(num_features=128)

            # Compute output dimensions
            dimconv31 = compute_conv_dim(im_dim=self.dim_max2,
                                         padding=0,
                                         dilation=1,
                                         kernel_size=3,
                                         stride=1)

            self.dim_max3 = compute_conv_dim(im_dim=dimconv31,
                                             padding=0,
                                             dilation=1,
                                             kernel_size=2,
                                             stride=2)

            # LSTM
            self.LSTM = LSTM(input_size=10368, hidden_size=128, num_layers=1)

            self.linear = Linear(in_features=3712, out_features=24, bias=True)
            self.l_out = Linear(in_features=24, out_features=3, bias=True)
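
compute_conv_dim is not defined in this fragment; given how it is called (im_dim, padding, dilation, kernel_size, stride), it presumably implements the standard convolution/pooling output-size formula. A minimal sketch under that assumption:

from math import floor

def compute_conv_dim(im_dim, padding, dilation, kernel_size, stride):
    # Spatial output size of a convolution or pooling layer along one dimension
    return floor((im_dim + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)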
Ejemplo n.º 25
0
    def __define_temporal_convolutional_block(self, input_shape,
                                              n_channels_per_branch_out,
                                              kernel_sizes, dilation_rates,
                                              layer_num, group_num):
        """
        Define 5 branches of convolutions that operate of channels of each group.
        """

        n_channels_in = input_shape[1]  # number of input channels of each group

        dw_input_shape = list(input_shape)
        dw_input_shape[1] = n_channels_per_branch_out  # output channels of each branch = its input channels

        # The five branches below correspond, from right to left, to the five branches of one group in the paper
        # branch 1: dimension reduction only and no temporal conv
        layer_name = 'conv_b1_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3d(n_channels_in,
                       n_channels_per_branch_out,
                       kernel_size=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)  # setattr(object, name, value) is a built-in Python function

        layer_name = 'bn_b1_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNorm3d(n_channels_per_branch_out)
        layer._name = layer_name
        setattr(self, layer_name, layer)

        # branch 2: dimension reduction followed by depth-wise temp conv (kernel-size 3)
        layer_name = 'conv_b2_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3d(n_channels_in,
                       n_channels_per_branch_out,
                       kernel_size=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)
        layer_name = 'convdw_b2_g%d_tc%d' % (group_num, layer_num)
        layer = DepthwiseConv1DLayer(dw_input_shape, kernel_sizes[0],
                                     dilation_rates[0], layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'bn_b2_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNorm3d(n_channels_per_branch_out)
        layer._name = layer_name
        setattr(self, layer_name, layer)

        # branch 3: dimension reduction followed by depth-wise temp conv (kernel-size 5)
        layer_name = 'conv_b3_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3d(n_channels_in,
                       n_channels_per_branch_out,
                       kernel_size=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)
        layer_name = 'convdw_b3_g%d_tc%d' % (group_num, layer_num)
        layer = DepthwiseConv1DLayer(dw_input_shape, kernel_sizes[1],
                                     dilation_rates[1], layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'bn_b3_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNorm3d(n_channels_per_branch_out)
        layer._name = layer_name
        setattr(self, layer_name, layer)

        # branch 4: dimension reduction followed by depth-wise temp conv (kernel-size 7)
        layer_name = 'conv_b4_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3d(n_channels_in,
                       n_channels_per_branch_out,
                       kernel_size=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)
        layer_name = 'convdw_b4_g%d_tc%d' % (group_num, layer_num)
        layer = DepthwiseConv1DLayer(dw_input_shape, kernel_sizes[2],
                                     dilation_rates[2], layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'bn_b4_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNorm3d(n_channels_per_branch_out)
        layer._name = layer_name
        setattr(self, layer_name, layer)

        # branch 5: dimension reduction followed by temporal max pooling
        layer_name = 'conv_b5_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3d(n_channels_in,
                       n_channels_per_branch_out,
                       kernel_size=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)
        layer_name = 'maxpool_b5_g%d_tc%d' % (group_num, layer_num)
        layer = MaxPool3d(kernel_size=(2, 1, 1), stride=(1, 1, 1))
        layer._name = layer_name
        setattr(self, layer_name, layer)
        layer_name = 'padding_b5_g%d_tc%d' % (group_num, layer_num)
        layer = torch.nn.ReplicationPad3d(
            (0, 0, 0, 0, 1, 0))  # left, right, top, bottom, front, back
        layer._name = layer_name
        setattr(self, layer_name, layer)
        layer_name = 'bn_b5_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNorm3d(n_channels_per_branch_out)
        layer._name = layer_name
        setattr(self, layer_name, layer)
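
Because the branch layers are registered dynamically with setattr, the corresponding forward pass would look them up by name with getattr. A minimal sketch of how branch 1 of one group might be applied, assuming this naming scheme (the tensor t is a placeholder):

def _apply_branch1(self, t, layer_num, group_num):
    # Retrieve the dynamically registered 1x1x1 conv and its batch norm by name
    conv = getattr(self, 'conv_b1_g%d_tc%d' % (group_num, layer_num))
    bn = getattr(self, 'bn_b1_g%d_tc%d' % (group_num, layer_num))
    return bn(conv(t))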
Ejemplo n.º 26
0
    def __init__(self, pv):
        super(ResNet18Explicit3DConv, self).__init__()
        # self.conv1_relu = ConvolutionBlock(3, 64, pv)
        self.conv1 = Conv3d(in_channels=3,
                            out_channels=64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)
        self.bn1 = BatchNorm3d(64)

        self.maxpool = MaxPool3d(kernel_size=3,
                                 padding=1,
                                 stride=2,
                                 dilation=1)

        # self.res2a_relu = ResidualBlock(64, 64, pv)
        self.conv2 = Conv3d(in_channels=64,
                            out_channels=64,
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn2 = BatchNorm3d(64)
        self.conv3 = Conv3d(in_channels=64,
                            out_channels=64,
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn3 = BatchNorm3d(64)

        # self.res2b_relu = ResidualBlock(64, 64, pv)
        self.conv4 = Conv3d(in_channels=64,
                            out_channels=64,
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn4 = BatchNorm3d(64)
        self.conv5 = Conv3d(in_channels=64,
                            out_channels=64,
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn5 = BatchNorm3d(64)

        # self.res3a_relu = ResidualBlockB(64, 128, pv)
        self.conv6 = Conv3d(in_channels=64,
                            out_channels=128,
                            kernel_size=1,
                            stride=2,
                            bias=False)
        self.bn6 = BatchNorm3d(128)
        self.conv7 = Conv3d(in_channels=64,
                            out_channels=128,
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            bias=False)
        self.bn7 = BatchNorm3d(128)
        self.conv8 = Conv3d(in_channels=128,
                            out_channels=128,
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn8 = BatchNorm3d(128)

        # self.res3b_relu = ResidualBlock(128, 128, pv)
        self.conv9 = Conv3d(in_channels=128,
                            out_channels=128,
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn9 = BatchNorm3d(128)
        self.conv10 = Conv3d(in_channels=128,
                             out_channels=128,
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn10 = BatchNorm3d(128)

        # self.res4a_relu = ResidualBlockB(128, 256, pv)
        self.conv11 = Conv3d(in_channels=128,
                             out_channels=256,
                             kernel_size=1,
                             stride=2,
                             bias=False)
        self.bn11 = BatchNorm3d(256)
        self.conv12 = Conv3d(in_channels=128,
                             out_channels=256,
                             kernel_size=3,
                             stride=2,
                             padding=1,
                             bias=False)
        self.bn12 = BatchNorm3d(256)
        self.conv13 = Conv3d(in_channels=256,
                             out_channels=256,
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn13 = BatchNorm3d(256)

        # self.res4b_relu = ResidualBlock(256, 256, pv)
        self.conv14 = Conv3d(in_channels=256,
                             out_channels=256,
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn14 = BatchNorm3d(256)
        self.conv15 = Conv3d(in_channels=256,
                             out_channels=256,
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn15 = BatchNorm3d(256)

        # self.res5a_relu = ResidualBlockB(256, 512, pv)
        self.conv16 = Conv3d(in_channels=256,
                             out_channels=512,
                             kernel_size=1,
                             stride=2,
                             bias=False)
        self.bn16 = BatchNorm3d(512)
        self.conv17 = Conv3d(in_channels=256,
                             out_channels=512,
                             kernel_size=3,
                             stride=2,
                             padding=1,
                             bias=False)
        self.bn17 = BatchNorm3d(512)
        self.conv18 = Conv3d(in_channels=512,
                             out_channels=512,
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn18 = BatchNorm3d(512)

        # self.res5b_relu = ResidualBlock(512, 512, pv)
        self.conv19 = Conv3d(in_channels=512,
                             out_channels=512,
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn19 = BatchNorm3d(512)
        self.conv20 = Conv3d(in_channels=512,
                             out_channels=512,
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn20 = BatchNorm3d(512)

        self.avgpool = AdaptiveAvgPool3d(output_size=1)
        self.fc = torch.nn.Linear(512, pv.label_size)
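
The commented-out names (ResidualBlock, ResidualBlockB) indicate which conv/bn pairs form each residual block, but the forward pass is not included in this fragment. A minimal sketch of the first basic block (res2a), assuming the usual ResNet wiring with ReLU activations:

import torch.nn.functional as F

def _res2a(self, x):
    # Basic block: two 3x3x3 convs with batch norm, plus the identity shortcut
    out = F.relu(self.bn2(self.conv2(x)))
    out = self.bn3(self.conv3(out))
    return F.relu(out + x)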
Ejemplo n.º 27
0
    def __init__(self,
                 num_channels=3,
                 feat_channels=[32, 64, 128, 256, 256],
                 residual='conv',
                 is_dilated=False,
                 is_off_unit=False,
                 is_2d1d=False):
        super(unet3d, self).__init__()

        self.is_off_unit = is_off_unit

        # Encoder downsamplers
        self.pool1 = MaxPool3d((1, 2, 2))
        self.pool2 = MaxPool3d((1, 2, 2))
        self.pool3 = MaxPool3d((1, 2, 2))
        self.pool4 = MaxPool3d((1, 2, 2))

        # Encoder convolutions
        if is_dilated:
            self.conv_blk1 = Conv3D_Block_adv(num_channels,
                                              feat_channels[0],
                                              residual=residual,
                                              is_2d1d=is_2d1d)
            self.conv_blk2 = Conv3D_Block_adv(feat_channels[0],
                                              feat_channels[1],
                                              residual=residual,
                                              is_2d1d=is_2d1d)
            self.conv_blk3 = Conv3D_Block_adv(feat_channels[1],
                                              feat_channels[2],
                                              residual=residual,
                                              is_2d1d=is_2d1d)
            self.conv_blk4 = Conv3D_Block_adv(feat_channels[2],
                                              feat_channels[3],
                                              residual=residual,
                                              is_2d1d=is_2d1d)
            self.conv_blk5 = Conv3D_Block_adv(feat_channels[3],
                                              feat_channels[4],
                                              residual=residual,
                                              is_2d1d=is_2d1d)

            # Decoder convolutions
            self.dec_conv_blk4 = Conv3D_Block_adv(2 * feat_channels[3],
                                                  feat_channels[3],
                                                  residual=residual,
                                                  is_2d1d=is_2d1d)
            self.dec_conv_blk3 = Conv3D_Block_adv(2 * feat_channels[2],
                                                  feat_channels[2],
                                                  residual=residual,
                                                  is_2d1d=is_2d1d)
            self.dec_conv_blk2 = Conv3D_Block_adv(2 * feat_channels[1],
                                                  feat_channels[1],
                                                  residual=residual,
                                                  is_2d1d=is_2d1d)
            self.dec_conv_blk1 = Conv3D_Block_adv(2 * feat_channels[0],
                                                  feat_channels[0],
                                                  residual=residual,
                                                  is_2d1d=is_2d1d)

        else:
            # Encoder convolutions
            self.conv_blk1 = Conv3D_Block(num_channels,
                                          feat_channels[0],
                                          residual=residual,
                                          is_2d1d=is_2d1d)
            self.conv_blk2 = Conv3D_Block(feat_channels[0],
                                          feat_channels[1],
                                          residual=residual,
                                          is_2d1d=is_2d1d)
            self.conv_blk3 = Conv3D_Block(feat_channels[1],
                                          feat_channels[2],
                                          residual=residual,
                                          is_2d1d=is_2d1d)
            self.conv_blk4 = Conv3D_Block(feat_channels[2],
                                          feat_channels[3],
                                          residual=residual,
                                          is_2d1d=is_2d1d)
            self.conv_blk5 = Conv3D_Block(feat_channels[3],
                                          feat_channels[4],
                                          residual=residual,
                                          is_2d1d=is_2d1d)

            # Decoder convolutions
            self.dec_conv_blk4 = Conv3D_Block(2 * feat_channels[3],
                                              feat_channels[3],
                                              residual=residual,
                                              is_2d1d=is_2d1d)
            self.dec_conv_blk3 = Conv3D_Block(2 * feat_channels[2],
                                              feat_channels[2],
                                              residual=residual,
                                              is_2d1d=is_2d1d)
            self.dec_conv_blk2 = Conv3D_Block(2 * feat_channels[1],
                                              feat_channels[1],
                                              residual=residual,
                                              is_2d1d=is_2d1d)
            self.dec_conv_blk1 = Conv3D_Block(2 * feat_channels[0],
                                              feat_channels[0],
                                              residual=residual,
                                              is_2d1d=is_2d1d)

        if self.is_off_unit:
            self.off_unit_enc1 = OFF_Unit(feat_channels[0], feat_channels[0])
            self.off_unit_enc2 = OFF_Unit(feat_channels[1], feat_channels[1])
            self.off_unit_enc3 = OFF_Unit(feat_channels[2], feat_channels[2])
            self.off_unit_enc4 = OFF_Unit(feat_channels[3], feat_channels[3])
            self.off_unit_enc5 = OFF_Unit(feat_channels[4], feat_channels[4])

            self.off_unit_dec4 = OFF_Unit(feat_channels[3], feat_channels[3])
            self.off_unit_dec3 = OFF_Unit(feat_channels[2], feat_channels[2])
            self.off_unit_dec2 = OFF_Unit(feat_channels[1], feat_channels[1])
            self.off_unit_dec1 = OFF_Unit(feat_channels[0], feat_channels[0])

        # Decoder upsamplers
        self.deconv_blk4 = Deconv3D_Block(feat_channels[4], feat_channels[3])
        self.deconv_blk3 = Deconv3D_Block(feat_channels[3], feat_channels[2])
        self.deconv_blk2 = Deconv3D_Block(feat_channels[2], feat_channels[1])
        self.deconv_blk1 = Deconv3D_Block(feat_channels[1], feat_channels[0])

        # Final 1*1 Conv Segmentation map
        self.one_conv = Conv3d(feat_channels[0],
                               1,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=True)
Ejemplo n.º 28
0
    def __init__(self):
        super(ResNet18Explicit3DConvReduced, self).__init__()
        # self.conv1_relu = ConvolutionBlock(3, 64, pv)
        self.conv1 = Conv3d(in_channels=3,
                            out_channels=int(64 / 1.718),
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)
        self.bn1 = BatchNorm3d(int(64 / 1.718))

        self.maxpool = MaxPool3d(kernel_size=3,
                                 padding=1,
                                 stride=2,
                                 dilation=1)

        # self.res2a_relu = ResidualBlock(int(64/1.718), int(64/1.718), pv)
        self.conv2 = Conv3d(in_channels=int(64 / 1.718),
                            out_channels=int(64 / 1.718),
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn2 = BatchNorm3d(int(64 / 1.718))
        self.conv3 = Conv3d(in_channels=int(64 / 1.718),
                            out_channels=int(64 / 1.718),
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn3 = BatchNorm3d(int(64 / 1.718))

        # self.res2b_relu = ResidualBlock(int(64/1.718), int(64/1.718), pv)
        self.conv4 = Conv3d(in_channels=int(64 / 1.718),
                            out_channels=int(64 / 1.718),
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn4 = BatchNorm3d(int(64 / 1.718))
        self.conv5 = Conv3d(in_channels=int(64 / 1.718),
                            out_channels=int(64 / 1.718),
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn5 = BatchNorm3d(int(64 / 1.718))

        # self.res3a_relu = ResidualBlockB(int(64/1.718), int(128/1.718), pv)
        self.conv6 = Conv3d(in_channels=int(64 / 1.718),
                            out_channels=int(128 / 1.718),
                            kernel_size=1,
                            stride=2,
                            bias=False)
        self.bn6 = BatchNorm3d(int(128 / 1.718))
        self.conv7 = Conv3d(in_channels=int(64 / 1.718),
                            out_channels=int(128 / 1.718),
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            bias=False)
        self.bn7 = BatchNorm3d(int(128 / 1.718))
        self.conv8 = Conv3d(in_channels=int(128 / 1.718),
                            out_channels=int(128 / 1.718),
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn8 = BatchNorm3d(int(128 / 1.718))

        # self.res3b_relu = ResidualBlock(int(128/1.718), int(128/1.718), pv)
        self.conv9 = Conv3d(in_channels=int(128 / 1.718),
                            out_channels=int(128 / 1.718),
                            kernel_size=3,
                            padding=1,
                            bias=False)
        self.bn9 = BatchNorm3d(int(128 / 1.718))
        self.conv10 = Conv3d(in_channels=int(128 / 1.718),
                             out_channels=int(128 / 1.718),
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn10 = BatchNorm3d(int(128 / 1.718))

        # self.res4a_relu = ResidualBlockB(int(128/1.718), int(256/1.718), pv)
        self.conv11 = Conv3d(in_channels=int(128 / 1.718),
                             out_channels=int(256 / 1.718),
                             kernel_size=1,
                             stride=2,
                             bias=False)
        self.bn11 = BatchNorm3d(int(256 / 1.718))
        self.conv12 = Conv3d(in_channels=int(128 / 1.718),
                             out_channels=int(256 / 1.718),
                             kernel_size=3,
                             stride=2,
                             padding=1,
                             bias=False)
        self.bn12 = BatchNorm3d(int(256 / 1.718))
        self.conv13 = Conv3d(in_channels=int(256 / 1.718),
                             out_channels=int(256 / 1.718),
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn13 = BatchNorm3d(int(256 / 1.718))

        # self.res4b_relu = ResidualBlock(int(256/1.718), int(256/1.718), pv)
        self.conv14 = Conv3d(in_channels=int(256 / 1.718),
                             out_channels=int(256 / 1.718),
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn14 = BatchNorm3d(int(256 / 1.718))
        self.conv15 = Conv3d(in_channels=int(256 / 1.718),
                             out_channels=int(256 / 1.718),
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn15 = BatchNorm3d(int(256 / 1.718))

        # self.res5a_relu = ResidualBlockB(int(256/1.718), int(512/1.718), pv)
        self.conv16 = Conv3d(in_channels=int(256 / 1.718),
                             out_channels=int(512 / 1.718),
                             kernel_size=1,
                             stride=2,
                             bias=False)
        self.bn16 = BatchNorm3d(int(512 / 1.718))
        self.conv17 = Conv3d(in_channels=int(256 / 1.718),
                             out_channels=int(512 / 1.718),
                             kernel_size=3,
                             stride=2,
                             padding=1,
                             bias=False)
        self.bn17 = BatchNorm3d(int(512 / 1.718))
        self.conv18 = Conv3d(in_channels=int(512 / 1.718),
                             out_channels=int(512 / 1.718),
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn18 = BatchNorm3d(int(512 / 1.718))

        # self.res5b_relu = ResidualBlock(int(512/1.718), int(512/1.718), pv)
        self.conv19 = Conv3d(in_channels=int(512 / 1.718),
                             out_channels=int(512 / 1.718),
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn19 = BatchNorm3d(int(512 / 1.718))
        self.conv20 = Conv3d(in_channels=int(512 / 1.718),
                             out_channels=int(512 / 1.718),
                             kernel_size=3,
                             padding=1,
                             bias=False)
        self.bn20 = BatchNorm3d(int(512 / 1.718))

        self.avgpool = AdaptiveAvgPool3d(output_size=1)
        self.fc = torch.nn.Linear(int(512 / 1.718), 27)
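
Dividing every width by 1.718 and truncating gives the reduced channel counts this network actually uses; a quick check of the resulting values (illustrative only):

for base in (64, 128, 256, 512):
    print(base, '->', int(base / 1.718))
# 64 -> 37, 128 -> 74, 256 -> 149, 512 -> 298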