def __init__(self,
             channel_in,
             growth_rate,
             bottleneck_size_basic_factor,
             drop_ratio=0.8):
    """Dense-layer building block: BN/act -> 1x1 bottleneck conv -> BN/act -> 3x3 conv.

    Args:
        channel_in: number of input channels.
        growth_rate: channels produced by the final 3x3 conv.
        bottleneck_size_basic_factor: bottleneck width multiplier; the 1x1 conv
            outputs `bottleneck_size_basic_factor * growth_rate` channels.
        drop_ratio: dropout probability for `drop_out`
            (0.8 is unusually high — presumably intentional; confirm in forward()).
    """
    super(BachActivateConvLayer, self).__init__()

    self.drop_ratio = drop_ratio
    self.growth_rate = growth_rate
    bottleneck_channels = bottleneck_size_basic_factor * growth_rate
    self.bottleneck_channel_out = bottleneck_channels

    # Pre-activation pair for the 1x1 bottleneck projection.
    self.mode_bn = InPlaceABNSync(channel_in)
    self.mode_conv = nn.Conv3d(channel_in, bottleneck_channels,
                               kernel_size=1, stride=1, bias=False)

    # Pre-activation pair for the 3x3 feature conv (padding keeps spatial size).
    self.bn = InPlaceABNSync(bottleneck_channels)
    self.conv = nn.Conv3d(bottleneck_channels, growth_rate,
                          kernel_size=3, stride=1, padding=1, bias=False)

    self.drop_out = nn.Dropout3d(p=self.drop_ratio)
# --- Example 2 ---
 def __init__(self, channel):
     """Gather branch of a gather-excite style layer: three stride-2 depthwise
     convs (roughly 1/8 spatial resolution) followed by a sigmoid gate.

     Args:
         channel: channel count, preserved throughout (groups=channel makes
             every conv depthwise).
     """
     super(GE_8_theoLayer, self).__init__()        
     self.gather = nn.Sequential(
         # NOTE(review): the first conv uses padding=2 while the others use
         # padding=1 — possibly to tweak the output size; confirm intended.
         nn.Conv2d(channel, channel, kernel_size=3, stride=2, groups=channel,padding=2, bias=False),
         InPlaceABNSync(channel),
         nn.Conv2d(channel, channel, kernel_size=3, stride=2, groups=channel,padding=1, bias=False),
         InPlaceABNSync(channel),
         nn.Conv2d(channel, channel, kernel_size=3, stride=2, groups=channel,padding=1, bias=False),
         # Plain BN (no activation) before the sigmoid gating below.
         BatchNorm2d(channel)
     )
     # Squashes gathered context to (0, 1) gating weights.
     self.gather_sigmoid = nn.Sigmoid()
# --- Example 3 ---
    def __init__(self, features, out_features=512):
        """Multi-branch context module: one plain 3x3 branch plus four
        gather-excite branches at different scales, fused by a 1x1 conv.

        Args:
            features: input channel count shared by all branches.
            out_features: channel count of each branch output and of the fused result.
        """
        super(PGEC_Module, self).__init__()
        # Branch 1: full-resolution 3x3 conv.
        self.conv1 = nn.Sequential(nn.Conv2d(features, out_features, kernel_size=3, padding=1, dilation=1, bias=True),
                                   BatchNorm2d(out_features)
                                   )

        # Branches 2-5: gather-excite context at progressively coarser scales,
        # each followed by a 1x1 projection to out_features.
        self.conv2 = nn.Sequential(GE_theoLayer(features,30),
                                   nn.Conv2d(features, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
                                   BatchNorm2d(out_features))

        self.conv3 = nn.Sequential(GE_4_theoLayer(features),
                                   nn.Conv2d(features, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
                                   BatchNorm2d(out_features))
       
        self.conv4 = nn.Sequential(GE_8_theoLayer(features),
                                   nn.Conv2d(features, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
                                   BatchNorm2d(out_features))
        

        self.conv5 = nn.Sequential(GE_16_theoLayer(features),
                                   nn.Conv2d(features, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
                                   BatchNorm2d(out_features))
        
        
        # Fusion: expects the five branch outputs concatenated channel-wise
        # (presumably in forward(); confirm the branches are upsampled to match).
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(out_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_features),
            nn.Dropout2d(0.1)
            )
# --- Example 4 ---
    def __init__(self, input_channel_number, output_channel_number):
        """DenseNet transition: BN/activation, 1x1 channel-reducing conv,
        then 2x average pooling to halve the spatial resolution.

        Args:
            input_channel_number: channels entering the transition.
            output_channel_number: channels after the 1x1 compression conv.
        """
        super(TransitionLayer, self).__init__()

        self.bn = InPlaceABNSync(input_channel_number)
        self.conv = nn.Conv3d(input_channel_number,
                              output_channel_number,
                              kernel_size=1,
                              stride=1,
                              bias=False)
        self.pool = nn.AvgPool3d(kernel_size=2, stride=2)
# --- Example 5 ---
    def __init__(self, n_in, n_out, stride=1):
        """3D residual block: two 3x3x3 convs with ABN layers and an optional
        projection shortcut.

        Args:
            n_in: input channel count.
            n_out: output channel count.
            stride: stride of the first conv (spatial down-sampling when > 1).
        """
        super(PostRes, self).__init__()
        self.conv1 = nn.Conv3d(n_in,
                               n_out,
                               kernel_size=3,
                               stride=stride,
                               padding=1)
        # NOTE(review): bn1 is sized for n_in, so forward() presumably applies
        # bn1 BEFORE conv1 (pre-activation ordering: bn1-conv1-bn2-conv2).
        # If bn1 ran after conv1 this would be a channel mismatch — confirm.
        self.bn1 = InPlaceABNSync(n_in)
        self.conv2 = nn.Conv3d(n_out, n_out, kernel_size=3, padding=1)
        self.bn2 = InPlaceABNSync(n_out)

        # 1x1 projection shortcut when the shape changes; None means identity.
        if stride != 1 or n_out != n_in:
            self.shortcut = nn.Sequential(
                nn.Conv3d(n_in, n_out, kernel_size=1, stride=stride))

        else:
            self.shortcut = None
# --- Example 6 ---
    def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)):
        """Pyramid Scene Parsing (PSP) head.

        Builds one pooled stage per entry in `sizes` via `self._make_stage`,
        then a 3x3 bottleneck that fuses the input with all stage outputs
        (hence `features + len(sizes) * out_features` input channels).

        Args:
            features: input channel count.
            out_features: channels produced by each stage and by the bottleneck.
            sizes: adaptive-pool output sizes for the pyramid stages.
        """
        super(PSPModule, self).__init__()

        # Fix: the original assigned `self.stages = []` and immediately
        # overwrote it with the ModuleList; the dead plain-list store is removed.
        self.stages = nn.ModuleList(
            [self._make_stage(features, out_features, size) for size in sizes])
        self.bottleneck = nn.Sequential(
            nn.Conv2d(features + len(sizes) * out_features, out_features,
                      kernel_size=3, padding=1, dilation=1, bias=False),
            InPlaceABNSync(out_features)
            )
# --- Example 7 ---
    def __init__(self, growth_rate=32, block_config=(4, 4, 4, 4), compression=0.5, num_init_features=24, bottleneck_size_basic_factor=4, drop_rate=0, num_classes=2, small_inputs=True):
        """3D DenseNet backbone with binary and 6-way classification heads.

        Args:
            growth_rate: channels added by each dense layer.
            block_config: dense-layer count per dense block.
            compression: channel-reduction factor of each transition layer.
            num_init_features: channels produced by the stem conv.
            bottleneck_size_basic_factor: bottleneck width multiplier inside dense layers.
            drop_rate: dropout rate forwarded to DenseBlock.
            num_classes: output size of the binary classifier head.
            small_inputs: unused in this constructor — TODO confirm whether needed.
        """

        super(DenseNet, self).__init__()

        # Stem: single 3x3x3 conv on a 2-channel input volume.
        self.features = nn.Conv3d(2, num_init_features, kernel_size=3, stride=1, padding=1, bias=False)

        self.init_feature_channel_number = num_init_features
        self.growth_rate = growth_rate
        self.compression = compression
        self.number_class = num_classes
        self.block_config = block_config

        # Running channel count as blocks/transitions are stacked.
        num_features = num_init_features


        # Channel count after each block (post-transition where one exists).
        # Attribute name misspellings kept: it may be referenced elsewhere.
        self.dense_trainsition_out_put_list = []

        for i, num_layers in enumerate(self.block_config):

            block = DenseBlock(num_layers, num_features, self.growth_rate, bottleneck_size_basic_factor, drop_rate)

            # Registered dynamically so forward() can fetch 'block_<i>' by name.
            setattr(self, 'block_' + str(i), block)

            num_features = num_features + num_layers * growth_rate


            # Every block except the last is followed by a compressing transition.
            if i != len(block_config) - 1:

                transition_layer = TransitionLayer(num_features, int(num_features * self.compression))

                setattr(self, 'block_transition_' + str(i), transition_layer)

                num_features = int(num_features * self.compression)

            self.dense_trainsition_out_put_list.append(num_features)

        # self.shuortcut_connect_layer = nn.Sequential(nn.Conv3d(self.dense_trainsition_out_put_list[0]*2 + self.dense_trainsition_out_put_list[2], self.dense_trainsition_out_put_list[2],1),
        #                                              InPlaceABNSync(self.dense_trainsition_out_put_list[2]))

        # 1x1 conv fusing two copies of stage-0 features with stage-2 features
        # (presumably a skip connection in forward(); confirm usage there).
        self.shuortcut_connect_layer = nn.Conv3d(self.dense_trainsition_out_put_list[0]*2 + self.dense_trainsition_out_put_list[2], self.dense_trainsition_out_put_list[2],1)

        self.finally_bn = InPlaceABNSync(num_features)
        self.binary_classifier = nn.Linear(num_features, self.number_class)
        # Second head is hard-coded to 6 classes.
        self.multiple_classifier = nn.Linear(num_features, 6)


        # Name-based weight init: He-style normal for conv weights, unit gain /
        # zero bias for norm layers, zero bias for classifier heads.
        # NOTE(review): matching on 'conv'/'norm' substrings is fragile —
        # InPlaceABNSync parameter names may not contain 'norm'; verify coverage.
        for name, param in self.named_parameters():
            if 'conv' in name and 'weight' in name:
                # fan = out_channels * kernel volume (fan_out convention).
                n = param.size(0) * param.size(2) * param.size(3)* param.size(4)
                param.data.normal_().mul_(math.sqrt(2. / n))
            elif 'norm' in name and 'weight' in name:
                param.data.fill_(1)
            elif 'norm' in name and 'bias' in name:
                param.data.fill_(0)
            elif 'classifier' in name and 'bias' in name:
                param.data.fill_(0)
    def __init__(self, features, out_features=512, inner_features=256, dilations=(12, 24, 36)):
        """ASPP head: an image-level pooling branch, a 1x1 branch, and three
        dilated 3x3 branches, fused by a 1x1 bottleneck with dropout.

        Args:
            features: input channel count.
            out_features: channels after the fusion bottleneck.
            inner_features: channels produced by each of the five branches.
            dilations: dilation rates of the three dilated branches.
        """
        super(ASPPModule, self).__init__()

        rate_a, rate_b, rate_c = dilations

        # Branch 1: global image context (pool to 1x1, then project).
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(inner_features))
        # Branch 2: plain 1x1 projection.
        self.conv2 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(inner_features))
        # Branches 3-5: dilated 3x3 convs (padding == dilation keeps size).
        self.conv3 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=rate_a, dilation=rate_a, bias=False),
            InPlaceABNSync(inner_features))
        self.conv4 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=rate_b, dilation=rate_b, bias=False),
            InPlaceABNSync(inner_features))
        self.conv5 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=rate_c, dilation=rate_c, bias=False),
            InPlaceABNSync(inner_features))

        # Fuse the five concatenated branch outputs.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(inner_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_features),
            nn.Dropout2d(0.1)
        )
# --- Example 9 ---
    def __init__(self, inplanes, planes, index , senet_global_average, expansion = 2, stride=1, downsample=None):
        """3D bottleneck residual block with a squeeze-excite component.

        Args:
            inplanes: input channel count.
            planes: bottleneck width; the block emits planes * expansion channels.
            index: stage index forwarded to SEComponent.
            senet_global_average: per-stage pooling sizes forwarded to SEComponent.
            expansion: output channel multiplier of the final 1x1 conv.
            stride: stride of the middle 3x3 conv (down-sampling when > 1).
            downsample: optional module projecting the shortcut to the new shape.
        """

        super(BottleNeckResblock, self).__init__()

        # bn1 is sized for inplanes, suggesting pre-activation ordering
        # (bn -> conv) in forward() — confirm there.
        self.bn1 = InPlaceABNSync(inplanes)

        # 1x1 reduce.
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)

        self.bn2 = InPlaceABNSync(planes)

        # 3x3 spatial conv; carries the block's stride.
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)

        self.bn3 = InPlaceABNSync(planes)

        # 1x1 expand to planes * expansion.
        self.conv3 = nn.Conv3d(planes, planes * expansion, kernel_size=1, bias=False)

        self.relu = nn.LeakyReLU(inplace=True)

        self.downsample = downsample

        self.stride = stride

        # Channel-attention over the expanded output.
        self.se_compont = SEComponent(planes * expansion, index , senet_global_average, se_resize_factor = 8)
    def __init__(self, num_classes):
        """Decoder head: project deep context features, mix in a slim low-level
        skip, refine with two 1x1 convs, and emit per-class logits.

        Args:
            num_classes: number of output segmentation classes.
        """
        super(Decoder_Module, self).__init__()
        # Context features: 512 -> 256.
        self.conv1 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(256))
        # Low-level skip: 256 -> 48.
        self.conv2 = nn.Sequential(
            nn.Conv2d(256, 48, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(48))
        # Fuse the concatenated 256 + 48 = 304 channels and refine twice.
        self.conv3 = nn.Sequential(
            nn.Conv2d(304, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(256),
            nn.Conv2d(256, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(256))
        # Final classifier (bias enabled, no norm afterwards).
        self.conv4 = nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True)
    def __init__(self, in_fea=[256, 512, 1024], mid_fea=256, out_fea=2):
        """Edge-detection head over three backbone stages: per-stage 1x1
        projections, a shared 3x3 edge classifier, and a 1x1 fusion conv.

        Args:
            in_fea: channel counts of the three input feature maps.
            mid_fea: channels after each per-stage projection.
            out_fea: edge classes (default 2: edge / non-edge).
        """
        super(Edge_Module, self).__init__()

        # One 1x1 projection per input stage, each to mid_fea channels.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_fea[0], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(mid_fea))
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_fea[1], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(mid_fea))
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_fea[2], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(mid_fea))
        # 3x3 classifier applied to each projected stage.
        self.conv4 = nn.Conv2d(mid_fea, out_fea, kernel_size=3, padding=1, dilation=1, bias=True)
        # Fuses the three per-stage edge maps (concatenated channel-wise).
        self.conv5 = nn.Conv2d(out_fea * 3, out_fea, kernel_size=1, padding=0, dilation=1, bias=True)
 def __init__(self,
              in_channels,
              out_channels,
              key_channels,
              value_channels,
              dropout,
              sizes=([1])):
     """Base object-context module: one self-attention stage per entry of
     `sizes` (built by `self._make_stage`), followed by a fusion conv over
     the input concatenated with the attention output (hence 2 * in_channels).

     Args:
         in_channels: input channel count.
         out_channels: channels after the fusion conv.
         key_channels: key/query projection width for the attention stages.
         value_channels: value projection width for the attention stages.
         dropout: dropout probability after the fusion conv.
         sizes: pooling scales, one attention stage each.
     """
     super(BaseOC_Module, self).__init__()
     # Fix: the original pre-assigned `self.stages = []` and immediately
     # replaced it with the ModuleList; the dead store is removed.
     self.stages = nn.ModuleList([
         self._make_stage(in_channels, out_channels, key_channels,
                          value_channels, size) for size in sizes
     ])
     self.conv_bn_dropout = nn.Sequential(
         nn.Conv2d(2 * in_channels, out_channels, kernel_size=1, padding=0),
         InPlaceABNSync(out_channels), nn.Dropout2d(dropout))
    def __init__(self, features, out_features=256, dilations=(12, 24, 36)):
        """ASPP variant whose first branch adds object-context attention after
        a 3x3 conv; the remaining branches mirror standard ASPP.

        Args:
            features: input channel count.
            out_features: channels per branch and after the final fusion.
            dilations: dilation rates of the three dilated 3x3 branches.
        """
        super(ASP_OC_Module, self).__init__()

        rate_a, rate_b, rate_c = dilations

        # Branch 1: 3x3 conv then object-context self-attention.
        self.context = nn.Sequential(
            nn.Conv2d(features, out_features, kernel_size=3, padding=1, dilation=1, bias=True),
            InPlaceABNSync(out_features),
            BaseOC_Context_Module(in_channels=out_features,
                                  out_channels=out_features,
                                  key_channels=out_features // 2,
                                  value_channels=out_features,
                                  dropout=0,
                                  sizes=([2])))
        # Branch 2: plain 1x1 projection.
        self.conv2 = nn.Sequential(
            nn.Conv2d(features, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_features))
        # Branches 3-5: dilated 3x3 convs (padding == dilation keeps size).
        self.conv3 = nn.Sequential(
            nn.Conv2d(features, out_features, kernel_size=3, padding=rate_a, dilation=rate_a, bias=False),
            InPlaceABNSync(out_features))
        self.conv4 = nn.Sequential(
            nn.Conv2d(features, out_features, kernel_size=3, padding=rate_b, dilation=rate_b, bias=False),
            InPlaceABNSync(out_features))
        self.conv5 = nn.Sequential(
            nn.Conv2d(features, out_features, kernel_size=3, padding=rate_c, dilation=rate_c, bias=False),
            InPlaceABNSync(out_features))

        # Fuse the five concatenated branch outputs.
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(out_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_features),
            nn.Dropout2d(0.1))
# --- Example 14 ---
    def make_res_block(self, block, planes, blocks, se_index, se_average_size, stride=1, expansion = 2, down_sample_padding = 0, can_down_sample = True):
        """Stack `blocks` residual blocks into one stage, updating self.inplanes.

        Args:
            block: residual block factory (e.g. BottleNeckResblock).
            planes: bottleneck width; the stage emits planes * expansion channels.
            blocks: number of blocks in the stage.
            se_index: stage index forwarded to each block's SE component.
            se_average_size: SE pooling sizes forwarded to each block.
            stride: stride of the first block (spatial down-sampling).
            expansion: channel multiplier of each block's output.
            down_sample_padding: padding for the shortcut projection conv.
            can_down_sample: allow building a projection shortcut at all.

        Returns:
            nn.Sequential of the constructed blocks.
        """
        downsample = None

        # A projection shortcut is needed when the spatial size or channel
        # count changes — and only if down-sampling is permitted.
        # (Original compared `can_down_sample == True`; plain truthiness is idiomatic.)
        if can_down_sample and (stride != 1 or self.inplanes != planes * expansion):
            downsample = nn.Sequential(
                InPlaceABNSync(self.inplanes, activation='none'),
                nn.Conv3d(self.inplanes, planes * expansion, kernel_size=1, stride=stride, padding=down_sample_padding, bias=False))

        # First block may change stride/channels; the rest keep them fixed.
        layers = []
        layers.append(block(self.inplanes, planes, se_index, se_average_size, expansion = expansion, stride = stride, downsample = downsample))
        self.inplanes = planes * expansion

        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, se_index, se_average_size, expansion=expansion))

        return nn.Sequential(*layers)
    def __init__(self, block, layers, num_classes):
        """ResNet backbone with PSP context, edge and decoder heads.

        Args:
            block: residual block class passed to _make_layer.
            layers: per-stage block counts (four stages).
            num_classes: segmentation classes for the decoder and fusion heads.
        """
        self.inplanes = 128
        super(ResNet, self).__init__()
        # Deep stem: three 3x3 convs in place of the classic single 7x7.
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # Stage 4 keeps stride 1 and uses dilation 2 so the output stays at
        # higher resolution for dense prediction.
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=1,
                                       dilation=2,
                                       multi_grid=(1, 1, 1))

        # Pyramid pooling over the 2048-channel stage-4 output.
        self.context_encoding = PSPModule(2048, 512)

        self.edge = Edge_Module()
        self.decoder = Decoder_Module(num_classes)

        # Fuses decoder and edge features (1024 channels concatenated —
        # presumably; confirm against forward()) into final class logits.
        self.fushion = nn.Sequential(
            nn.Conv2d(1024,
                      256,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(256), nn.Dropout2d(0.1),
            nn.Conv2d(256,
                      num_classes,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=True))
# --- Example 16 ---
    def __init__(self, block=BottleNeckResblock, layers=(3, 4, 23, 3), num_classes=2):
        """3D squeeze-excite ResNet with binary and 6-way classifier heads.

        Args:
            block: residual block class for make_res_block.
            layers: per-stage block counts. Fix: the original default was a
                mutable list; a tuple avoids the shared-mutable-default pitfall
                and is index-compatible.
            num_classes: kept for interface compatibility; the binary head is
                hard-coded to 2 outputs below — TODO confirm intended.
        """
        super(SENet, self).__init__()

        # Per-stage bottleneck widths and matching SE global-pool sizes.
        block_channel_planes = [32, 64, 128, 256]
        senet_global_average = [48, 24, 12, 6]

        self.inplanes = 32

        # Stem: single 3x3x3 conv on a 2-channel input volume.
        self.preblock = nn.Conv3d(2, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)

        self.avgpool = nn.AvgPool3d(6, stride=1)

        # Final width = last stage planes * expansion (expansion == 2 below).
        self.finally_bn = InPlaceABNSync(block_channel_planes[-1] * 2)

        self.binary_class_layer   = nn.Linear(block_channel_planes[-1] * 2, 2)
        self.multiple_class_layer = nn.Linear(block_channel_planes[-1] * 2, 6)

        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self.make_res_block(block, block_channel_planes[0], layers[0], 0, senet_global_average, expansion = 2, stride=1,down_sample_padding=0)
        self.layer2 = self.make_res_block(block, block_channel_planes[1], layers[1], 1, senet_global_average, expansion = 2, stride=2)
        self.layer3 = self.make_res_block(block, block_channel_planes[2], layers[2], 2, senet_global_average, expansion = 2, stride=2)
        self.layer4 = self.make_res_block(block, block_channel_planes[3], layers[3], 3, senet_global_average, expansion = 2, stride=2)

        # Kaiming init for convs; unit gain / zero bias for the ABN layers.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(m, InPlaceABNSync):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
 def __init__(self,
              in_channels,
              key_channels,
              value_channels,
              out_channels=None,
              scale=1):
     """Self-attention block: shared key/query projection, value projection,
     and a zero-initialized output projection W.

     Args:
         in_channels: input channel count.
         key_channels: key/query projection width.
         value_channels: value projection width.
         out_channels: output width; defaults to in_channels when None.
         scale: max-pool factor applied to keys/queries (1 = no pooling).
     """
     super(_SelfAttentionBlock, self).__init__()
     self.scale = scale
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.key_channels = key_channels
     self.value_channels = value_channels
     # Fix: identity comparison (`is None`) instead of `== None`.
     if out_channels is None:
         self.out_channels = in_channels
     self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
     self.f_key = nn.Sequential(
         nn.Conv2d(in_channels=self.in_channels,
                   out_channels=self.key_channels,
                   kernel_size=1,
                   stride=1,
                   padding=0),
         InPlaceABNSync(self.key_channels),
     )
     # Key and query deliberately share weights.
     self.f_query = self.f_key
     self.f_value = nn.Conv2d(in_channels=self.in_channels,
                              out_channels=self.value_channels,
                              kernel_size=1,
                              stride=1,
                              padding=0)
     self.W = nn.Conv2d(in_channels=self.value_channels,
                        out_channels=self.out_channels,
                        kernel_size=1,
                        stride=1,
                        padding=0)
     # Zero-init W so the attention branch starts as a no-op.
     # Fix: `nn.init.constant` is deprecated and removed in modern PyTorch;
     # use the in-place `constant_` variant.
     nn.init.constant_(self.W.weight, 0)
     nn.init.constant_(self.W.bias, 0)
# --- Example 18 ---
 def _make_stage(self, features, out_features, size):
     """Build one pyramid stage: adaptive pool to `size`, 1x1 conv, BN/act.

     Args:
         features: input channel count.
         out_features: channels after the 1x1 projection.
         size: target spatial size of the adaptive average pool.

     Returns:
         nn.Sequential implementing the stage.
     """
     pooling = nn.AdaptiveAvgPool2d(output_size=(size, size))
     projection = nn.Conv2d(features, out_features, kernel_size=1, bias=False)
     normalization = InPlaceABNSync(out_features)
     return nn.Sequential(pooling, projection, normalization)
# --- Example 19 ---
    def __init__(self, args):
        """3D CNN: conv stem, four PostRes stages, pooling/dropout, and a small
        MLP feeding binary and multi-class heads.

        Args:
            args: config namespace; only `args.multiple_channel` (multi-class
                head width) is read here.
        """

        super(Net, self).__init__()

        # self.preBlock = nn.Sequential(
        #     nn.Conv3d(1, 32, kernel_size = (1 ,3 ,3), padding = (0 ,1 ,1)),
        #     InPlaceABNSync(32),
        #     nn.Conv3d(32, 32, kernel_size = (1 ,3 ,3), padding = (0 ,1 ,1)),
        #     InPlaceABNSync(32),
        #     # nn.MaxPool3d(kernel_size=(2,2,2),stride=(2, 1, 1),return_indices=False), #add by lxw
        #     nn.Conv3d(32, 32, kernel_size = (1 ,3 ,3), padding = (0 ,1 ,1)),
        #     InPlaceABNSync(32))
        self.args = args
        self.multiple_channel = self.args.multiple_channel

        # Stem: four 3x3x3 conv + ABN pairs on a 2-channel input volume.
        self.preBlock = nn.Sequential(
            nn.Conv3d(2, 32, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            InPlaceABNSync(32),
            nn.Conv3d(32, 32, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            InPlaceABNSync(32),
            nn.Conv3d(32, 32, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            InPlaceABNSync(32),
            nn.Conv3d(32, 32, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            InPlaceABNSync(32))

        # Forward path: 4 stages of PostRes blocks; channel plan per stage.
        num_blocks_forw = [4, 4, 8, 8]
        self.featureNum_forw = [32, 32, 64, 64, 64]

        # Stages registered dynamically as 'forw1'..'forw4'; only the first
        # block of each stage changes the channel count.
        for i in range(len(num_blocks_forw)):
            blocks = []
            for j in range(num_blocks_forw[i]):
                if j == 0:
                    blocks.append(
                        PostRes(self.featureNum_forw[i],
                                self.featureNum_forw[i + 1]))
                else:
                    blocks.append(
                        PostRes(self.featureNum_forw[i + 1],
                                self.featureNum_forw[i + 1]))
            # Construction-time log of the stage's in/out channels.
            print('forw' + str(i + 1), self.featureNum_forw[i],
                  self.featureNum_forw[i + 1])
            setattr(self, 'forw' + str(i + 1), nn.Sequential(*blocks))

        self.maxpool = nn.MaxPool3d(kernel_size=2,
                                    stride=2,
                                    return_indices=True)

        # NOTE(review): despite the `_2d` name this is a 3D pool with a
        # 2x2x2 kernel — identical configuration to self.maxpool above.
        self.maxpool_2d = nn.MaxPool3d(kernel_size=(2, 2, 2),
                                       stride=(2, 2, 2),
                                       return_indices=True)

        self.drop = nn.Dropout3d(p=0.5, inplace=False)

        self.average_pooling = torch.nn.AvgPool3d((3, 3, 3), stride=1)

        # MLP head over the flattened 64x1x1x1 pooled features.
        self.dense_connect_cnn = torch.nn.Sequential(
            torch.nn.Linear(64 * 1 * 1 * 1, 128), torch.nn.Dropout(0.5),
            torch.nn.ReLU(), torch.nn.Linear(128, 128), torch.nn.Dropout(0.5),
            torch.nn.ReLU())

        self.binary_classification_out = torch.nn.Linear(128, 2)
        self.multi_classification_out = torch.nn.Linear(
            128, self.multiple_channel)

        # Slow down BN running-stat updates for all normalization layers.
        for m in self.modules():
            if isinstance(m, nn.BatchNorm3d) or isinstance(m, InPlaceABNSync):
                m.momentum = 0.01