Esempio n. 1
0
    def __init__(self, features, in_features):
        """Build the context-aggregation module.

        Args:
            features: channel count of the incoming feature map.
            in_features: channel count produced by each branch.
        """
        super(CAM, self).__init__()

        # Pooled branch: reduce to a 3x3 grid, then two conv+ABN stages.
        self.pool = nn.Sequential(
            nn.AdaptiveAvgPool2d(output_size=(3, 3)),
            nn.Conv2d(features, features, kernel_size=1, bias=False),
            InPlaceABNSync(features),
            nn.Conv2d(features, in_features, kernel_size=3, padding=1, bias=False),
            InPlaceABNSync(in_features),
        )

        # Full-resolution branch with the same conv stack, minus the pooling.
        self.adapt = nn.Sequential(
            nn.Conv2d(features, features, kernel_size=1, bias=False),
            InPlaceABNSync(features),
            nn.Conv2d(features, in_features, kernel_size=3, padding=1, bias=False),
            InPlaceABNSync(in_features),
        )

        # 2-channel output head over both branches concatenated — presumably a
        # sampling-offset field (confirm in forward()).
        self.delta_gen = nn.Sequential(
            nn.Conv2d(in_features * 2, in_features, kernel_size=1, bias=False),
            InPlaceABNSync(in_features),
            nn.Conv2d(in_features, 2, kernel_size=3, padding=1, bias=False),
        )
        # Zero-init the last conv so training starts from a zero prediction.
        self.delta_gen[2].weight.data.zero_()
Esempio n. 2
0
    def __init__(self, in_channels, out_channels, num_classes):
        """Recurrent criss-cross attention head.

        Args:
            in_channels: channels of the backbone feature map.
            out_channels: channels of the bottleneck output.
            num_classes: number of segmentation classes predicted.
        """
        super(RCCAModule, self).__init__()
        inter_channels = in_channels // 4
        # Channel reduction before the attention stage.
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            InPlaceABNSync(inter_channels))
        self.cca = CrissCrossAttention(inter_channels)
        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False), InPlaceABNSync(inter_channels))

        # Bottleneck expects the input (in_channels) concatenated with the
        # attended features (inter_channels), then classifies.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels,
                      out_channels,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_channels),
            nn.Dropout2d(0.1),
            # BUGFIX: the classifier's input width was hard-coded to 512; it
            # must match out_channels or any other value crashes at runtime.
            nn.Conv2d(out_channels,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
Esempio n. 3
0
    def __init__(self, features):
        """Two identical zero-initialised 2-channel prediction branches."""
        super(CAB, self).__init__()

        def build_branch():
            # 1x1 reduction -> ABN -> 3x3 conv emitting a 2-channel map.
            return nn.Sequential(
                nn.Conv2d(features * 2, features, kernel_size=1, bias=False),
                InPlaceABNSync(features),
                nn.Conv2d(features, 2, kernel_size=3, padding=1, bias=False))

        self.delta_gen1 = build_branch()
        self.delta_gen2 = build_branch()

        # Zero the final conv weights so both branches start by predicting 0.
        self.delta_gen1[2].weight.data.zero_()
        self.delta_gen2[2].weight.data.zero_()
    def __init__(self, block, layers, num_classes, criterion):
        """ResNet backbone with an ASPP head and an auxiliary (dsn) head.

        Args:
            block: residual block class passed to _make_layer.
            layers: per-stage block counts, e.g. [3, 4, 23, 3].
            num_classes: number of output classes for both heads.
            criterion: loss module, stored for use elsewhere.
        """
        self.inplanes = 128
        super(ResNet, self).__init__()
        # Deep stem: three 3x3 convs instead of one 7x7.
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)

        self.relu = nn.ReLU(inplace=False)
        # BUGFIX: the original assigned self.maxpool twice; the earlier
        # (ceil_mode=False) assignment was dead code and has been removed.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # Dilated stages keep stride 1 so output resolution stays at 1/8.
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1,1,1))

        self.head = nn.Sequential(ASPPModule(2048),
            nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True))

        # Auxiliary supervision head on the 1024-channel stage-3 output.
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(512),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
            )
        self.criterion = criterion
Esempio n. 5
0
    def __init__(self, block, layers, num_classes):
        """CE2P-style network: PSP context, edge branch, decoder, fusion head."""
        self.inplanes = 128
        super(ResNet, self).__init__()

        # Deep stem: three stacked 3x3 convs.
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # Final stage is dilated instead of strided.
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=1, dilation=2, multi_grid=(1, 1, 1))

        self.context_encoding = PSPModule(2048, 512)

        self.edge = Edge_Module()
        self.decoder = Decoder_Module(num_classes)

        # 1024-channel input reduced down to num_classes (attribute name
        # 'fushion' [sic] kept for checkpoint compatibility).
        self.fushion = nn.Sequential(
            nn.Conv2d(1024, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(256),
            nn.Dropout2d(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True),
        )
Esempio n. 6
0
 def __init__(self, inChannels, growth_Channels, z_dilation=1, z_padding=1):
     """Single BN -> 3D-conv dense layer.

     Args:
         inChannels: input channel count.
         growth_Channels: channels produced (added) by this layer.
         z_dilation: dilation along the depth (z) axis. The default 1
             reproduces the original isotropic behaviour exactly.
         z_padding: padding along the depth (z) axis; should match
             z_dilation so the depth dimension is preserved.

     Note: the generalized signature accepts the z_dilation/z_padding
     keyword arguments that _make_denseBN already passes; the previous
     signature rejected them with a TypeError.
     """
     super(SingleLayerBN, self).__init__()
     self.bn = InPlaceABNSync(inChannels)
     # Defaults (1, 1) give padding=(1,1,1), dilation=(1,1,1) — identical to
     # the original kernel_size=3, padding=1 convolution.
     self.conv = nn.Conv3d(inChannels,
                           growth_Channels,
                           kernel_size=3,
                           padding=(z_padding, 1, 1),
                           dilation=(z_dilation, 1, 1))
Esempio n. 7
0
    def _make_denseBN(
            self,
            inChannels,
            growth_Channels,
            nDenseLayers,
            z_dilation=(7, 5, 2, 1),
            z_padding=(7, 5, 2, 1),
    ):
        """Build a dense 3D conv block capped by a single ABN layer."""
        blocks = []
        width = inChannels
        for idx in range(int(nDenseLayers)):
            if idx == 0:
                # First layer is a plain conv that resets the running width.
                blocks.append(
                    nn.Conv3d(width, growth_Channels, kernel_size=3, padding=1))
                width = growth_Channels
            else:
                # Later layers grow densely, cycling through the z settings.
                # (Both tuples are indexed modulo len(z_dilation), matching
                # the original behaviour.)
                cycle = idx % len(z_dilation)
                blocks.append(
                    SingleLayerBN(
                        width,
                        growth_Channels,
                        z_dilation=z_dilation[cycle],
                        z_padding=z_padding[cycle],
                    ))
                width += growth_Channels
        blocks.append(InPlaceABNSync(width))

        return nn.Sequential(*blocks)
Esempio n. 8
0
    def __init__(self, in_fea=(256, 512, 1024), mid_fea=256, out_fea=2):
        """Edge-detection branch over three backbone stages.

        Args:
            in_fea: channel counts of the three input feature maps.
                BUGFIX: was a mutable list default; a tuple avoids the
                shared-mutable-default pitfall and is interchangeable here
                (the value is only indexed).
            mid_fea: intermediate channel count of each projection.
            out_fea: channels of each edge prediction (default 2).
        """
        super(Edge_Module, self).__init__()

        # One 1x1 projection per input stage.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_fea[0], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(mid_fea)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_fea[1], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(mid_fea)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_fea[2], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(mid_fea)
        )
        # Per-stage edge predictor and the fusion conv over all three stages.
        self.conv4 = nn.Conv2d(mid_fea, out_fea, kernel_size=3, padding=1, dilation=1, bias=True)
        self.conv5 = nn.Conv2d(out_fea * 3, out_fea, kernel_size=1, padding=0, dilation=1, bias=True)
Esempio n. 9
0
    def __init__(self, num_classes):
        """Decoder: project context and low-level features, fuse, classify."""
        super(Decoder_Module, self).__init__()

        # 512 -> 256 projection of the context features.
        self.conv1 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(256))

        # 256 -> 48 projection of the low-level features.
        self.conv2 = nn.Sequential(
            nn.Conv2d(256, 48, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(48))

        # Fusion over the concatenated 256 + 48 = 304 channels.
        self.conv3 = nn.Sequential(
            nn.Conv2d(304, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(256),
            nn.Conv2d(256, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(256))

        # Final classifier.
        self.conv4 = nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True)
Esempio n. 10
0
 def _make_stage(self, features, out_features, size):
     """One pyramid stage: pool to (size, size), project, normalise, activate."""
     return nn.Sequential(
         nn.AdaptiveAvgPool2d(output_size=(size, size)),
         nn.Conv2d(features, out_features, kernel_size=1, bias=False),
         InPlaceABNSync(out_features),
         act_cfg(),
     )
Esempio n. 11
0
    def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)):
        """Pyramid scene parsing module.

        Args:
            features: input channel count.
            out_features: channels of each stage and of the bottleneck output.
            sizes: pooled grid sizes, one pyramid stage per entry.
        """
        super(PSPModule, self).__init__()

        # BUGFIX: removed the dead `self.stages = []` assignment that was
        # immediately overwritten by the ModuleList below.
        self.stages = nn.ModuleList(
            [self._make_stage(features, out_features, size) for size in sizes])
        # Fuses the input with all pyramid stages concatenated.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(features + len(sizes) * out_features, out_features, kernel_size=3, padding=1, dilation=1,
                      bias=False),
            InPlaceABNSync(out_features),
        )
Esempio n. 12
0
 def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
     """Strided 3D conv followed by in-place synchronised ABN."""
     super(DownBlockBN, self).__init__()
     self.conv = nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                           stride=stride, padding=padding)
     self.bn = InPlaceABNSync(out_planes)
Esempio n. 13
0
    def __init__(self, features, inner_features=256, out_features=512, dilations=(12, 24, 36)):
        """Atrous spatial pyramid pooling with five parallel branches."""
        super(ASPPModule, self).__init__()

        d1, d2, d3 = dilations

        # Branch 1: image-level pooling + 1x1 projection.
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(inner_features))
        # Branch 2: plain 1x1 projection.
        self.conv2 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(inner_features))
        # Branches 3-5: 3x3 convs at increasing dilation rates (padding equals
        # dilation so spatial size is preserved).
        self.conv3 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=d1, dilation=d1, bias=False),
            InPlaceABNSync(inner_features))
        self.conv4 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=d2, dilation=d2, bias=False),
            InPlaceABNSync(inner_features))
        self.conv5 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=d3, dilation=d3, bias=False),
            InPlaceABNSync(inner_features))

        # Fuse all five branches, then regularise with dropout.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(inner_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_features),
            nn.Dropout2d(0.1))
Esempio n. 14
0
    def BNReLU(num_features, bn_type=None, **kwargs):
        """Build a normalisation (+ReLU) layer selected by `bn_type`.

        Args:
            num_features: channel count for the norm layer.
            bn_type: one of 'torchbn', 'torchsyncbn', 'syncbn', 'sn', 'gn',
                'inplace_abn'; 'fn' and unknown values abort via Log.error.
            **kwargs: forwarded to the chosen norm constructor.

        Returns:
            nn.Sequential(norm, ReLU) for most types, or a bare
            InPlaceABNSync (which fuses its own activation) for
            'inplace_abn'.
        """
        if bn_type == 'torchbn':
            return nn.Sequential(nn.BatchNorm2d(num_features, **kwargs),
                                 nn.ReLU())
        elif bn_type == 'torchsyncbn':
            return nn.Sequential(nn.SyncBatchNorm(num_features, **kwargs),
                                 nn.ReLU())
        elif bn_type == 'syncbn':
            from ..extensions.syncbn.module import BatchNorm2d
            return nn.Sequential(BatchNorm2d(num_features, **kwargs),
                                 nn.ReLU())
        elif bn_type == 'sn':
            from ..extensions.switchablenorms.switchable_norm import SwitchNorm2d
            return nn.Sequential(SwitchNorm2d(num_features, **kwargs),
                                 nn.ReLU())
        elif bn_type == 'gn':
            return nn.Sequential(
                nn.GroupNorm(num_groups=8, num_channels=num_features,
                             **kwargs), nn.ReLU())
        elif bn_type == 'fn':
            Log.error('Not support Filter-Response-Normalization: {}.'.format(
                bn_type))
            exit(1)
        elif bn_type == 'inplace_abn':
            # NOTE(review): the [:3] truncation makes torch '1.10'+ match the
            # '1.1' branch below — confirm whether such builds are in use.
            torch_ver = torch.__version__[:3]
            if torch_ver == '0.4':
                from ..extensions.inplace_abn.bn import InPlaceABNSync
            elif torch_ver in ('1.0', '1.1'):
                from ..extensions.inplace_abn_1.bn import InPlaceABNSync
            else:
                # BUGFIX: previously only '1.2' was handled and every newer
                # torch version fell through, silently returning None. All
                # remaining versions use the pip-installed `inplace_abn`.
                from inplace_abn import InPlaceABNSync
            return InPlaceABNSync(num_features, **kwargs)
        else:
            Log.error('Not support BN type: {}.'.format(bn_type))
            exit(1)
Esempio n. 15
0
    def __init__(self, features, out_features=512):
        """Residual refinement block: unify channels, then a 2-conv residual."""
        super(RRB, self).__init__()

        squeezed = out_features // 4

        # 1x1 conv bringing the input to out_features channels.
        self.unify = nn.Conv2d(
            features, out_features, kernel_size=1, padding=0, dilation=1, bias=False)
        # Residual body: squeeze to a quarter width, then expand back.
        self.residual = nn.Sequential(
            nn.Conv2d(out_features, squeezed, kernel_size=3, padding=1,
                      dilation=1, bias=False),
            InPlaceABNSync(squeezed),
            nn.Conv2d(squeezed, out_features, kernel_size=3, padding=1,
                      dilation=1, bias=False))
        self.norm = InPlaceABNSync(out_features)
Esempio n. 16
0
 def _make_denseBN(self, inChannels, growth_Channels, nDenseLayers):
     """Dense 3D conv block: one plain conv, then densely-growing layers."""
     modules = []
     width = inChannels
     for idx in range(int(nDenseLayers)):
         if idx == 0:
             # First layer is a plain conv that resets the running width.
             modules.append(
                 nn.Conv3d(width, growth_Channels, kernel_size=3, padding=1))
             width = growth_Channels
         else:
             # Later layers concatenate densely, growing the width.
             modules.append(SingleLayerBN(width, growth_Channels))
             width += growth_Channels
     modules.append(InPlaceABNSync(width))
     return nn.Sequential(*modules)
Esempio n. 17
0
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size,
              stride=1,
              padding=0,
              dilation=1):
     """Pre-activation 3D conv: ABN on the input, then the convolution."""
     super(PreConvBN3d, self).__init__()
     self.bn = InPlaceABNSync(in_planes)
     self.conv = nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                           stride=stride, padding=padding, dilation=dilation)
Esempio n. 18
0
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size,
              stride=1,
              padding=0,
              dilation=1):
     """Transposed 3D conv (upsampling) followed by in-place ABN."""
     super(BasicConvTransposeBN3d, self).__init__()
     self.convTranspose = nn.ConvTranspose3d(in_planes, out_planes,
                                             kernel_size=kernel_size,
                                             stride=stride, padding=padding,
                                             dilation=dilation)
     self.bn = InPlaceABNSync(out_planes)
Esempio n. 19
0
File: __init__.py Project: w-hc/pcv
def convert_inplace_sync_batchnorm(module, process_group=None):
    r"""Helper function to convert every `torch.nn.BatchNormND` layer in the
    model to an `InPlaceABNSync` layer (and every `torch.nn.ReLU` to a fresh
    non-inplace `torch.nn.ReLU`), recursively.
    Args:
        module (nn.Module): containing module
        process_group (optional): process group to scope synchronization,
    default is the whole world
    Returns:
        The original module tree with the converted `InPlaceABNSync` layers
    Example::
        >>> # Network with nn.BatchNorm layer
        >>> module = torch.nn.Sequential(
        >>>            torch.nn.Linear(20, 100),
        >>>            torch.nn.BatchNorm1d(100)
        >>>          ).cuda()
        >>> # creating process group (optional)
        >>> # process_ids is a list of int identifying rank ids.
        >>> process_group = torch.distributed.new_group(process_ids)
        >>> sync_bn_module = convert_inplace_sync_batchnorm(module, process_group)
    """
    from inplace_abn import InPlaceABNSync
    module_output = module
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        # activation='identity' so the ABN behaves like a plain (sync) BN
        # rather than fusing an activation into the layer.
        module_output = InPlaceABNSync(module.num_features,
                                        module.eps, module.momentum,
                                        module.affine,
                                        activation='identity',
                                        group=process_group)
        if module.affine:
            # Copy learned affine parameters into the replacement layer.
            module_output.weight.data = module.weight.data.clone().detach()
            module_output.bias.data = module.bias.data.clone().detach()
            # keep requires_grad unchanged
            module_output.weight.requires_grad = module.weight.requires_grad
            module_output.bias.requires_grad = module.bias.requires_grad
        # Share the running statistics with the original layer.
        module_output.running_mean = module.running_mean
        module_output.running_var = module.running_var
    if isinstance(module, torch.nn.ReLU):
        # Replaced with a default (non-inplace) ReLU — presumably because the
        # in-place ABN already mutates its input and an inplace ReLU after it
        # could corrupt tensors saved for backward; TODO confirm.
        module_output = torch.nn.ReLU()
    # Recurse into children, re-attaching each converted subtree.
    for name, child in module.named_children():
        module_output.add_module(name, convert_inplace_sync_batchnorm(child, process_group))
    del module
    return module_output
Esempio n. 20
0
    def __init__(self, block, layers, num_classes, criterion):
        """DFN-style ResNet: CAM context with per-stage RRB/CAB refinement.

        Args:
            block: residual block class for _make_layer.
            layers: per-stage block counts.
            num_classes: output classes for both heads.
            criterion: loss module, stored for use elsewhere.
        """
        self.inplanes = 128
        super(ResNet, self).__init__()
        # Deep stem: three 3x3 convs instead of one 7x7.
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)

        self.relu = nn.ReLU(inplace=False)
        # BUGFIX: removed the earlier dead self.maxpool assignment (without
        # ceil_mode) that this one immediately overwrote.
        self.maxpool = nn.MaxPool2d(kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    ceil_mode=True)  # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Context module on the deepest (2048-channel) features.
        self.CAM = CAM(2048, 256)
        # Top-down refinement: one RRB/CAB/RRB group per backbone stage.
        self.RRB5a = RRB(256, 256)
        self.CAB5 = CAB(256)
        self.RRB5b = RRB(256, 256)
        self.RRB4a = RRB(2048, 256)
        self.CAB4 = CAB(256)
        self.RRB4b = RRB(256, 256)
        self.RRB3a = RRB(1024, 256)
        self.CAB3 = CAB(256)
        self.RRB3b = RRB(256, 256)
        self.RRB2a = RRB(512, 256)
        self.CAB2 = CAB(256)
        self.RRB2b = RRB(256, 256)
        self.RRB1a = RRB(256, 256)

        # Auxiliary supervision head.
        self.dsn = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(256), nn.Dropout2d(0.1),
            nn.Conv2d(256,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))

        # Main classification head.
        self.head = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(256), nn.Dropout2d(0.1),
            nn.Conv2d(256,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
        self.criterion = criterion
Esempio n. 21
0
    def __init__(self, block, layers, num_classes, deep_base=True):
        """PSP-style ResNet whose head width depends on the depth config."""
        super(ResNet, self).__init__()
        self.deep_base = deep_base

        if not self.deep_base:
            # Classic stem: a single 7x7 conv.
            self.inplanes = 64
            self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                                   bias=False)
            self.bn1 = BatchNorm2d(64)
        else:
            # Deep stem: three stacked 3x3 convs.
            self.inplanes = 128
            self.conv1 = conv3x3(3, 64, stride=2)
            self.bn1 = BatchNorm2d(64)
            self.relu1 = nn.ReLU(inplace=False)
            self.conv2 = conv3x3(64, 64)
            self.bn2 = BatchNorm2d(64)
            self.relu2 = nn.ReLU(inplace=False)
            self.conv3 = conv3x3(64, 128)
            self.bn3 = BatchNorm2d(128)
            self.relu3 = nn.ReLU(inplace=False)

        self.relu = nn.ReLU(inplace=False)
        # ceil_mode=True follows the skd codebase (mmseg uses ceil_mode=False).
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1,
                                    ceil_mode=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # Dilated stages: stride 1 keeps the output at 1/8 resolution.
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
                                       dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                       dilation=4, multi_grid=(1, 1, 1))

        if layers == [3, 4, 23, 3]:
            # ResNet-101 configuration: wide PSP head.
            self.pspmodule = PSPModule(2048, 512)
            self.head = nn.Conv2d(512, num_classes, kernel_size=1, stride=1,
                                  padding=0, bias=True)

            self.dsn = nn.Sequential(
                nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
                InPlaceABNSync(512), act_cfg(), nn.Dropout2d(0.1),
                nn.Conv2d(512, num_classes, kernel_size=1, stride=1,
                          padding=0, bias=True))
        elif layers == [2, 2, 2, 2]:
            # ResNet-18 configuration: narrow PSP head.
            self.pspmodule = PSPModule(512, 128)
            self.head = nn.Conv2d(128, num_classes, kernel_size=1, stride=1,
                                  padding=0, bias=True)

            self.dsn = nn.Sequential(
                nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
                InPlaceABNSync(128),
                nn.Dropout2d(0.1),
                # skd uses bias=True here; the mmseg variant uses bias=False.
                nn.Conv2d(128, num_classes, kernel_size=1, stride=1,
                          padding=0, bias=True),
            )
        else:
            raise ValueError('layers should be [3, 4, 23, 3] or [2, 2, 2, 2]')
Esempio n. 22
0
    def __init__(self):
        """3D U-Net-style encoder/decoder with InPlaceABNSync after each conv.

        The trailing comments (e.g. "32,320") preserve the original author's
        notes of the expected depth,width at each stage.
        """
        super(SKUNET100_DICE_BN_NEW_MOD4_EX5, self).__init__()

        self.relu = nn.ReLU(inplace=True)
        self.pixel_shuffle = nn.PixelShuffle(2)
        self.upsample = nn.Upsample(scale_factor=2, mode="trilinear")
        # Upsamples height/width only, leaving depth untouched.
        self.upsamplez = nn.Upsample(scale_factor=(1, 2, 2), mode="trilinear")

        # Input stem.
        self.in_layer = nn.Conv3d(1, 48, kernel_size=5, padding=2)  # 32,320
        self.in_layer_bn = InPlaceABNSync(48)

        # --- Encoder ("l" = left): stages 1-2 stride H/W only, 3-6 all axes.
        self.lconvlayer1 = nn.Conv3d(48, 48, kernel_size=(5, 5, 5),
                                     stride=(1, 2, 2), padding=(2, 2, 2))  # 32,160
        self.lconvlayer1_bn = InPlaceABNSync(48)

        self.lconvlayer2 = nn.Conv3d(48, 96, kernel_size=(5, 5, 5),
                                     stride=(1, 2, 2), padding=(2, 2, 2))  # 32,80
        self.lconvlayer2_bn = InPlaceABNSync(96)

        self.lconvlayer3 = nn.Conv3d(96, 96, kernel_size=4, stride=2, padding=1)  # 16,40
        self.lconvlayer3_bn = InPlaceABNSync(96)

        self.lconvlayer4 = nn.Conv3d(96, 192, kernel_size=4, stride=2, padding=1)  # 8,20
        self.lconvlayer4_bn = InPlaceABNSync(192)

        self.lconvlayer5 = nn.Conv3d(192, 192, kernel_size=4, stride=2, padding=1)  # 4,10
        self.lconvlayer5_bn = InPlaceABNSync(192)

        self.lconvlayer6 = nn.Conv3d(192, 192, kernel_size=4, stride=2, padding=1)  # 2, 5
        self.lconvlayer6_bn = InPlaceABNSync(192)

        # --- Decoder ("r" = right): plain convs; upsampling happens via the
        # Upsample modules above (presumably in forward() — confirm).
        self.rconvlayer7 = nn.Conv3d(192, 192, kernel_size=3, padding=1)
        self.rconvlayer7_bn = InPlaceABNSync(192)

        self.rconvTlayer6 = nn.Conv3d(192, 192, kernel_size=3, padding=1)
        self.rconvTlayer6_bn = InPlaceABNSync(192)
        self.rconvlayer6 = nn.Conv3d(192, 192, kernel_size=3, padding=1)
        self.rconvlayer6_bn = InPlaceABNSync(192)

        self.rconvTlayer5 = nn.Conv3d(192, 192, kernel_size=3, padding=1)
        self.rconvTlayer5_bn = InPlaceABNSync(192)
        self.rconvlayer5 = nn.Conv3d(192, 192, kernel_size=3, padding=1)
        self.rconvlayer5_bn = InPlaceABNSync(192)

        self.rconvTlayer4 = nn.Conv3d(192, 96, kernel_size=5, padding=2)
        self.rconvTlayer4_bn = InPlaceABNSync(96)
        self.rconvlayer4 = nn.Conv3d(96, 96, kernel_size=5, padding=2)
        self.rconvlayer4_bn = InPlaceABNSync(96)

        self.rconvTlayer3 = nn.Conv3d(96, 96, kernel_size=5, padding=2)
        self.rconvTlayer3_bn = InPlaceABNSync(96)
        self.rconvlayer3 = nn.Conv3d(96, 96, kernel_size=5, padding=2)
        self.rconvlayer3_bn = InPlaceABNSync(96)

        self.rconvTlayer2 = nn.Conv3d(96, 48, kernel_size=5, padding=2)
        self.rconvTlayer2_bn = InPlaceABNSync(48)
        self.rconvlayer2 = nn.Conv3d(48, 48, kernel_size=5, padding=2)
        self.rconvlayer2_bn = InPlaceABNSync(48)

        self.rconvTlayer1 = nn.Conv3d(48, 48, kernel_size=5, padding=2)
        self.rconvTlayer1_bn = InPlaceABNSync(48)
        self.rconvlayer1 = nn.Conv3d(48, 48, kernel_size=5, padding=2)
        self.rconvlayer1_bn = InPlaceABNSync(48)

        # Single-channel output map.
        self.out_layer = nn.Conv3d(48, 1, kernel_size=1, stride=1)
Esempio n. 23
0
    def __init__(self, block, layers, num_classes, criterion):
        """Dilated ResNet with ASPP, ACF head, and an FCN auxiliary head.

        Args:
            block: residual block class for _make_layer.
            layers: per-stage block counts.
            num_classes: output classes.
            criterion: loss module, stored for use elsewhere.
        """
        self.inplanes = 128
        super(ResNet, self).__init__()
        # Deep stem: three 3x3 convs instead of one 7x7.
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)

        self.relu = nn.ReLU(inplace=False)
        # BUGFIX: removed the earlier dead self.maxpool assignment (without
        # ceil_mode) that this one immediately overwrote.
        self.maxpool = nn.MaxPool2d(kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    ceil_mode=True)  # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # Dilated stages keep stride 1 so output resolution stays at 1/8.
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=1,
                                       dilation=2)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=1,
                                       dilation=4,
                                       multi_grid=(1, 1, 1))

        self.aspp = ASPP_Module(in_channels=2048,
                                atrous_rates=(12, 24, 36),
                                norm_layer=BatchNorm2d)

        # Auxiliary FCN head on the 1024-channel stage-3 output.
        self.auxlayer = FCNHead(1024, num_classes, BatchNorm2d)

        self.dsn = nn.Sequential(
            nn.Conv2d(2560, 512, 1, bias=False), BatchNorm2d(512),
            nn.ReLU(True), nn.Dropout2d(0.5, False),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(512), nn.ReLU(True), nn.Dropout2d(0.1, False),
            nn.Conv2d(512,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))

        self.acfhead = ACFModule(2560, 512)

        self.bottleneck = nn.Sequential(
            nn.Conv2d(3072,
                      1024,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False), InPlaceABNSync(1024),
            nn.Dropout2d(0.1, False),
            nn.Conv2d(1024,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
        self.criterion = criterion