Example #1
    def __init__(self, block, layers, num_classes):
        self.inplanes = 128
        super(ResNet, self).__init__()
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # overridden by the ceil_mode version below

        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1,1,1)) # we do not apply multi-grid method here

        # extra added layers
        self.head = nn.Sequential(
            nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(512),
            NonLocal2d(inplanes=512, ratio=256, downsample=False),
            nn.Dropout2d(0.05)
            )
        self.cls = nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(512),
            nn.Dropout2d(0.05),
            nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
        )
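
The constructor above only registers layers; the matching forward pass is not part of the snippet. Below is a minimal sketch of how such a network is typically wired, assuming the usual dual-output design (main head plus a deep-supervision branch taken after layer3); the method body and the omitted upsampling of the outputs are assumptions, not the original implementation.

    def forward(self, x):
        # Stem: three 3x3 conv + BN + ReLU blocks, then max pooling (sketch).
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x_dsn = self.dsn(x)   # auxiliary prediction from the 1024-channel layer3 features
        x = self.layer4(x)
        x = self.head(x)      # 3x3 conv + InPlaceABNSync + NonLocal2d context + dropout
        x = self.cls(x)       # final 1x1 classifier
        return [x, x_dsn]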
Example #2
    def __init__(self, block, layers, num_classes):
        self.inplanes = 128
        super(ResNet, self).__init__()
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    ceil_mode=True)  # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=1,
                                       dilation=2)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=1,
                                       dilation=4,
                                       multi_grid=(1, 1, 1))

        # extra added layers
        self.context = nn.Sequential(
            nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(512),
            BaseOC_Module(in_channels=512,
                          out_channels=512,
                          key_channels=256,
                          value_channels=256,
                          dropout=0.05,
                          sizes=([1])))
        self.cls = nn.Conv2d(512,
                             num_classes,
                             kernel_size=1,
                             stride=1,
                             padding=0,
                             bias=True)
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(512), nn.Dropout2d(0.05),
            nn.Conv2d(512,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
Example #3
    def __init__(self, block, layers, num_classes):
        self.inplanes = 128
        super(ResNet, self).__init__()
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)  # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1,2,4))

        self.head = nn.Sequential(
            ASPPModule(2048),
            nn.Conv2d(512, num_classes, kernel_size=1)
            )

        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, padding=1, bias=False),
            InPlaceABNSync(512),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_classes, kernel_size=1)
            )
Example #4
    def __init__(self,
                 in_channels,
                 key_channels,
                 value_channels,
                 out_channels=None,
                 scale=1):
        super(_PyramidSelfAttentionBlock, self).__init__()
        self.scale = scale
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.key_channels = key_channels
        self.value_channels = value_channels
        if out_channels is None:
            self.out_channels = in_channels
        self.f_key = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels,
                      out_channels=self.key_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0), InPlaceABNSync(self.key_channels))
        self.f_query = self.f_key  # query and key share the same 1x1 conv + BN
        self.f_value = nn.Conv2d(in_channels=self.in_channels,
                                 out_channels=self.value_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.W = nn.Conv2d(in_channels=self.value_channels,
                           out_channels=self.out_channels,
                           kernel_size=1,
                           stride=1,
                           padding=0)
        # Zero-init W so the attention block initially contributes nothing.
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
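
Only the constructor is shown; the attention computation itself happens in forward. The sketch below shows the core scaled dot-product attention these 1x1 projections are normally used for, ignoring the pyramid partitioning and the optional scale pooling; torch and torch.nn.functional (as F) are assumed imports, and the body is an illustration rather than the original code.

    def forward(self, x):
        # Core non-local attention sketch (pyramid partitioning omitted); N = H*W.
        b, _, h, w = x.size()
        query = self.f_query(x).view(b, self.key_channels, -1).permute(0, 2, 1)    # b x N x key
        key = self.f_key(x).view(b, self.key_channels, -1)                         # b x key x N
        value = self.f_value(x).view(b, self.value_channels, -1).permute(0, 2, 1)  # b x N x value
        sim_map = torch.matmul(query, key) * (self.key_channels ** -0.5)  # scaled dot product
        sim_map = F.softmax(sim_map, dim=-1)
        context = torch.matmul(sim_map, value)                            # b x N x value
        context = context.permute(0, 2, 1).contiguous().view(b, self.value_channels, h, w)
        return self.W(context)  # W is zero-initialized, so the block starts by emitting zeros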
Example #5
    def __init__(self, in_channels, out_channels, dropout, sizes=([1])):
        super(Pyramid_OC_Module, self).__init__()
        self.group = len(sizes)
        self.stages = []
        self.stages = nn.ModuleList([
            self._make_stage(in_channels, out_channels, in_channels // 2,
                             in_channels, size) for size in sizes
        ])
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(2 * in_channels * self.group,
                      out_channels,
                      kernel_size=1,
                      padding=0), InPlaceABNSync(out_channels),
            nn.Dropout2d(dropout))
        self.up_dr = nn.Sequential(
            nn.Conv2d(in_channels,
                      in_channels * self.group,
                      kernel_size=1,
                      padding=0), InPlaceABNSync(in_channels * self.group))
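
For reference, a hypothetical instantiation consistent with the channel arithmetic above: with four pyramid sizes the fusion convolution expects 2 * in_channels * 4 input channels (the pyramid contexts concatenated with the up-projected input). The sizes values and tensor shapes below are illustrative assumptions, not taken from the snippet.

    # Hypothetical usage on 2048-channel backbone features.
    pyramid_oc = Pyramid_OC_Module(in_channels=2048, out_channels=512,
                                   dropout=0.05, sizes=[1, 2, 3, 6])
    # feats = torch.randn(1, 2048, 97, 97)
    # out = pyramid_oc(feats)   # expected: (1, 512, 97, 97), spatial size preserved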
Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 key_channels,
                 value_channels,
                 dropout,
                 sizes=([1])):
        super(BaseOC_Module, self).__init__()
        self.stages = []
        self.stages = nn.ModuleList([
            self._make_stage(in_channels, out_channels, key_channels,
                             value_channels, size) for size in sizes
        ])
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(2 * in_channels, out_channels, kernel_size=1, padding=0),
            InPlaceABNSync(out_channels), nn.Dropout2d(dropout))
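
The constructor registers one self-attention stage per entry in sizes plus a fusion layer. A minimal sketch of the usual forward pass, assuming each stage returns a context map with the same channel count as its input (torch is an assumed import and the summation/concatenation scheme is an assumption):

    def forward(self, feats):
        # Run every attention stage on the same features and sum the contexts.
        priors = [stage(feats) for stage in self.stages]
        context = priors[0]
        for prior in priors[1:]:
            context = context + prior
        # Fuse context with the original features: 2 * in_channels -> out_channels.
        return self.conv_bn_dropout(torch.cat([context, feats], dim=1))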
Example #7
    def __init__(self, features, out_features=512, dilations=(12, 24, 36)):
        super(ASP_OC_Module, self).__init__()
        self.context = nn.Sequential(
            nn.Conv2d(features,
                      out_features,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=True), InPlaceABNSync(out_features),
            BaseOC_Context_Module(in_channels=out_features,
                                  out_channels=out_features,
                                  key_channels=out_features // 2,
                                  value_channels=out_features,
                                  dropout=0,
                                  sizes=([2])))
        self.conv2 = nn.Sequential(
            nn.Conv2d(features,
                      out_features,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_features))
        self.conv3 = nn.Sequential(
            nn.Conv2d(features,
                      out_features,
                      kernel_size=3,
                      padding=dilations[0],
                      dilation=dilations[0],
                      bias=False), InPlaceABNSync(out_features))
        self.conv4 = nn.Sequential(
            nn.Conv2d(features,
                      out_features,
                      kernel_size=3,
                      padding=dilations[1],
                      dilation=dilations[1],
                      bias=False), InPlaceABNSync(out_features))
        self.conv5 = nn.Sequential(
            nn.Conv2d(features,
                      out_features,
                      kernel_size=3,
                      padding=dilations[2],
                      dilation=dilations[2],
                      bias=False), InPlaceABNSync(out_features))

        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(out_features * 5,
                      out_features,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_features),
            nn.Dropout2d(0.1))
Example #8
    def __init__(self,
                 backbone='ResNet50',
                 pyramid_pooling='deeplabv3',
                 embedding=128,
                 batch_mode='sync'):
        super(InterFrameNet, self).__init__()
        if pyramid_pooling == 'deeplabv3':
            self.features = ResNetDeepLabv3(backbone,
                                            num_classes=embedding,
                                            batch_mode=batch_mode)
        elif pyramid_pooling == 'pspnet':
            raise RuntimeError('Pooling module not implemented')
        else:
            raise RuntimeError('Unknown pyramid pooling module')
        self.cls = nn.Sequential(
            nn.Conv2d(2 * embedding,
                      embedding,
                      kernel_size=1,
                      stride=1,
                      padding=0), InPlaceABNSync(embedding),
            nn.Dropout2d(0.10),
            nn.Conv2d(embedding, 1, kernel_size=1, stride=1, padding=0))
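
The cls head takes 2 * embedding input channels, which suggests the embeddings of two frames are concatenated along the channel dimension before the single-channel output is produced. A hypothetical usage sketch under that assumption (the call pattern on self.features and self.cls is not confirmed by the snippet):

    # Hypothetical usage with the defaults from the constructor above.
    net = InterFrameNet(backbone='ResNet50', pyramid_pooling='deeplabv3', embedding=128)
    # emb_a = net.features(frame_a)                       # (B, 128, H', W')  -- assumed
    # emb_b = net.features(frame_b)
    # score = net.cls(torch.cat([emb_a, emb_b], dim=1))   # (B, 1, H', W')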
Example #9
    def __init__(self, features, hidden_features=512, out_features=512, dilations=(12, 24, 36)):
        super(ASPPModule, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv2d(features, hidden_features, kernel_size=1, bias=False),
                                   InPlaceABNSync(hidden_features))
        self.conv2 = nn.Sequential(nn.Conv2d(features, hidden_features, kernel_size=3, padding=dilations[0], dilation=dilations[0], bias=False),
                                   InPlaceABNSync(hidden_features))
        self.conv3 = nn.Sequential(nn.Conv2d(features, hidden_features, kernel_size=3, padding=dilations[1], dilation=dilations[1], bias=False),
                                   InPlaceABNSync(hidden_features))
        self.conv4 = nn.Sequential(nn.Conv2d(features, hidden_features, kernel_size=3, padding=dilations[2], dilation=dilations[2], bias=False),
                                   InPlaceABNSync(hidden_features))
        self.image_pooling = nn.Sequential(nn.AdaptiveAvgPool2d((1,1)),
                                   nn.Conv2d(features, hidden_features, kernel_size=1, bias=False),
                                   InPlaceABNSync(hidden_features))

        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(hidden_features * 5, out_features, kernel_size=1, bias=False),
            InPlaceABNSync(out_features),
            nn.Dropout2d(0.1)
            )
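
The image-pooling branch reduces the feature map to 1x1, so its output has to be upsampled back before it can be concatenated with the four convolutional branches. A sketch of the usual DeepLabv3-style forward, assuming bilinear upsampling and torch / torch.nn.functional (as F) imports; this is an illustration, not the original code.

    def forward(self, x):
        _, _, h, w = x.size()
        # Global context branch: pool to 1x1, project, then upsample back to (h, w).
        pooled = F.interpolate(self.image_pooling(x), size=(h, w),
                               mode='bilinear', align_corners=True)
        # Concatenate the five hidden_features-channel branches -> hidden_features * 5 channels.
        out = torch.cat([pooled, self.conv1(x), self.conv2(x),
                         self.conv3(x), self.conv4(x)], dim=1)
        return self.conv_bn_dropout(out)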
Example #10
    def __init__(self):
        super(ASPP, self).__init__()

        self.avg_pool = nn.AdaptiveAvgPool2d(1)

        self.conv_img = nn.Sequential(nn.Conv2d(4*512, 512, kernel_size=1, padding=0, dilation=1, bias=False),
                                      InPlaceABNSync(512))

        self.conv1 = nn.Sequential(nn.Conv2d(4*512, 512, kernel_size=1, padding=0, dilation=1, bias=False),
                                   InPlaceABNSync(512))
        self.conv2 = nn.Sequential(nn.Conv2d(4*512, 512, kernel_size=3, padding=12, dilation=12, bias=False),
                                   InPlaceABNSync(512))
        self.conv3 = nn.Sequential(nn.Conv2d(4*512, 512, kernel_size=3, padding=24, dilation=24, bias=False),
                                   InPlaceABNSync(512))
        self.conv4 = nn.Sequential(nn.Conv2d(4*512, 512, kernel_size=3, padding=36, dilation=36, bias=False),
                                   InPlaceABNSync(512))

        self.conv_bn_dropout = nn.Sequential(nn.Conv2d(5*512, 512, kernel_size=1, padding=0, dilation=1, bias=False),
                                             InPlaceABNSync(512),
                                             nn.Dropout2d(0.1))