Example #1
 def __init__(self, num_classes):
     super(Decoder_Module, self).__init__()
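     # 1x1 conv + InPlaceABNSync reductions (512→256, 256→48, 768→512); conv4 is a plain 1x1 conv with bias producing num_classes channels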
     self.conv1 = nn.Sequential(
         nn.Conv2d(512,
                   256,
                   kernel_size=1,
                   padding=0,
                   dilation=1,
                   bias=False), InPlaceABNSync(256))
     self.conv2 = nn.Sequential(
         nn.Conv2d(256,
                   48,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   dilation=1,
                   bias=False), InPlaceABNSync(48))
     self.conv3 = nn.Sequential(
         nn.Conv2d(768,
                   512,
                   kernel_size=1,
                   padding=0,
                   dilation=1,
                   bias=False), InPlaceABNSync(512))
     #  self.conv3 = nn.Sequential(
     #  nn.Conv2d(304, 256, kernel_size=3, padding=1, dilation=1, bias=False),
     #  InPlaceABNSync(256),
     #  nn.Conv2d(256, 512, kernel_size=3, padding=1, dilation=1, bias=False),
     #  InPlaceABNSync(512))
     self.conv4 = nn.Conv2d(512,
                            num_classes,
                            kernel_size=1,
                            padding=0,
                            dilation=1,
                            bias=True)
Example #2
    def __init__(self, in_channels, out_channels):
        super(DPCModule, self).__init__()
        inter_channels = in_channels // 4
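        # reduce the input with a 3x3 conv + InPlaceABNSync, attach PAModule/CAModule attention branches,
        # project each branch with dropout + 1x1 conv, and fuse the concatenation with a 3x3 bottleneck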
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            InPlaceABNSync(inter_channels), nn.ReLU())

        self.pa = PAModule(inter_channels)
        self.ca = CAModule(inter_channels)

        self.conv_red_1 = nn.Sequential(nn.Dropout2d(0.1, False),
                                        nn.Conv2d(512, out_channels, 1))
        self.conv_red_2 = nn.Sequential(nn.Dropout2d(0.1, False),
                                        nn.Conv2d(512, out_channels, 1))
        self.conv_red_3 = nn.Sequential(nn.Dropout2d(0.1, False),
                                        nn.Conv2d(512, out_channels, 1))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(inter_channels * 3 + in_channels,
                      out_channels,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_channels),
            nn.Dropout2d(0.1))
Example #3
    def __init__(self, in_channels, out_channels, num_classes):
        super(RCCAModule, self).__init__()
        inter_channels = in_channels // 4
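        # CrissCrossAttention (cca) sits between two 3x3 conv + InPlaceABNSync blocks; the bottleneck
        # concatenates input and attended features, and its final 1x1 classifier assumes out_channels == 512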
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            InPlaceABNSync(inter_channels))
        self.cca = CrissCrossAttention(inter_channels)
        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False), InPlaceABNSync(inter_channels))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels,
                      out_channels,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_channels),
            nn.Dropout2d(0.1),
            nn.Conv2d(512,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
Example #4
    def __init__(self, in_channels, out_channels, num_classes, type='seg_nl'):
        super(GCBModule, self).__init__()
        assert type in ['baseline', 'seg_nl']
        inter_channels = in_channels // 4
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            InPlaceABNSync(inter_channels))
        if type == 'seg_nl':
            self.ctb = SegNonLocal2d(inplanes=inter_channels,
                                     planes=inter_channels // 2,
                                     downsample=False,
                                     k_seg=True,
                                     q_seg=False)
        elif type == 'baseline':
            self.ctb = None
        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False), InPlaceABNSync(inter_channels))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels,
                      out_channels,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_channels),
            nn.Dropout2d(0.1),
            nn.Conv2d(512,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
Example #5
    def __init__(self,
                 num_heatmap,
                 num_pafs,
                 num_refinement_stages=5,
                 in_fea=[512, 2048],
                 mid_fea=256):
        super().__init__()
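        # conv1/conv2 adapt two backbone feature maps (in_fea) to mid_fea channels with 1x1 conv + InPlaceABNSync;
        # the Cpm, InitialStage and RefinementStage modules predict keypoint heatmaps and PAFs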
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_fea[0],
                      mid_fea,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(mid_fea))
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_fea[1],
                      mid_fea,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(mid_fea))

        self.cpm = Cpm(512, 128)
        self.initial_stage = InitialStage(128, num_heatmap, num_pafs)
        self.refinement_stages = nn.ModuleList()
        for idx in range(num_refinement_stages):
            self.refinement_stages.append(
                RefinementStage(128 + num_heatmap + num_pafs, 128, num_heatmap,
                                num_pafs))
Example #6
    def __init__(self, block, layers, num_classes):
        self.inplanes = 128
        super(ResNet, self).__init__()
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.relu = nn.ReLU(inplace=False)
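        # note: this second self.maxpool (ceil_mode=True) replaces the one defined above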
        self.maxpool = nn.MaxPool2d(kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    ceil_mode=True)  # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=1,
                                       dilation=2)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=1,
                                       dilation=4,
                                       multi_grid=(1, 1, 1))

        self.head = nn.Sequential(
            nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=0,
                      bias=True), InPlaceABNSync(512),
            nn.Conv2d(512,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))

        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(512), nn.Dropout2d(0.1),
            nn.Conv2d(512,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
Example #7
 def _make_stage(self, features, out_features, size):
     #pool input to (size, size)
     prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
     # conv 1x1 to reduce channels
     conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False)
     bn = InPlaceABNSync(out_features)
     return nn.Sequential(prior, conv, bn)
Example #8
    def __init__(self,
                 in_planes,
                 out_planes,
                 ksize,
                 stride,
                 pad,
                 dilation=1,
                 groups=1,
                 has_bn=True,
                 bn_eps=1e-5,
                 has_relu=True,
                 inplace=True,
                 has_bias=False):
        super(ConvBnRelu, self).__init__()
        self.conv = nn.Conv2d(in_planes,
                              out_planes,
                              kernel_size=ksize,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              groups=groups,
                              bias=has_bias)

        self.has_bn = has_bn
        self.has_relu = has_relu
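        # when both BN and ReLU are requested, InPlaceABNSync provides the fused normalization + activation;
        # otherwise fall back to a separate BatchNorm2d and/or ReLU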
        if self.has_bn and self.has_relu:
            self.bn = InPlaceABNSync(out_planes)
        else:
            if self.has_bn:
                self.bn = BatchNorm2d(out_planes)
            if self.has_relu:
                self.relu = nn.ReLU()
Example #9
    def __init__(self, block, layers, num_classes):
        self.inplanes = 128
        super(ResNet, self).__init__()
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=2, multi_grid=(1,1,1))
        
        self.layer5 = PSPModule(2048,512)
            
        self.edge_layer = Edge_Module()
        self.layer6 = Decoder_Module(num_classes)
        self.layer7 = nn.Sequential(
            nn.Conv2d(1024, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(256),
            nn.Dropout2d(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True)
            ) 
Example #10
    def __init__(self, in_channels, n_filters):
        super(DecoderBlock, self).__init__()
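        # 1x1 reduce (in_channels → in_channels // 4), 3x3 transposed conv that doubles the spatial size,
        # then 1x1 expand to n_filters, each followed by InPlaceABNSync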

        self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
        self.norm1 = InPlaceABNSync(in_channels // 4)

        self.deconv2 = nn.ConvTranspose2d(in_channels // 4,
                                          in_channels // 4,
                                          3,
                                          stride=2,
                                          padding=1,
                                          output_padding=1)
        self.norm2 = InPlaceABNSync(in_channels // 4)

        self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
        self.norm3 = InPlaceABNSync(n_filters)
Example #11
    def __init__(self,
                 in_planes,
                 out_planes,
                 ksize,
                 stride,
                 pad,
                 out_pad,
                 groups=1,
                 has_bn=True,
                 bn_eps=1e-5,
                 has_relu=True,
                 inplace=True,
                 has_bias=False):
        super(DeConvBnRelu, self).__init__()
        self.deconv = nn.ConvTranspose2d(in_channels=in_planes,
                                         out_channels=out_planes,
                                         kernel_size=ksize,
                                         stride=stride,
                                         padding=pad,
                                         output_padding=out_pad,
                                         bias=has_bias)

        self.has_bn = has_bn
        self.has_relu = has_relu
        if self.has_bn and self.has_relu:
            self.bn = InPlaceABNSync(out_planes)
        else:
            if self.has_bn:
                self.bn = BatchNorm2d(out_planes)
            if self.has_relu:
                self.relu = nn.ReLU()
Example #12
    def __init__(self, block, layers, num_classes, cfg):
        self.inplanes = 128
        super(ResNet, self).__init__()
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        with_att = cfg.att.with_att
        att = cfg.att.type
        att_stage = cfg.att.att_stage
        att_pos = cfg.att.att_pos
        att_location = cfg.att.att_location

        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0], with_att=with_att*att_stage[0], att=att, att_pos=att_pos, att_loc=att_location[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, with_att=with_att*att_stage[1], att=att, att_pos=att_pos, att_loc=att_location[1])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, with_att=with_att*att_stage[2], att=att, att_pos=att_pos, att_loc=att_location[2])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1,1,1), with_att=with_att*att_stage[3], att=att, att_pos=att_pos, att_loc=att_location[3])
        #self.layer5 = PSPModule(2048, 512)
        self.head = GCBModule(2048, 512, num_classes, cfg.module)

        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(512),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
            )
Example #13
 def __init__(self,in_fea=[256,512,1024], mid_fea=256, out_fea=2):
     super(Edge_Module, self).__init__()
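     # 1x1 conv + InPlaceABNSync adapters for three backbone stages (in_fea);
     # conv4 is a 3x3 edge predictor and conv5 fuses three such predictions (out_fea * 3 → out_fea)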
     
     self.conv1 =  nn.Sequential(
         nn.Conv2d(in_fea[0], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False),
         InPlaceABNSync(mid_fea)
         ) 
     self.conv2 =  nn.Sequential(
         nn.Conv2d(in_fea[1], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False),
         InPlaceABNSync(mid_fea)
         )  
     self.conv3 =  nn.Sequential(
         nn.Conv2d(in_fea[2], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False),
         InPlaceABNSync(mid_fea)
     )
     self.conv4 = nn.Conv2d(mid_fea,out_fea, kernel_size=3, padding=1, dilation=1, bias=True)
     self.conv5 = nn.Conv2d(out_fea*3,out_fea, kernel_size=1, padding=0, dilation=1, bias=True)
Example #14
def conv(inplanes, outplanes, kernel=3, stride=1, padding=1):
    conv = nn.Sequential(
        nn.Conv2d(inplanes,
                  outplanes,
                  kernel_size=kernel,
                  stride=stride,
                  padding=padding), InPlaceABNSync(outplanes))
    return conv
Example #15
    def __init__(self, block, layers, num_classes):
        self.inplanes = 128
        super(ResNet, self).__init__()
        print ('Model: ASP_OCNet')
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        #######
        # for p in self.parameters():
           # p.requires_grad = False
        # print ('----Lock before Layer4----')
        #######
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1,1,1))

        # extra added layers
        # old
        self.lastChannel = 256 # 512 for old code, 256 for new code
        # self.context = nn.Sequential(
                # ASP_OC_Module(2048, self.lastChannel),
                # )
        # new
        self.context = nn.Sequential(
                nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1),
                InPlaceABNSync(512),
                ASP_OC_Module(512, self.lastChannel)
                )

        self.cls = nn.Conv2d(self.lastChannel, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(512),
            nn.Dropout2d(0.10),
            nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
            )
Example #16
    def __init__(self, features, out_features=512, sizes=(1, 2, 3)):
        super(PSPModule, self).__init__()
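        # one pooled 1x1 conv + InPlaceABNSync stage per pyramid size; the bottleneck reduces the
        # concatenation of the input and all stage outputs with a 3x3 conv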

        self.stages = []
        self.stages = nn.ModuleList([self._make_stage(features, out_features, size) for size in sizes])
        self.bottleneck = nn.Sequential(
            nn.Conv2d(features+len(sizes)*out_features, out_features, kernel_size=3, padding=1, dilation=1, bias=False),
            InPlaceABNSync(out_features),
            )
Example #17
 def __init__(self, in_channels, out_channels, key_channels, value_channels, dropout, sizes=([1])):
     super(BaseOC_Context_Module, self).__init__()
     self.stages = []
     self.stages = nn.ModuleList(
         [self._make_stage(in_channels, out_channels, key_channels, value_channels, size) for size in sizes])
     self.conv_bn_dropout = nn.Sequential(
         nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0),
         InPlaceABNSync(out_channels),
     )
Example #18
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = InPlaceABNSync(planes)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = InPlaceABNSync(planes)
     self.conv3 = nn.Conv2d(planes,
                            planes * self.expansion,
                            kernel_size=1,
                            bias=False)
     self.bn3 = nn.BatchNorm2d(planes * self.expansion)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example #19
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = InPlaceABNSync(planes)

        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
Example #20
    def _make_fuse_layers(self):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
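        # fuse HRNet branches: for j > i, 1x1 conv + BatchNorm2d + nearest-neighbor upsampling by 2**(j - i);
        # for j < i, a chain of stride-2 3x3 convs (InPlaceABNSync on intermediate steps, plain BatchNorm2d on the last)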
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            nn.Conv2d(num_inchannels[j],
                                      num_inchannels[i],
                                      1,
                                      1,
                                      0,
                                      bias=False),
                            BatchNorm2d(num_inchannels[i]),
                            nn.Upsample(scale_factor=2**(j - i),
                                        mode='nearest')))

                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(num_inchannels[j],
                                              num_outchannels_conv3x3,
                                              3,
                                              2,
                                              1,
                                              bias=False),
                                    BatchNorm2d(num_outchannels_conv3x3)))

                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(num_inchannels[j],
                                              num_outchannels_conv3x3,
                                              3,
                                              2,
                                              1,
                                              bias=False),
                                    InPlaceABNSync(num_outchannels_conv3x3)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)
Example #21
    def __init__(self, num_classes):
        super(Decoder_Module, self).__init__()
        #xt/psp features has depth 256
        self.conv1 = nn.Sequential(
            nn.Conv2d(512,
                      256,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(256))
        #xl/conv2 feature has depth 256 --> 48
        self.conv2 = nn.Sequential(
            nn.Conv2d(256,
                      48,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(48))

        self.conv4 = nn.Sequential(
            nn.Conv2d(304,
                      256,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(256),
            nn.Conv2d(256,
                      256,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(256))
        # self.RC1 = Residual_Covolution(512, 1024, num_classes)
        # self.RC2 = Residual_Covolution(512, 1024, num_classes)
        # self.RC3 = Residual_Covolution(512, 1024, num_classes)
        self.conv5 = nn.Conv2d(256,
                               num_classes,
                               kernel_size=1,
                               padding=0,
                               dilation=1,
                               bias=True)
Example #22
def deconv(in_channels,
           out_channels,
           kernel_size=4,
           stride=2,
           padding=1,
           output_padding=0):
    deconv = nn.Sequential(
        nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride,
                           padding, output_padding),
        InPlaceABNSync(out_channels))
    return deconv
Example #23
    def __init__(self, in_channels, out_channels, num_classes, type='nl'):
        super(GCBModule, self).__init__()
        assert type in ['gcb', 'nl', 'nl_bn', 'multi', 'multi_spatial']
        inter_channels = in_channels // 4
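        # the `type` argument selects the context block: global context (gcb), non-local (nl / nl_bn),
        # or multi-head variants; the final 1x1 classifier in the bottleneck assumes out_channels == 512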
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            InPlaceABNSync(inter_channels))
        if type == 'gcb':
            self.ctb = ContextBlock(inter_channels, ratio=1. / 4)
        elif type == 'nl':
            self.ctb = NonLocal2d(inter_channels, inter_channels // 2)
        elif type == 'nl_bn':
            self.ctb = NonLocal2d_bn(inter_channels, inter_channels // 2)
        elif type == 'multi':
            self.ctb = MultiheadBlock(inter_channels, ratio=1. / 4, head_num=8)
        elif type == 'multi_spatial':
            self.ctb = MultiheadSpatialBlock(inter_channels,
                                             ratio=1. / 4,
                                             head_num=16)
        else:
            self.ctb = None
        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False), InPlaceABNSync(inter_channels))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels,
                      out_channels,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_channels),
            nn.Dropout2d(0.1),
            nn.Conv2d(512,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
Example #24
    def __init__(self, in_channels, out_channels):
        super(RCCAModule, self).__init__()
        inter_channels = in_channels // 4
        self.channels = in_channels // 4

        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            InPlaceABNSync(inter_channels))

        self.cca = PAM_Module(inter_channels)

        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False), InPlaceABNSync(inter_channels))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels,
                      out_channels,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_channels))
Example #25
    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
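        # existing branches whose channel count changes get a 3x3 conv + InPlaceABNSync;
        # each new, lower-resolution branch is built from stride-2 3x3 conv + InPlaceABNSync blocks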
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        nn.Sequential(
                            nn.Conv2d(num_channels_pre_layer[i],
                                      num_channels_cur_layer[i],
                                      3,
                                      1,
                                      1,
                                      bias=False),
                            InPlaceABNSync(num_channels_cur_layer[i])))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i-num_branches_pre else inchannels
                    conv3x3s.append(
                        nn.Sequential(
                            nn.Conv2d(inchannels,
                                      outchannels,
                                      3,
                                      2,
                                      1,
                                      bias=False),
                            InPlaceABNSync(outchannels)))
                transition_layers.append(nn.Sequential(*conv3x3s))

        return nn.ModuleList(transition_layers)
Example #26
    def __init__(self, in_channels, out_channels):
        super(DANetHead, self).__init__()
        inter_channels = in_channels // 4
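        # PAM_Module (position) and CAM_Module (channel) attention branches; conv6-conv8 are dropout + 1x1
        # output projections, and the bottleneck fuses the concatenated attention features (inter_channels * 3)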
        self.conv5a = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            InPlaceABNSync(inter_channels), nn.ReLU())

        self.conv5c = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            InPlaceABNSync(inter_channels), nn.ReLU())

        self.sa = PAM_Module(inter_channels)
        self.sc = CAM_Module(inter_channels)
        self.conv51 = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False), InPlaceABNSync(inter_channels), nn.ReLU())
        self.conv52 = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False), InPlaceABNSync(inter_channels), nn.ReLU())

        self.conv6 = nn.Sequential(nn.Dropout2d(0.1, False),
                                   nn.Conv2d(512, out_channels, 1))
        self.conv7 = nn.Sequential(nn.Dropout2d(0.1, False),
                                   nn.Conv2d(512, out_channels, 1))

        self.conv8 = nn.Sequential(nn.Dropout2d(0.1, False),
                                   nn.Conv2d(512, out_channels, 1))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(inter_channels * 3,
                      out_channels,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_channels),
            nn.Dropout2d(0.1))
Example #27
 def __init__(self, num_classes, pose_classes, pafs_num):
     super(Decoder_Module, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2d(512,
                   256,
                   kernel_size=1,
                   padding=0,
                   dilation=1,
                   bias=False), InPlaceABNSync(256))
     self.conv2 = nn.Sequential(
         nn.Conv2d(256,
                   48,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   dilation=1,
                   bias=False), InPlaceABNSync(48))
     self.conv3 = nn.Sequential(
         nn.Conv2d(304 + pose_classes + pafs_num + 128,
                   256,
                   kernel_size=1,
                   padding=0,
                   dilation=1,
                   bias=False), InPlaceABNSync(256),
         nn.Conv2d(256,
                   256,
                   kernel_size=1,
                   padding=0,
                   dilation=1,
                   bias=False), InPlaceABNSync(256))
     self.conv4 = nn.Conv2d(256,
                            num_classes,
                            kernel_size=1,
                            padding=0,
                            dilation=1,
                            bias=True)
Example #28
    def __init__(self, block, layers, num_classes, npoints):
        self.inplanes = 128
        super(ResNet, self).__init__()

        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=1,
                                       dilation=2,
                                       multi_grid=(1, 1, 1))
        self.layer5 = ASPPModule(features=2048,
                                 inner_features=256,
                                 out_features=512)

        self.module_list = []
        self.edge_layer = Edge_Module()
        self.pose_layer = get_pose_net(npoints, is_train=True)
        self.layer6 = Decoder_Module(num_classes)

        self.module_list.append(self.edge_layer)
        self.module_list.append(self.pose_layer)

        self.out_layer = nn.Conv2d(512,
                                   num_classes,
                                   kernel_size=1,
                                   padding=0,
                                   bias=True)

        self.conv_fuse = nn.Sequential(
            nn.Conv2d(1536, 512, kernel_size=1, padding=0, bias=False),
            InPlaceABNSync(512))
        self.nl_SEP = NONLocalBlock2D(in_channels=512, sub_sample=True)
Example #29
    def __init__(self, features, out_features=512, dilations=(12, 24, 36)):
        super(ASP_OC_Module, self).__init__()
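        # ASPP-style parallel branches: a context branch (3x3 conv + BaseOC_Context_Module), a 1x1 branch,
        # and three dilated 3x3 branches, all with InPlaceABNSync and fused by a 1x1 conv + dropout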
        self.context = nn.Sequential(
            nn.Conv2d(features,
                      out_features,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=True), InPlaceABNSync(out_features),
            BaseOC_Context_Module(in_channels=out_features,
                                  out_channels=out_features,
                                  key_channels=out_features // 2,
                                  value_channels=out_features,
                                  dropout=0,
                                  sizes=([1])))
        self.conv2 = nn.Sequential(
            nn.Conv2d(features,
                      out_features,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_features))
        self.conv3 = nn.Sequential(
            nn.Conv2d(features,
                      out_features,
                      kernel_size=3,
                      padding=dilations[0],
                      dilation=dilations[0],
                      bias=False), InPlaceABNSync(out_features))
        self.conv4 = nn.Sequential(
            nn.Conv2d(features,
                      out_features,
                      kernel_size=3,
                      padding=dilations[1],
                      dilation=dilations[1],
                      bias=False), InPlaceABNSync(out_features))
        self.conv5 = nn.Sequential(
            nn.Conv2d(features,
                      out_features,
                      kernel_size=3,
                      padding=dilations[2],
                      dilation=dilations[2],
                      bias=False), InPlaceABNSync(out_features))

        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(out_features * 5,
                      out_features,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_features),
            nn.Dropout2d(0.1))
Example #30
    def __init__(self,
                 features,
                 inner_features=256,
                 out_features=512,
                 dilations=(12, 24, 36)):
        super(ASPPModule, self).__init__()
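        # ASPP: image-level pooling branch (conv1), 1x1 branch (conv2), and three dilated 3x3 branches
        # (conv3-conv5), each with InPlaceABNSync; the bottleneck reduces the 5-way concatenation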

        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(features,
                      inner_features,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(inner_features))
        self.conv2 = nn.Sequential(
            nn.Conv2d(features,
                      inner_features,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(inner_features))
        self.conv3 = nn.Sequential(
            nn.Conv2d(features,
                      inner_features,
                      kernel_size=3,
                      padding=dilations[0],
                      dilation=dilations[0],
                      bias=False), InPlaceABNSync(inner_features))
        self.conv4 = nn.Sequential(
            nn.Conv2d(features,
                      inner_features,
                      kernel_size=3,
                      padding=dilations[1],
                      dilation=dilations[1],
                      bias=False), InPlaceABNSync(inner_features))
        self.conv5 = nn.Sequential(
            nn.Conv2d(features,
                      inner_features,
                      kernel_size=3,
                      padding=dilations[2],
                      dilation=dilations[2],
                      bias=False), InPlaceABNSync(inner_features))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(inner_features * 5,
                      out_features,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_features),
            nn.Dropout2d(0.1))
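Nearly every example above uses the same building block: a convolution (usually with bias=False) followed directly by InPlaceABNSync, which fuses synchronized batch normalization and the activation into a single in-place operation. The sketch below is a minimal distillation of that pattern; the helper name conv_abn is illustrative, and the import path from the inplace_abn package (as well as its default leaky-ReLU activation and possible need for the compiled extension) is an assumption rather than something shown on this page.

import torch.nn as nn
# Assumed import path for the upstream inplace_abn package; the snippets above
# only show the symbol InPlaceABNSync, not where it is imported from.
from inplace_abn import InPlaceABNSync


def conv_abn(in_channels, out_channels, kernel_size=3, stride=1, padding=1,
             dilation=1):
    """Convolution without bias followed by synchronized in-place ABN
    (normalization and activation fused into one op)."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
                  padding=padding, dilation=dilation, bias=False),
        InPlaceABNSync(out_channels))


# Illustrative usage: the 2048 -> 512 reduction that several heads above apply
# to the last backbone feature map.
reduce_head = conv_abn(2048, 512)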