Example #1
    def _make_transpose(self, transblock, planes, blocks, stride=1):

        upsample = None
        if stride != 1:
            upsample = nn.Sequential(
                nn.ConvTranspose2d(self.inplanes * transblock.expansion, planes,
                                   kernel_size=2, stride=stride,
                                   padding=0, bias=False),
                SynchronizedBatchNorm2d(planes),
            )
        elif self.inplanes * transblock.expansion != planes:
            upsample = nn.Sequential(
                nn.Conv2d(self.inplanes * transblock.expansion, planes,
                          kernel_size=1, stride=stride, bias=False),
                SynchronizedBatchNorm2d(planes),
            )

        layers = []
        
        for i in range(1, blocks):
            layers.append(transblock(self.inplanes, self.inplanes * transblock.expansion))

        layers.append(transblock(self.inplanes, planes, stride, upsample))
        self.inplanes = planes // transblock.expansion

        return nn.Sequential(*layers)
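As a minimal usage sketch (not part of the example above), a decoder built around this method might stack transpose stages like the following; the block class and stage sizes here are illustrative assumptions, not taken from the source:

    # Hypothetical decoder wiring for _make_transpose; names and sizes are assumptions.
    self.deconv1 = self._make_transpose(TransBasicBlock, 256, blocks=6, stride=2)  # upsample x2
    self.deconv2 = self._make_transpose(TransBasicBlock, 128, blocks=4, stride=2)  # upsample x2
    self.deconv3 = self._make_transpose(TransBasicBlock, 64, blocks=3, stride=2)   # upsample x2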
Example #2
 def __init__(self,
              inplanes,
              planes,
              groups,
              reduction,
              stride=1,
              downsample=None):
     super(SEResNetBottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes,
                            planes,
                            kernel_size=1,
                            bias=False,
                            stride=stride)
     self.bn1 = SynchronizedBatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            padding=1,
                            groups=groups,
                            bias=False)
     self.bn2 = SynchronizedBatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = SynchronizedBatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.se_module = SEModule(planes * 4, reduction=reduction)
     self.downsample = downsample
     self.stride = stride
Example #3
    def __init__(self, num_class=150, fc_dim=4096,
                 use_softmax=False, pool_scales=(1, 2, 3, 6)):
        super(PPMBilinearDeepsup, self).__init__()
        self.use_softmax = use_softmax

        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(self.ppm)
        #self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)

        self.aspp_last = nn.Sequential(
            nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
                      kernel_size=3, padding=1, bias=False),
            SynchronizedBatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1)
        )
        #self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        #self.dropout_deepsup = nn.Dropout2d(0.1)
        for m in self.modules():
            if isinstance(m,nn.Conv2d):
                our_kaiming_normal_(m.weight,0.1)
                if m.bias is not None:
                    constant_(m.bias,0)
            elif isinstance(m,nn.BatchNorm2d):
                constant_(m.weight,1)
                constant_(m.bias,0)
Example #4
 def __init__(self,
              inplanes,
              planes,
              groups,
              reduction,
              stride=1,
              downsample=None,
              base_width=4):
     super(SEResNeXtBottleneck, self).__init__()
     width = math.floor(planes * (base_width / 64)) * groups
     self.conv1 = nn.Conv2d(inplanes,
                            width,
                            kernel_size=1,
                            bias=False,
                            stride=1)
     self.bn1 = SynchronizedBatchNorm2d(width)
     self.conv2 = nn.Conv2d(width,
                            width,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            groups=groups,
                            bias=False)
     self.bn2 = SynchronizedBatchNorm2d(width)
     self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
     self.bn3 = SynchronizedBatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.se_module = SEModule(planes * 4, reduction=reduction)
     self.downsample = downsample
     self.stride = stride
Example #5
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck_AWM, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = SynchronizedBatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn2 = SynchronizedBatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = SynchronizedBatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Linear(inplanes, inplanes, bias=False)
        self.fc2 = nn.Linear(planes * 4, planes * 4, bias=False)

        self.combine_Linear = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Linear(inplanes + planes * 4, (inplanes + planes * 4) // 2),
            nn.Linear((inplanes + planes * 4) // 2, 2), nn.Sigmoid())
Example #6
    def __init__(self, num_class=150, fc_dim=4096,
                 use_softmax=False, pool_scales=(1, 2, 3, 6)):
        super(PPMBilinearDeepsup, self).__init__()
        self.use_softmax = use_softmax

        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(self.ppm)
        self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)

        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
                      kernel_size=3, padding=1, bias=False),
            SynchronizedBatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_class, kernel_size=1)
        )
        self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        self.dropout_deepsup = nn.Dropout2d(0.1)
Example #7
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 128
        super(ResNet, self).__init__()
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = SynchronizedBatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = SynchronizedBatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = SynchronizedBatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #8
    def __init__(self,
                 num_class=150,
                 fc_dim=4096,
                 use_softmax=False,
                 pool_scales=(1, 2, 3, 6)):
        super(PSPBilinear, self).__init__()
        self.use_softmax = use_softmax

        self.psp = []
        for scale in pool_scales:
            self.psp.append(
                nn.Sequential(
                    nn.AdaptiveAvgPool2d(scale),
                    nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                    SynchronizedBatchNorm2d(512), nn.ReLU(inplace=True)))
        self.psp = nn.ModuleList(self.psp)

        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim + len(pool_scales) * 512,
                      512,
                      kernel_size=3,
                      padding=1,
                      bias=False), SynchronizedBatchNorm2d(512),
            nn.ReLU(inplace=True), nn.Dropout2d(0.1),
            nn.Conv2d(512, num_class, kernel_size=1))
Example #9
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              cbam=False):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = SynchronizedBatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = SynchronizedBatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = SynchronizedBatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
     self.cbam = cbam
     if self.cbam:
         gate_channel = planes * 4
         reduction_rate = 16
         self.gate = AttentionGate(gate_channel,
                                   size_reduction_ratio=2,
                                   reduction_ratio=reduction_rate,
                                   version='v3')
Example #10
    def __init__(self,
                 in_planes,
                 out_planes,
                 kernel=23,
                 stride=1,
                 has_bias=False):
        super(GCN, self).__init__()
        #self.psp_conv = conv3x3_bn_relu(in_planes, out_planes, 1)
        #in_planes = out_planes
        #out_planes = cfg.MODEL.NUM_CLASSES
        self.gcn23x23_0 = nn.Sequential(
            nn.Conv2d(in_planes,
                      out_planes,
                      kernel_size=[1, kernel],
                      stride=stride,
                      padding=[0, kernel // 2],
                      dilation=(1, 1),
                      bias=has_bias,
                      groups=cfg.RESNETS.NUM_GROUPS),
            SynchronizedBatchNorm2d(out_planes), nn.ReLU(inplace=True))

        self.gcn23x23_1 = nn.Sequential(
            nn.Conv2d(in_planes,
                      out_planes,
                      kernel_size=[kernel, 1],
                      stride=stride,
                      padding=[kernel // 2, 0],
                      bias=has_bias,
                      groups=cfg.RESNETS.NUM_GROUPS),
            SynchronizedBatchNorm2d(out_planes), nn.ReLU(inplace=True))

        self.gcn23x23_2 = nn.Sequential(
            nn.Conv2d(out_planes,
                      out_planes,
                      kernel_size=[kernel, 1],
                      stride=stride,
                      padding=[kernel // 2, 0],
                      bias=has_bias,
                      groups=cfg.RESNETS.NUM_GROUPS),
            SynchronizedBatchNorm2d(out_planes), nn.ReLU(inplace=True))

        self.gcn23x23_3 = nn.Sequential(
            nn.Conv2d(out_planes,
                      out_planes,
                      kernel_size=[1, kernel],
                      stride=stride,
                      padding=[0, kernel // 2],
                      bias=has_bias,
                      groups=cfg.RESNETS.NUM_GROUPS),
            SynchronizedBatchNorm2d(out_planes), nn.ReLU(inplace=True))

        #self.br_side = nn.Sequential(
        #        nn.Conv2d(out_planes, cfg.MODEL.NUM_CLASSES, 3, padding=1, stride=1),
        #        SynchronizedBatchNorm2d(cfg.MODEL.NUM_CLASSES),
        #        nn.ReLU(inplace=True),
        #        nn.Conv2d(cfg.MODEL.NUM_CLASSES, cfg.MODEL.NUM_CLASSES, 3, padding=1, stride=1))
        #self.conv_last = nn.Conv2d(out_planes, cfg.MODEL.NUM_CLASSES, 1, padding=0, stride=1)

        self.apply(self.weights_init)
Example #11
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = SynchronizedBatchNorm2d(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = SynchronizedBatchNorm2d(planes)
     self.downsample = downsample
     self.stride = stride
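For context, the forward pass that conventionally accompanies a block initialized this way is sketched below; it is the standard residual pattern these attributes support, not code taken from the example itself:

    # Conventional BasicBlock forward pass (a sketch, assumed rather than quoted from the source).
    def forward(self, x):
        residual = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            residual = self.downsample(x)  # project the identity path to a matching shape
        return self.relu(out + residual)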
Example #12
    def __init__(self,
                 num_class=150,
                 fc_dim=4096,
                 use_softmax=False,
                 pool_scales=(1, 2, 3, 6),
                 fpn_inplanes=(256, 512, 1024, 2048),
                 fpn_dim=256):
        super(UPerNet, self).__init__()
        self.use_softmax = use_softmax

        # PPM Module
        self.ppm_pooling = []
        self.ppm_conv = []

        for scale in pool_scales:
            self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
            self.ppm_conv.append(
                nn.Sequential(
                    nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                    SynchronizedBatchNorm2d(512), nn.ReLU(inplace=True)))
        self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
        self.ppm_conv = nn.ModuleList(self.ppm_conv)
        if 'deform_conv' in cfg.MODEL.CONV_BODY and False:
            self.ppm_last_conv = conv3x3_bn_relu(
                fc_dim + len(pool_scales) * 512, fpn_dim, 1)
            self.ppm_last_conv_drop = nn.Dropout2d(0.1)
        else:
            self.ppm_last_conv = conv3x3_bn_relu(
                fc_dim + len(pool_scales) * 512, fpn_dim, 1)
        # FPN Module
        self.fpn_in = []
        for fpn_inplane in fpn_inplanes[:-1]:  # skip the top layer
            self.fpn_in.append(
                nn.Sequential(
                    nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
                    SynchronizedBatchNorm2d(fpn_dim), nn.ReLU(inplace=True)))
        self.fpn_in = nn.ModuleList(self.fpn_in)

        self.fpn_out = []
        for i in range(len(fpn_inplanes) - 1):  # skip the top layer
            if not cfg.SEM.GCN_ON:
                self.fpn_out.append(
                    nn.Sequential(conv3x3_bn_relu(fpn_dim, fpn_dim, 1), ))
            else:
                self.fpn_out.append(
                    nn.Sequential(GCN(fpn_dim, fpn_dim, kernel=15)))
        self.fpn_out = nn.ModuleList(self.fpn_out)
        if 'deform_conv' in cfg.MODEL.CONV_BODY and False:
            self.conv_last = nn.Sequential(
                conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
                nn.Dropout2d(0.9), nn.Conv2d(fpn_dim, num_class,
                                             kernel_size=1))
        else:
            self.conv_last = nn.Sequential(
                conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
                nn.Conv2d(fpn_dim, num_class, kernel_size=1))
Example #13
    def __init__(self,
                 num_class=150,
                 fc_dim=4096,
                 use_softmax=False,
                 pool_scales=(1, 2, 3, 6),
                 fpn_inplanes=(256, 512, 1024, 2048),
                 fpn_dim=256):
        super(CspnUPerNet, self).__init__()
        self.use_softmax = use_softmax

        # PPM Module
        self.ppm_pooling = []
        self.ppm_conv = []

        for scale in pool_scales:
            self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
            self.ppm_conv.append(
                nn.Sequential(
                    nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                    SynchronizedBatchNorm2d(512), nn.ReLU(inplace=True)))
        self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
        self.ppm_conv = nn.ModuleList(self.ppm_conv)
        self.ppm_last_conv = nn.Sequential(
            conv3x3_bn_relu(fc_dim + len(pool_scales) * 512, fpn_dim, 1),
            nn.Dropout2d(0.1))

        # FPN Module
        self.fpn_in = []
        for fpn_inplane in fpn_inplanes[:-1]:  # skip the top layer
            self.fpn_in.append(
                nn.Sequential(
                    nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
                    SynchronizedBatchNorm2d(fpn_dim), nn.ReLU(inplace=True)))
        self.fpn_in = nn.ModuleList(self.fpn_in)

        self.fpn_out = []
        for i in range(len(fpn_inplanes) - 1):  # skip the top layer
            self.fpn_out.append(
                nn.Sequential(conv3x3_bn_relu(fpn_dim, fpn_dim, 1), ))
        self.fpn_out = nn.ModuleList(self.fpn_out)

        self.fpn_out_last = nn.Sequential(
            conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
            nn.Dropout2d(0.9),
        )
        self.conv_last = nn.Conv2d(fpn_dim, num_class, kernel_size=1)

        self.cspn_last = nn.Sequential(
            nn.Conv2d(fpn_dim,
                      8 * cfg.MODEL.NUM_CLASSES,
                      3,
                      padding=1,
                      groups=cfg.RESNETS.NUM_GROUPS),
            SynchronizedBatchNorm2d(8 * cfg.MODEL.NUM_CLASSES),
        )
        self.cspn_net = Affinity_Propagate()
Example #14
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = conv1x1(inplanes, planes)
     self.bn1 = SynchronizedBatchNorm2d(planes)
     self.conv2 = conv3x3(planes, planes, stride)
     self.bn2 = SynchronizedBatchNorm2d(planes)
     self.conv3 = conv1x1(planes, planes * self.expansion)
     self.bn3 = SynchronizedBatchNorm2d(planes * self.expansion)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example #15
 def __init__(self, inplanes, planes, stride=1, groups=1, downsample=None):
     super(GroupBottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = SynchronizedBatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                            padding=1, groups=groups, bias=False)
     self.bn2 = SynchronizedBatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=False)
     self.bn3 = SynchronizedBatchNorm2d(planes * 2)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example #16
    def __init__(self,
                 num_class=150,
                 fc_dim=4096,
                 use_softmax=False,
                 pool_scales=(1, 2, 3, 6)):
        super(PPMBilinearDeepsup, self).__init__()
        self.use_softmax = use_softmax

        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(
                nn.Sequential(
                    nn.AdaptiveAvgPool2d(scale),
                    nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                    SynchronizedBatchNorm2d(512), nn.ReLU(inplace=True)))
        self.ppm = nn.ModuleList(self.ppm)
        self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)

        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim + len(pool_scales) * 512,
                      512,
                      kernel_size=3,
                      padding=1,
                      bias=False),
            SynchronizedBatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.25),
            # nn.Conv2d(512, num_class, kernel_size=1)
            # ConvLSTM((128, 128), 5, [512], (3, 3), 1, batch_first=True, return_all_layers=False),
        )

        # self.channel_wise = ChannelWiseBlock(512, 64)
        # self.spatial_wise = SpatialWiseBlock(512)

        self.LSTM_previous = nn.Conv2d(512, 64, kernel_size=3, padding=1)
        self.LSTM = ConvLSTM((128, 128),
                             128, [64], (3, 3),
                             1,
                             batch_first=True,
                             return_all_layers=False)
        self.LSTM_after = nn.Conv2d(64, num_class, kernel_size=1)

        self.LSTM_framemunis = ConvLSTM((128, 128),
                                        64, [64], (3, 3),
                                        1,
                                        batch_first=True,
                                        return_all_layers=False)
        # self.LSTM_framemunis_after = nn.Conv2d(16, num_class, kernel_size=1)

        # self.classify_conv = nn.Conv2d(512, num_class, kernel_size=1)
        self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        self.dropout_deepsup = nn.Dropout2d(0.1)
Example #17
 def __init__(self, inplanes, planes, stride=1, upsample=None, **kwargs):
     super(TransBasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, inplanes)
     self.bn1 = SynchronizedBatchNorm2d(inplanes)
     self.relu = nn.ReLU(inplace=True)
     if upsample is not None and stride != 1:
         self.conv2 = nn.ConvTranspose2d(inplanes, planes,
                                         kernel_size=3, stride=stride, padding=1,
                                         output_padding=1, bias=False)
     else:
         self.conv2 = conv3x3(inplanes, planes, stride)
     self.bn2 = SynchronizedBatchNorm2d(planes)
     self.upsample = upsample
     self.stride = stride
Example #18
    def __init__(self, num_class=150, fc_dim=4096,
                 use_softmax=False, pool_scales=(1, 2, 3, 6)):
        super(SPN_PPMBilinearDeepsup, self).__init__()
        self.use_softmax = use_softmax

        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(self.ppm)

        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
                      kernel_size=3, padding=1, bias=False),
            SynchronizedBatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_class, kernel_size=1)
        )

        self.cbr_guidance = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
        self.dropout_guidance = nn.Dropout2d(0.1)

        self.conv_last_guidance = nn.Conv2d(fc_dim // 4, num_class*12, 1, 1, 0)

        # spn model:
        # Input: score map + guidance network output
        # Output: refined map
        # score map: N*W*H*Class
        # guidance network output: N*W*H*(21*12=252)

        self.propagator_l2r = GateRecurrent2dnoind(True, False) # left->right
        self.propagator_r2l = GateRecurrent2dnoind(True, True) # right->left
        self.propagator_t2b = GateRecurrent2dnoind(False, False) # top->bottom
        self.propagator_b2t = GateRecurrent2dnoind(False, True) # bottom->top

        self.conv_spn = nn.Sequential(
            nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
                      kernel_size=3, padding=1, bias=False),
            SynchronizedBatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_class, kernel_size=1)
        )
Example #19
    def __init__(self, orig_resnet):
        super(Resnet, self).__init__()

        # take pretrained resnet, except AvgPool and FC
        self.conv1 = orig_resnet.conv1
        self.bn1 = orig_resnet.bn1
        self.relu1 = orig_resnet.relu1
        self.conv2 = orig_resnet.conv2
        self.bn2 = orig_resnet.bn2
        self.relu2 = orig_resnet.relu2
        self.conv3 = orig_resnet.conv3
        self.bn3 = orig_resnet.bn3
        self.relu3 = orig_resnet.relu3
        self.maxpool = orig_resnet.maxpool
        self.layer1 = orig_resnet.layer1
        self.layer2 = orig_resnet.layer2
        self.layer3 = orig_resnet.layer3
        self.layer4 = orig_resnet.layer4
        self.correlation=CorrelationLayer1D(max_disp=40,stride_2=1)
        self.conv_rdi = nn.Sequential(nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0),
                                      nn.ReLU(inplace=True))

        self.conv_r = nn.Conv2d(357, 512, kernel_size=3, stride=1,padding=1, bias=False)

        self.bn4=SynchronizedBatchNorm2d(512)
Example #20
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    groups,
                    reduction,
                    stride=1,
                    downsample_kernel_size=1,
                    downsample_padding=0):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=downsample_kernel_size,
                          stride=stride,
                          padding=downsample_padding,
                          bias=False),
                SynchronizedBatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes, planes, groups, reduction, stride,
                  downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups, reduction))

        return nn.Sequential(*layers)
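A hypothetical call sketch for this builder follows; the block class, groups, and reduction values are illustrative assumptions rather than values from the source:

    # Hypothetical stage construction using the _make_layer above (SE-ResNet-50-like sizes assumed).
    self.layer1 = self._make_layer(SEResNetBottleneck, 64, blocks=3, groups=1, reduction=16)
    self.layer2 = self._make_layer(SEResNetBottleneck, 128, blocks=4, groups=1, reduction=16, stride=2)
    self.layer3 = self._make_layer(SEResNetBottleneck, 256, blocks=6, groups=1, reduction=16, stride=2)
    self.layer4 = self._make_layer(SEResNetBottleneck, 512, blocks=3, groups=1, reduction=16, stride=2)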
Example #21
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride,
              padding,
              dilation,
              relu=True,
              need_bn=False):
     super(_ConvBatchNormReLU, self).__init__()
     self.add_module(
         "conv",
         nn.Conv2d(
             in_channels=in_channels,
             out_channels=out_channels,
             kernel_size=kernel_size,
             stride=stride,
             padding=padding,
             dilation=dilation,
             bias=False,
         ),
     )
     if need_bn:
         self.add_module("bn", SynchronizedBatchNorm2d(out_channels))
     if relu:
         self.add_module("relu",
                         nn.LeakyReLU(negative_slope=0.2, inplace=True))
Example #22
    def __init__(self,
                 block,
                 layers,
                 num_classes=1000,
                 zero_init_residual=False,
                 freeze_until=None):
        super(PTResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = SynchronizedBatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.num_classes = num_classes
        if num_classes is not None:
            self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

        if freeze_until is not None:
            if freeze_until.startswith('layer'):
                self.conv1.weight.requires_grad = False
                done_freeze = False
                for idx in range(1, 5):
                    layername = 'layer%d' % (idx)
                    if layername == freeze_until:
                        done_freeze = True
                    layermod = getattr(self, layername)
                    print("Freezing %s" % (layername))
                    for m in layermod.parameters():
                        m.requires_grad = False
                    if done_freeze:
                        break
            else:
                raise NotImplementedError()
Example #23
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet152, self).__init__()
        if cfg.SEM.FREEZE_BN:
            print("Freezing BN and using AffineChannel2d as Batchnorm2d!")
            from lib.nn import AffineChannel2d
            SynchronizedBatchNorm2d = AffineChannel2d
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=True)
        #self.conv1 = nnl.SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3)
        self.bn1 = SynchronizedBatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #24
    def __init__(self, num_class=150, fc_dim=4096,
                 use_softmax=False, pool_scales=(1, 2, 3, 6),
                 ppm_last_conv_dim=512,
                 fpn_inplanes=(256,512,1024,2048), fpn_dim=256,
                 T=1
                 ):
        super(UPerNet, self).__init__()
        self.use_softmax = use_softmax
        self.T = T

        # PPM Module
        self.ppm_pooling = []
        self.ppm_conv = []

        for scale in pool_scales:
            self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
            self.ppm_conv.append(nn.Sequential(
                nn.Conv2d(fc_dim, ppm_last_conv_dim, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(ppm_last_conv_dim),
                nn.ReLU(inplace=True)
            ))
        self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
        self.ppm_conv = nn.ModuleList(self.ppm_conv)
        self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*ppm_last_conv_dim, fpn_dim, 1)

        # FPN Module
        self.fpn_in = []
        for fpn_inplane in fpn_inplanes[:-1]: # skip the top layer
            self.fpn_in.append(nn.Sequential(
                nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(fpn_dim),
                nn.ReLU(inplace=True)
            ))
        self.fpn_in = nn.ModuleList(self.fpn_in)

        self.fpn_out = []
        for i in range(len(fpn_inplanes) - 1): # skip the top layer
            self.fpn_out.append(nn.Sequential(
                conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
            ))
        self.fpn_out = nn.ModuleList(self.fpn_out)

        self.conv_last = nn.Sequential(
            conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
            nn.Conv2d(fpn_dim, num_class, kernel_size=1)
        )
Example #25
    def __init__(self,
                 num_class=150,
                 fc_dim=2048,
                 use_softmax=False,
                 pool_scales=(1, 2, 3, 6),
                 quad_inplanes=(128, 256, 512, 1024, 2048),
                 quad_dim=256):
        super(QuadNet, self).__init__()
        self.use_softmax = use_softmax

        # PPM Module
        self.ppm_pooling = []
        self.ppm_conv = []

        for scale in pool_scales:
            self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
            self.ppm_conv.append(
                nn.Sequential(
                    nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                    SynchronizedBatchNorm2d(512), nn.ReLU(inplace=True)))
        self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
        self.ppm_conv = nn.ModuleList(self.ppm_conv)
        self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales) * 512,
                                             quad_dim, 1)

        # GCN Module
        self.quad_in = []
        for quad_inplane in quad_inplanes[:-1]:  # skip the top layer
            self.quad_in.append(
                nn.Sequential(
                    nn.Conv2d(quad_inplane,
                              quad_dim,
                              kernel_size=1,
                              bias=False), SynchronizedBatchNorm2d(quad_dim),
                    nn.ReLU(inplace=True)))
        self.quad_in = nn.ModuleList(self.quad_in)

        self.quad_gcn = nn.Sequential(
            nn.Conv2d(quad_dim * 6, quad_dim, kernel_size=1, bias=False),
            SynchronizedBatchNorm2d(quad_dim), nn.ReLU(inplace=True))

        self.quad_out = []
        for i in range(len(quad_inplanes) - 1):  # skip the bottom layer
            self.quad_out.append(
                nn.Conv2d(quad_dim, num_class + 1, kernel_size=1, bias=False))
        self.quad_out = nn.ModuleList(self.quad_out)
Example #26
    def __init__(self):
        super(SegmentationModuleBase, self).__init__()
        print("hello")

        # For cache
        self.mapping_to_detectron = None
        self.orphans_in_detectron = None
        self.iter=0
        self.draw=False
        builder = semseg_heads.ModelBuilder()

        #define encoder 
        if cfg.SEM.USE_RESNET:

            self.encoder = get_func(cfg.MODEL.CONV_BODY)()
        else:
            builder = semseg_heads.ModelBuilder()
            self.encoder = builder.build_encoder(
                arch=cfg.SEM.ARCH_ENCODER,
                fc_dim=cfg.SEM.FC_DIM,
                weights=cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS
                )

        # define decoder
        self.decoder = builder.build_decoder(
                arch=cfg.SEM.DECODER_TYPE,
                fc_dim=cfg.SEM.FC_DIM,
                num_class=cfg.MODEL.NUM_CLASSES,
                use_softmax=not self.training,
                weights='')

        self.crit = nn.NLLLoss(ignore_index=255)
        self.deep_sup_scale = cfg.SEM.DEEP_SUB_SCALE
        
    
        if cfg.DISP.ORIGINAL:
            self.conv_last_disp = nn.Conv2d(cfg.DISP.DIM*2,1,kernel_size=3,padding=1,bias=False)
            self.conv_last_disp.apply(self._init_weights_normal)

        if cfg.DISP.USE_CRL_DISPFUL:
            self.conv_last_disp = CRL.DispFulNetSubset()
        if cfg.DISP.USE_CRL_DISPRES:
            self.conv_last_disp = CRL.DispResNetSubset()
            self.conv_last_disp.apply(self._init_weights_normal)

                
        self.SmoothL1Loss = nn.SmoothL1Loss(reduction='none') #elementwise_mean
        #self.disp_loss = self._disp_loss
        #self.hardtanh = nn.Hardtanh(-1, 1,inplace=True)
        if len(cfg.SEM.DOWNSAMPLE) > 1:
            self.disp_deepsup=nn.Sequential(
                nn.Conv2d(cfg.SEM.FC_DIM // 2, cfg.SEM.FC_DIM // 4, kernel_size=3, stride=1, padding=1, bias=None),
                SynchronizedBatchNorm2d(cfg.SEM.FC_DIM // 4),
                nn.ReLU(inplace=True),
                nn.Dropout2d(0.1),
                nn.Conv2d(cfg.SEM.FC_DIM // 4, 1, kernel_size=3, stride=1, padding=1, bias=None)
            )
            self.disp_deepsup.apply(self._init_weights_kaiming)
Example #27
    def _make_skip_layer(self, inplanes, planes):

        layers = nn.Sequential(
            nn.Conv2d(inplanes, planes, kernel_size=1,
                      stride=1, padding=0, bias=False),
            SynchronizedBatchNorm2d(planes),
            nn.ReLU(inplace=True)
        )
        return layers
Example #28
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = round(inp * expand_ratio)
        self.use_res_connect = self.stride == 1 and inp == oup

        if expand_ratio == 1:
            self.conv = nn.Sequential(
                # dw
                nn.Conv2d(hidden_dim,
                          hidden_dim,
                          3,
                          stride,
                          1,
                          groups=hidden_dim,
                          bias=False),
                SynchronizedBatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                SynchronizedBatchNorm2d(oup),
            )
        else:
            self.conv = nn.Sequential(
                # pw
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                SynchronizedBatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # dw
                nn.Conv2d(hidden_dim,
                          hidden_dim,
                          3,
                          stride,
                          1,
                          groups=hidden_dim,
                          bias=False),
                SynchronizedBatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                SynchronizedBatchNorm2d(oup),
            )
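The forward pass usually paired with this block applies the shortcut only when self.use_res_connect is true; a short sketch of that convention (assumed, not shown in the example):

    # Typical MobileNetV2-style forward for InvertedResidual (illustrative sketch).
    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)  # identity shortcut: stride == 1 and inp == oup
        return self.conv(x)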
Example #29
    def __init__(self, overall_stride=32):
        super(Xception, self).__init__()

        self.relu = nn.ReLU(inplace=True)

        self.conv1 = nn.Conv2d(3, 32, 3,2, 0, bias=False)
        self.bn1 = SynchronizedBatchNorm2d(32)

        self.conv2 = nn.Conv2d(32,64,3,bias=False)
        self.bn2 = SynchronizedBatchNorm2d(64)
        #do relu here

        dilation = 1
        self.block1=Block(64,128,2,2,start_with_relu=False,grow_first=True)  # block(in_dim,out_dim,reps,stride)
        self.block2=Block(128,256,2,2,start_with_relu=True,grow_first=True)
        if overall_stride == 8:
            dilation *= 2
            self.block3=Block(256,728,2,1,start_with_relu=True,grow_first=True)  # pooling is at the end of the block
        else:
            self.block3=Block(256,728,2,2,start_with_relu=True,grow_first=True)

        self.block4=Block(728,728,3,1,start_with_relu=True,grow_first=True,dilation=dilation)
        self.block5=Block(728,728,3,1,start_with_relu=True,grow_first=True,dilation=dilation)
        self.block6=Block(728,728,3,1,start_with_relu=True,grow_first=True,dilation=dilation)
        self.block7=Block(728,728,3,1,start_with_relu=True,grow_first=True,dilation=dilation)

        self.block8=Block(728,728,3,1,start_with_relu=True,grow_first=True,dilation=dilation)
        self.block9=Block(728,728,3,1,start_with_relu=True,grow_first=True,dilation=dilation)
        self.block10=Block(728,728,3,1,start_with_relu=True,grow_first=True,dilation=dilation)
        self.block11=Block(728,728,3,1,start_with_relu=True,grow_first=True,dilation=dilation)

        if overall_stride == 8 or overall_stride == 16:
            dilation *= 2
            self.block12=Block(728,1024,2,1,start_with_relu=True,grow_first=False,dilation=dilation)
        else:
            self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False,dilation=dilation)

        self.conv3 = SeparableConv2d(1024,1536,3,1,dilation*2,dilation=dilation*2)	# SeparableConv2d(in_dim,out_dim,kernel,stride,pad), dilation with multi-grid (1,2,4)
        self.bn3 = SynchronizedBatchNorm2d(1536)

        #do relu here
        self.conv4 = SeparableConv2d(1536,2048,3,1,dilation*4,dilation=dilation*4)	# dilation with multi-grid (1,2,4)
        self.bn4 = SynchronizedBatchNorm2d(2048)
Example #30
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck_GE, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = SynchronizedBatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = SynchronizedBatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = SynchronizedBatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
     self.Sigmoid = nn.Sigmoid()
     if cfg.SEM.USE_GE_BLOCK:
         self.AvgPool2d = nn.Sequential(nn.AdaptiveAvgPool2d(1))
     """