Example #1
 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation)
     self.bn1 = SynchronizedBatchNorm2d(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation)
     self.bn2 = SynchronizedBatchNorm2d(planes)
     self.downsample = downsample
     self.stride = stride
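This excerpt does not define the conv3x3 helper it calls (Example #3 below uses it too); a minimal sketch, assuming the usual ResNet-style padded 3x3 convolution (the name and defaults are assumptions from that convention):

import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    # 3x3 convolution; padding=dilation keeps the spatial size unchanged
    # at stride 1, including for dilated (atrous) convolutions.
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, dilation=dilation, bias=False)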
Example #2
    def __init__(self, cfg):
        super().__init__()
        assert cfg.MODEL_AUX_OUT == 2 + 2 * cfg.MODEL_NUM_CLASSES
        self.cfg = cfg
        self.backbone = None
        self.backbone_layers = None
        input_channel = 2048
        self.aspp = ASPP(dim_in=input_channel,
                         dim_out=cfg.MODEL_ASPP_OUTDIM,
                         rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                         bn_mom=cfg.TRAIN_BN_MOM)
        self.dropout1 = nn.Dropout(0.5)
        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
        self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=cfg.MODEL_OUTPUT_STRIDE // 4)

        indim = 256
        self.shortcut_conv = nn.Sequential(
            nn.Conv2d(indim, cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_SHORTCUT_KERNEL, 1,
                      padding=cfg.MODEL_SHORTCUT_KERNEL // 2, bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
        )
        self.cat_conv = nn.Sequential(
            nn.Conv2d(cfg.MODEL_ASPP_OUTDIM + cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1,
                      bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
        )
        self.offset_conv = Decoder(cfg.MODEL_AUX_OUT)
        self.seed_map_conv = Decoder(cfg.MODEL_NUM_CLASSES)

        self.bbox_attention1 = nn.Sequential(
            nn.Conv2d(cfg.MODEL_NUM_CLASSES + 1, indim, 3, 1, padding=1, bias=True),
            nn.Sigmoid()
        )

        self.bbox_attention2 = nn.Sequential(
            nn.Conv2d(cfg.MODEL_NUM_CLASSES + 1, input_channel, 3, 1, padding=1, bias=True),
            nn.Sigmoid()
        )
        # self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.init_output()

        self.backbone = build_backbone(cfg.MODEL_BACKBONE, os=cfg.MODEL_OUTPUT_STRIDE)
        self.backbone_layers = self.backbone.get_layers()
        self.apply(self.init_bn)
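For orientation, the fields this constructor reads from cfg can be mocked with a stand-in namespace; the values below are illustrative assumptions chosen to satisfy the MODEL_AUX_OUT assertion, not settings from any real experiment:

from types import SimpleNamespace

cfg = SimpleNamespace(
    MODEL_NUM_CLASSES=21,
    MODEL_AUX_OUT=2 + 2 * 21,        # the assert requires exactly this relation
    MODEL_ASPP_OUTDIM=256,
    MODEL_OUTPUT_STRIDE=16,          # rate = 16 // 16 = 1; upsample_sub = x4
    MODEL_SHORTCUT_DIM=48,
    MODEL_SHORTCUT_KERNEL=1,
    MODEL_BACKBONE='res101_atrous',  # hypothetical backbone key
    TRAIN_BN_MOM=0.0003,
)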
Example #3
 def __init__(self, inplanes, planes, stride=1, atrous=1, downsample=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride, atrous)
     # NOTE: self.bn_mom is not assigned anywhere in this excerpt; for this
     # constructor to run it must be defined elsewhere, e.g. as a class
     # attribute (bn_mom = 0.0003 is a common default in these repos).
     # self.bn1 = nn.BatchNorm2d(planes)
     self.bn1 = SynchronizedBatchNorm2d(planes, momentum=self.bn_mom)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     # self.bn2 = nn.BatchNorm2d(planes)
     self.bn2 = SynchronizedBatchNorm2d(planes, momentum=self.bn_mom)
     self.downsample = downsample
     self.stride = stride
Example #4
 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = SynchronizedBatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, dilation=dilation,
                            padding=dilation, bias=False)
     self.bn2 = SynchronizedBatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = SynchronizedBatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example #5
    def __init__(self, block, layers=(3, 4, 23, 3)):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(6,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = SynchronizedBatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=1,
                                       dilation=2)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=1,
                                       dilation=4)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
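A typical instantiation, assuming the Bottleneck block from Example #4 is in scope and carries the usual expansion = 4 class attribute (which _make_layer reads):

import torch

model = ResNet(Bottleneck, layers=(3, 4, 23, 3))  # ResNet-101-style depths
x = torch.randn(1, 6, 224, 224)  # note: this stem expects 6 input channels

With stride 2 in the stem, the max-pool, and layer2, but dilation instead of stride in layer3 and layer4, the trunk's overall output stride is 8, the usual choice for dilated segmentation backbones.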
Example #6
 def __init__(self, ninput, noutput):
     super().__init__()
     self.conv = nn.ConvTranspose2d(ninput,
                                    noutput,
                                    3,
                                    stride=2,
                                    padding=1,
                                    output_padding=1,
                                    bias=True)
     self.bn = SynchronizedBatchNorm2d(noutput, eps=1e-3)
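The excerpt ends before forward; in ERFNet-style upsampler blocks the usual order is transposed conv, then BN, then ReLU. A method sketch under that assumption:

import torch.nn.functional as F

def forward(self, x):
    # A stride-2 transposed conv with output_padding=1 maps H x W exactly
    # to 2H x 2W; normalize, then apply the nonlinearity.
    return F.relu(self.bn(self.conv(x)))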
Example #7
def get_norm_layer(opt, norm_nc):
    if opt.param_free_norm == 'instance':
        return nn.InstanceNorm2d(norm_nc, affine=False)
    elif opt.param_free_norm == 'syncbatch':
        return SynchronizedBatchNorm2d(norm_nc, affine=False)
    elif opt.param_free_norm == 'batch':
        return nn.BatchNorm2d(norm_nc, affine=False)
    else:
        raise ValueError(
            '%s is not a recognized param-free norm type in SPADE' %
            opt.param_free_norm)
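A quick usage sketch; opt here is a stand-in namespace, not SPADE's real option parser, and SynchronizedBatchNorm2d is assumed imported as in the snippet:

from types import SimpleNamespace

opt = SimpleNamespace(param_free_norm='syncbatch')
norm = get_norm_layer(opt, 64)  # -> SynchronizedBatchNorm2d(64, affine=False)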
Example #8
    def __init__(self, x_channels, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Sequential(
            SynchronizedBatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            SynchronizedBatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
        )

        self.conv2 = nn.Sequential(
            SynchronizedBatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            SynchronizedBatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
        )

        self.shortcut = nn.Conv2d(x_channels, out_channels, kernel_size=1)
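No forward is shown for this block; a plausible wiring given the shapes (an assumption, not the source's code) is a pre-activation residual pair, with the 1x1 shortcut projecting x from x_channels to out_channels so it can be added:

def forward(self, x, e):
    # Hypothetical: e carries in_channels, x carries x_channels; both are
    # assumed to share the same spatial size at this point.
    out = self.conv(e) + self.shortcut(x)
    return self.conv2(out) + out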
Example #9
 def __init__(self, out_ch, embedding_dims):
     super().__init__()
     # Shared embedding is calculated in initial layer of G 
     # onehot(y) -> shared embedding -> projected
     # [projected + latent z] -> linear(gain, bias) -> conditional batch norm
     # embedding_dims = [projected_dim + latent_z_dim]
     self.out_ch = out_ch
     self.bn = SynchronizedBatchNorm2d(out_ch, affine=False)
     # gain, bias
     self.gain = nn.Linear(embedding_dims, out_ch, bias=False)
     self.bias = nn.Linear(embedding_dims, out_ch, bias=False)
     # init
     nn.init.orthogonal_(self.gain.weight)
     nn.init.zeros_(self.bias.weight)
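The comments describe BigGAN-style class-conditional batch norm; a forward sketch consistent with them (assumed, since the excerpt ends at the constructor), where y is the concatenated [projected class embedding, latent chunk] vector of size embedding_dims:

def forward(self, x, y):
    # Parameter-free normalization, then a per-sample affine transform
    # predicted from y; the scale is centered at 1 and the shift starts
    # at 0 thanks to the zero-initialized bias weights above.
    gain = (1 + self.gain(y)).view(y.size(0), self.out_ch, 1, 1)
    bias = self.bias(y).view(y.size(0), self.out_ch, 1, 1)
    return self.bn(x) * gain + bias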
Example #10
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Project the identity branch with a strided 1x1 conv so it
            # matches the block output in both channels and resolution.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                SynchronizedBatchNorm2d(planes * block.expansion),
            )

        # Only the first block strides and downsamples; the remaining
        # blocks run at full resolution with the requested dilation.
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))

        return nn.Sequential(*layers)
Example #11
def BatchNorm(planes, sync_bn=False):
    # Use the multi-GPU synchronized variant only when requested;
    # otherwise fall back to the standard nn.BatchNorm2d.
    if sync_bn:
        return SynchronizedBatchNorm2d(planes)
    return nn.BatchNorm2d(planes)
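Typical call sites just forward a config flag:

bn_sync = BatchNorm(64, sync_bn=True)   # multi-GPU training: synchronized BN
bn_plain = BatchNorm(64)                # single GPU: plain nn.BatchNorm2d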