Example no. 1
0
    def __init__(self, cfg, pretrain=False):
        from model.deeplabv3plus.deeplabv3plusEnc import Encoding, Mean

        super().__init__()
        self.backbone = None
        self.backbone_layers = None

        # 720 = 48 + 96 + 192 + 384, i.e. the combined channel widths of the four
        # HRNet branches; the ASPP output width is overridden in the config here.
        input_channel = 720
        cfg.MODEL_ASPP_OUTDIM = 512
        se_channel = 48

        self.pspaspp = PSPASPP(in_channels=input_channel,
                               out_channels=cfg.MODEL_ASPP_OUTDIM,
                               rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                               norm_layer=SynchronizedBatchNorm2d,
                               norm_kwargs={'momentum': cfg.TRAIN_BN_MOM})
        self.dropout1 = nn.Dropout(0.5)

        # Context-encoding (SE-loss) head: predicts image-level class presence
        # from the encoded backbone features.
        self.se_loss = nn.Sequential(
            # nn.AvgPool2d(kernel_size=7, stride=4, padding=3),
            nn.Conv2d(input_channel, se_channel, 1, bias=False),
            SynchronizedBatchNorm2d(se_channel, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(True),
            Encoding(D=se_channel, K=32),
            nn.BatchNorm1d(32),
            nn.ReLU(True),
            Mean(dim=1),
            nn.Linear(se_channel, cfg.MODEL_NUM_CLASSES)
        )

        self.cls_conv = nn.Sequential(
            nn.Conv2d(
                in_channels=cfg.MODEL_ASPP_OUTDIM,
                out_channels=cfg.MODEL_ASPP_OUTDIM,
                kernel_size=1,
                stride=1,
                padding=0),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=False),
            nn.Conv2d(
                in_channels=cfg.MODEL_ASPP_OUTDIM,
                out_channels=cfg.MODEL_NUM_CLASSES,
                kernel_size=1,
                stride=1,
                padding=0)
        )

        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
        # Kaiming initialization for convolutions; unit scale and zero shift for SyncBN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.backbone = get_hrnet_basebone(cfg.MODEL_NUM_CLASSES, pretrain=pretrain, combine=False)
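
The forward pass for this constructor is not included in the example. Below is a minimal sketch of how the modules could be wired, purely as an assumption: the backbone built with combine=False is taken to return the four HRNet branch maps, which are upsampled and concatenated into the 720-channel tensor that pspaspp and se_loss expect.

    def forward(self, x):
        # Hypothetical wiring; the original forward method is not shown above.
        import torch
        import torch.nn.functional as F

        feats = self.backbone(x)  # assumed: list of 4 maps with 48/96/192/384 channels
        size = feats[0].shape[2:]
        feats = torch.cat(
            [F.interpolate(f, size=size, mode='bilinear', align_corners=True) for f in feats],
            dim=1)                                # 48 + 96 + 192 + 384 = 720 channels

        se_out = self.se_loss(feats)              # image-level class-presence logits

        out = self.pspaspp(feats)
        out = self.dropout1(out)
        out = self.cls_conv(out)
        return self.upsample4(out), se_out        # segmentation logits at input resolution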
Example no. 2
0
    def __init__(self, cfg, pretrain=False):
        super().__init__()
        self.backbone = None
        self.backbone_layers = None
        input_channel = 720
        self.aspp = ASPP(dim_in=input_channel,
                         dim_out=cfg.MODEL_ASPP_OUTDIM,
                         rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                         bn_mom=cfg.TRAIN_BN_MOM)
        self.dropout1 = nn.Dropout(0.5)
        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)

        self.cls_conv = nn.Sequential(
            nn.Conv2d(
                in_channels=cfg.MODEL_ASPP_OUTDIM,
                out_channels=cfg.MODEL_ASPP_OUTDIM,
                kernel_size=1,
                stride=1,
                padding=0),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=False),
            nn.Conv2d(
                in_channels=cfg.MODEL_ASPP_OUTDIM,
                out_channels=cfg.MODEL_NUM_CLASSES,
                kernel_size=1,
                stride=1,
                padding=0)
        )
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.backbone = get_hrnet_basebone(cfg.MODEL_NUM_CLASSES, pretrain=pretrain)
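
A plausible forward pass for this variant, sketched as an assumption (the original is not shown): the combined 720-channel HRNet feature map goes through ASPP, dropout, the classifier convolutions, and a 4x bilinear upsample.

    def forward(self, x):
        # Hypothetical wiring; the original forward method is not shown above.
        feats = self.backbone(x)       # assumed: single 720-channel map at 1/4 resolution
        feats = self.aspp(feats)
        feats = self.dropout1(feats)
        logits = self.cls_conv(feats)
        return self.upsample4(logits)  # back to input resolution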
Example no. 3
0
    def __init__(self, cfg, pretrain=False):
        super().__init__()
        self.backbone = None
        self.backbone_layers = None
        input_channel = [48, 96, 192, 384]
        cfg.MODEL_ASPP_OUTDIM = 512
        self.pspaspp1 = PSPASPP(in_channels=input_channel[0],
                                out_channels=cfg.MODEL_ASPP_OUTDIM,
                                rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                                norm_layer=SynchronizedBatchNorm2d,
                                norm_kwargs={'momentum': cfg.TRAIN_BN_MOM})
        self.pspaspp2 = PSPASPP(in_channels=input_channel[1],
                                out_channels=cfg.MODEL_ASPP_OUTDIM,
                                rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                                norm_layer=SynchronizedBatchNorm2d,
                                norm_kwargs={'momentum': cfg.TRAIN_BN_MOM})
        self.pspaspp3 = PSPASPP(in_channels=input_channel[2],
                                out_channels=cfg.MODEL_ASPP_OUTDIM,
                                rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                                norm_layer=SynchronizedBatchNorm2d,
                                norm_kwargs={'momentum': cfg.TRAIN_BN_MOM})
        self.pspaspp4 = PSPASPP(in_channels=input_channel[3],
                                out_channels=cfg.MODEL_ASPP_OUTDIM,
                                rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                                norm_layer=SynchronizedBatchNorm2d,
                                norm_kwargs={'momentum': cfg.TRAIN_BN_MOM})
        self.dropout1 = nn.Dropout(0.5)
        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)

        self.cls_conv = nn.Sequential(
            nn.Conv2d(
                in_channels=cfg.MODEL_ASPP_OUTDIM,
                out_channels=cfg.MODEL_ASPP_OUTDIM,
                kernel_size=1,
                stride=1,
                padding=0),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=False),
            nn.Conv2d(
                in_channels=cfg.MODEL_ASPP_OUTDIM,
                out_channels=cfg.MODEL_NUM_CLASSES,
                kernel_size=1,
                stride=1,
                padding=0)
        )
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.backbone = get_hrnet_basebone(cfg.MODEL_NUM_CLASSES, pretrain=pretrain, combine=False)
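
The forward is again missing. One way the four per-branch PSPASPP heads could be combined, strictly as an assumption, is to upsample each result to the highest-resolution branch and sum them before classification; summation keeps the channel count at MODEL_ASPP_OUTDIM, which is what cls_conv expects.

    def forward(self, x):
        # Hypothetical wiring; the original forward method is not shown above.
        import torch.nn.functional as F

        f1, f2, f3, f4 = self.backbone(x)  # assumed: 48/96/192/384-channel branch maps
        size = f1.shape[2:]

        def up(t):
            return F.interpolate(t, size=size, mode='bilinear', align_corners=True)

        # Sum (rather than concatenate) so the channel count stays at MODEL_ASPP_OUTDIM.
        out = self.pspaspp1(f1) + up(self.pspaspp2(f2)) + up(self.pspaspp3(f3)) + up(self.pspaspp4(f4))
        out = self.dropout1(out)
        out = self.cls_conv(out)
        return self.upsample4(out)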
Example no. 4
0
    def testSyncBatchNorm2DSyncTrain(self):
        # Compare a plain BatchNorm2d against SynchronizedBatchNorm2d replicated
        # across two GPUs in training mode.
        bn = nn.BatchNorm2d(10)
        sync_bn = SynchronizedBatchNorm2d(10)
        sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])

        bn.cuda()
        sync_bn.cuda()

        self._checkBatchNormResult(bn,
                                   sync_bn,
                                   torch.rand(16, 10, 16, 16),
                                   True,
                                   cuda=True)
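
_checkBatchNormResult is defined elsewhere in the test suite. As an illustration only (not the actual helper), the kind of check it presumably performs is to run the same batch through both layers in training mode and compare the outputs, since synchronized BN statistics gathered across two GPUs should match single-device BN.

    def _compare_outputs_sketch(self, bn, sync_bn, x):
        # Illustrative only: the hypothetical name and tolerance are not from the source.
        bn.train()
        sync_bn.train()
        y_ref = bn(x.cuda())        # single-device reference
        y_sync = sync_bn(x.cuda())  # batch split across GPUs, stats synchronized
        self.assertTrue(torch.allclose(y_ref, y_sync, atol=1e-5))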
Example no. 5
0
    def __init__(self, cfg, pretrain=False):
        super().__init__()
        from model.deeplabv3plus.deeplabv3plusEnc import Encoding, Mean
        self.backbone = None
        self.backbone_layers = None
        input_channel = [48, 96, 192, 384]
        self.aspp1 = ASPP(dim_in=input_channel[0],
                          dim_out=cfg.MODEL_ASPP_OUTDIM,
                          rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                          bn_mom=cfg.TRAIN_BN_MOM)
        self.aspp2 = ASPP(dim_in=input_channel[1],
                          dim_out=cfg.MODEL_ASPP_OUTDIM,
                          rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                          bn_mom=cfg.TRAIN_BN_MOM)
        self.aspp3 = ASPP(dim_in=input_channel[2],
                          dim_out=cfg.MODEL_ASPP_OUTDIM,
                          rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                          bn_mom=cfg.TRAIN_BN_MOM)
        self.aspp4 = ASPP(dim_in=input_channel[3],
                          dim_out=cfg.MODEL_ASPP_OUTDIM,
                          rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                          bn_mom=cfg.TRAIN_BN_MOM)
        self.dropout1 = nn.Dropout(0.5)
        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)

        se_channel = input_channel[-1] // 4
        self.se_loss = nn.Sequential(
            nn.Conv2d(input_channel[-1], se_channel, 1, bias=False),
            SynchronizedBatchNorm2d(se_channel, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(True),
            Encoding(D=se_channel, K=32),
            nn.BatchNorm1d(32),
            nn.ReLU(True),
            Mean(dim=1),
            nn.Linear(se_channel, cfg.MODEL_NUM_CLASSES)
        )

        self.cls_conv = nn.Sequential(
            nn.Conv2d(
                in_channels=cfg.MODEL_ASPP_OUTDIM,
                out_channels=cfg.MODEL_ASPP_OUTDIM,
                kernel_size=1,
                stride=1,
                padding=0),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=False),
            nn.Conv2d(
                in_channels=cfg.MODEL_ASPP_OUTDIM,
                out_channels=cfg.MODEL_NUM_CLASSES,
                kernel_size=1,
                stride=1,
                padding=0)
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.backbone = get_hrnet_basebone(cfg.MODEL_NUM_CLASSES, pretrain=pretrain, combine=False)
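
As with the previous examples, the forward pass is not shown. A minimal sketch under the same assumptions as Example no. 3, with the SE-loss head fed from the deepest branch (its first Conv2d takes input_channel[-1] = 384 channels):

    def forward(self, x):
        # Hypothetical wiring; the original forward method is not shown above.
        import torch.nn.functional as F

        f1, f2, f3, f4 = self.backbone(x)  # assumed: 48/96/192/384-channel branch maps
        size = f1.shape[2:]

        def up(t):
            return F.interpolate(t, size=size, mode='bilinear', align_corners=True)

        se_out = self.se_loss(f4)          # SE head reads the 384-channel branch

        out = self.aspp1(f1) + up(self.aspp2(f2)) + up(self.aspp3(f3)) + up(self.aspp4(f4))
        out = self.dropout1(out)
        out = self.cls_conv(out)
        return self.upsample4(out), se_out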