def __init__(self, args):
    """Assemble the DeepLabV3+ decoder for a ResNet-18 backbone.

    Registers the ASPP head, the low-level shortcut branch, the fusion
    ("cat") convolutions and the classifier, then applies Kaiming/constant
    initialisation to the decoder weights *before* attaching the backbone,
    so the backbone's own (possibly pretrained) weights are left untouched.
    """
    super(deeplabv3plus_res18, self).__init__()
    self.backbone = None
    self.backbone_layers = None
    high_level_channels = 256  # channels of the res18 top feature map
    # NOTE(review): this silently overrides any BN momentum the caller set.
    args.TRAIN_BN_MOM = 0.1
    self.aspp = ASPP(
        dim_in=high_level_channels,
        dim_out=args.MODEL_ASPP_OUTDIM,
        rate=16 // args.MODEL_OUTPUT_STRIDE,
        bn_mom=args.TRAIN_BN_MOM,
    )
    self.dropout1 = nn.Dropout(0.5)
    self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
    self.upsample_sub = nn.UpsamplingBilinear2d(
        scale_factor=args.MODEL_OUTPUT_STRIDE // 4)

    low_level_channels = 32  # channels of the shortcut (low-level) feature map
    self.shortcut_conv = nn.Sequential(
        nn.Conv2d(low_level_channels, args.MODEL_SHORTCUT_DIM,
                  args.MODEL_SHORTCUT_KERNEL, 1,
                  padding=args.MODEL_SHORTCUT_KERNEL // 2, bias=True),
        SynchronizedBatchNorm2d(args.MODEL_SHORTCUT_DIM,
                                momentum=args.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
    )

    # Fuses the concatenation of upsampled ASPP features and shortcut features.
    fuse = [
        nn.Conv2d(args.MODEL_ASPP_OUTDIM + args.MODEL_SHORTCUT_DIM,
                  args.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
        SynchronizedBatchNorm2d(args.MODEL_ASPP_OUTDIM,
                                momentum=args.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.5),
        nn.Conv2d(args.MODEL_ASPP_OUTDIM, args.MODEL_ASPP_OUTDIM,
                  3, 1, padding=1, bias=True),
        SynchronizedBatchNorm2d(args.MODEL_ASPP_OUTDIM,
                                momentum=args.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.1),
    ]
    self.cat_conv = nn.Sequential(*fuse)

    # NOTE(review): this variant reads args.num_classes while the sibling
    # variants read cfg.MODEL_NUM_CLASSES — confirm the attribute name on
    # the config object actually passed in.
    self.cls_conv = nn.Conv2d(args.MODEL_ASPP_OUTDIM, args.num_classes,
                              1, 1, padding=0)

    # Initialise only the decoder modules registered so far; the backbone
    # is attached afterwards on purpose.
    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                    nonlinearity='relu')
        elif isinstance(module, SynchronizedBatchNorm2d):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    self.backbone = build_backbone(args.MODEL_BACKBONE,
                                   os=args.MODEL_OUTPUT_STRIDE)
    self.backbone_layers = self.backbone.get_layers()
    self.upsample2 = nn.UpsamplingBilinear2d(scale_factor=2)
def __init__(self, cfg):
    """Assemble the DeepLabV3+ decoder plus an FPN-style fusion path for a
    ResNet-like backbone (stage channels 256/512/1024/2048).

    Decoder modules are Kaiming/constant initialised before the backbone is
    built, so backbone (possibly pretrained) weights are not re-initialised.
    Fix over the original: the unused local ``in_layer2 = 1024`` (which also
    carried a copy-pasted "1/16" scale comment duplicating ``in_layer3``'s)
    has been removed; no registered module referenced it.
    """
    super(deeplabv3plus_fpn, self).__init__()
    self.backbone = None
    self.backbone_layers = None
    input_channel = 2048  # channels of the backbone top feature map
    # NOTE(review): overrides any BN momentum configured by the caller.
    cfg.TRAIN_BN_MOM = 0.1
    self.aspp = ASPP(dim_in=input_channel,
                     dim_out=cfg.MODEL_ASPP_OUTDIM,
                     rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                     bn_mom=cfg.TRAIN_BN_MOM)
    self.dropout1 = nn.Dropout(0.5)
    self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
    self.upsample_sub = nn.UpsamplingBilinear2d(
        scale_factor=cfg.MODEL_OUTPUT_STRIDE // 4)

    indim = 256  # channels of the low-level (shortcut) feature map
    self.shortcut_conv = nn.Sequential(
        nn.Conv2d(indim, cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_SHORTCUT_KERNEL,
                  1, padding=cfg.MODEL_SHORTCUT_KERNEL // 2, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
    )
    # Fuses the concatenated ASPP + shortcut features.
    self.cat_conv = nn.Sequential(
        nn.Conv2d(cfg.MODEL_ASPP_OUTDIM + cfg.MODEL_SHORTCUT_DIM,
                  cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.5),
        nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM,
                  3, 1, padding=1, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.1),
    )
    self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES,
                              1, 1, padding=0)

    # Initialise only the decoder modules registered so far; the backbone
    # is attached afterwards on purpose.
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                    nonlinearity='relu')
        elif isinstance(m, SynchronizedBatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

    self.backbone = build_backbone(cfg.MODEL_BACKBONE,
                                   os=cfg.MODEL_OUTPUT_STRIDE)
    self.backbone_layers = self.backbone.get_layers()

    # ----------- FPN structure -----------
    in_layer0 = 256   # 1/4-scale feature channels
    in_layer1 = 512   # 1/8-scale feature channels
    in_layer3 = 2048  # top-level feature channels
    self.feat3_conv = nn.Conv2d(in_layer3, in_layer3, 1, 1, padding=0)
    # NOTE(review): consumes the 512-channel map and downsamples by 2;
    # confirm against forward() that an in_layer1 input is intended here.
    self.feat3_convS2 = nn.Conv2d(in_layer1, in_layer3, 3, 2, padding=1)
    self.feat2_conv = nn.Conv2d(in_layer1, in_layer1, 1, 1, padding=0)
    self.feat2_deconv = nn.ConvTranspose2d(in_layer3, in_layer1, 2, 2,
                                           padding=0)
    self.feat1_conv = nn.Conv2d(in_layer0, in_layer0, 1, 1, padding=0)
    self.feat1_deconv = nn.ConvTranspose2d(in_layer1, in_layer0, 2, 2,
                                           padding=0)
def __init__(self, cfg):
    """Assemble the multi-ASPP DeepLabV3+ decoder.

    Three ASPP heads are registered: the standard one on the backbone's
    2048-channel top map, ``ASPP_size4`` on the 512-channel (1/8-scale) map
    and ``ASPP_size3`` on the 256-channel (1/4-scale) map.  The fusion conv
    therefore takes two ASPP streams plus the shortcut branch as input.
    Decoder weights receive Kaiming/constant initialisation before the
    backbone is attached, leaving backbone weights untouched.
    """
    super(deeplabv3plus_mutilAspp, self).__init__()
    self.backbone = None
    self.backbone_layers = None
    top_channels = 2048  # backbone top feature-map channels
    # NOTE(review): silently overrides any BN momentum the caller set.
    cfg.TRAIN_BN_MOM = 0.1
    self.aspp = ASPP(
        dim_in=top_channels,
        dim_out=cfg.MODEL_ASPP_OUTDIM,  # 256
        rate=16 // cfg.MODEL_OUTPUT_STRIDE,
        bn_mom=cfg.TRAIN_BN_MOM)

    mid_channels = 512  # ASPP over the 1/8-scale feature map
    self.aspp_layer1 = ASPP_size4(
        dim_in=mid_channels,
        dim_out=cfg.MODEL_ASPP_OUTDIM,  # 256
        rate=16 // cfg.MODEL_OUTPUT_STRIDE,
        bn_mom=cfg.TRAIN_BN_MOM)

    low_channels = 256  # ASPP over the 1/4-scale feature map
    self.aspp_layer0 = ASPP_size3(
        dim_in=low_channels,
        dim_out=cfg.MODEL_ASPP_OUTDIM,  # 256
        rate=16 // cfg.MODEL_OUTPUT_STRIDE,
        bn_mom=cfg.TRAIN_BN_MOM)

    self.dropout1 = nn.Dropout(0.5)
    self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
    self.upsample_sub = nn.UpsamplingBilinear2d(
        scale_factor=cfg.MODEL_OUTPUT_STRIDE // 4)
    self.upsample_sub_layer1 = nn.UpsamplingBilinear2d(
        scale_factor=cfg.MODEL_OUTPUT_STRIDE // 8)

    shortcut_channels = 256  # low-level (shortcut) feature-map channels
    self.shortcut_conv = nn.Sequential(
        nn.Conv2d(shortcut_channels, cfg.MODEL_SHORTCUT_DIM,
                  cfg.MODEL_SHORTCUT_KERNEL, 1,
                  padding=cfg.MODEL_SHORTCUT_KERNEL // 2, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
    )

    # Fusion: two ASPP streams concatenated with the shortcut branch.
    fuse = [
        nn.Conv2d(cfg.MODEL_ASPP_OUTDIM * 2 + cfg.MODEL_SHORTCUT_DIM,
                  cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.5),
        nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM,
                  3, 1, padding=1, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.1),
    ]
    self.cat_conv = nn.Sequential(*fuse)

    self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES,
                              1, 1, padding=0)

    # Initialise only the decoder modules registered so far; the backbone
    # is attached afterwards on purpose.
    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                    nonlinearity='relu')
        elif isinstance(module, SynchronizedBatchNorm2d):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    self.backbone = build_backbone(cfg.MODEL_BACKBONE,
                                   os=cfg.MODEL_OUTPUT_STRIDE)
    self.backbone_layers = self.backbone.get_layers()
def __init__(self, cfg):
    """Assemble the DeepLabV3+ decoder with a BaseOC context head.

    A 3x3 conv + BN + ReLU + ``BaseOC_Module`` block compresses the
    backbone's 2048-channel top map to 1024 channels before ASPP; the rest
    is the standard DeepLabV3+ decoder.  Decoder weights get Kaiming /
    constant initialisation before the backbone is attached, so backbone
    (possibly pretrained) weights stay untouched.
    """
    super(deeplabv3plus_baseOc, self).__init__()
    self.backbone = None
    self.backbone_layers = None
    backbone_out = 2048
    oc_out = 1024
    # Observed shapes (from the original author's note):
    #   layers        torch.Size([2, 2048, 32, 32])
    #   feature_aspp  torch.Size([2, 256, 32, 32])
    self.context = nn.Sequential(
        nn.Conv2d(backbone_out, oc_out, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(oc_out),
        nn.ReLU(),
        BaseOC_Module(in_channels=oc_out,
                      out_channels=oc_out,
                      key_channels=oc_out // 2,
                      value_channels=oc_out - oc_out // 2,
                      dropout=0.05,
                      sizes=[1]))
    # NOTE(review): silently overrides any BN momentum the caller set.
    cfg.TRAIN_BN_MOM = 0.1
    self.aspp = ASPP(dim_in=oc_out,
                     dim_out=cfg.MODEL_ASPP_OUTDIM,
                     rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                     bn_mom=cfg.TRAIN_BN_MOM)
    self.dropout1 = nn.Dropout(0.5)
    self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
    self.upsample_sub = nn.UpsamplingBilinear2d(
        scale_factor=cfg.MODEL_OUTPUT_STRIDE // 4)

    shortcut_channels = 256  # low-level (shortcut) feature-map channels
    self.shortcut_conv = nn.Sequential(
        nn.Conv2d(shortcut_channels, cfg.MODEL_SHORTCUT_DIM,
                  cfg.MODEL_SHORTCUT_KERNEL, 1,
                  padding=cfg.MODEL_SHORTCUT_KERNEL // 2, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
    )

    # Fuses the concatenated ASPP + shortcut features.
    fuse = [
        nn.Conv2d(cfg.MODEL_ASPP_OUTDIM + cfg.MODEL_SHORTCUT_DIM,
                  cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.5),
        nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM,
                  3, 1, padding=1, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.1),
    ]
    self.cat_conv = nn.Sequential(*fuse)

    self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES,
                              1, 1, padding=0)

    # Initialise only the decoder modules registered so far (plain
    # nn.BatchNorm2d in `context` is intentionally not matched here);
    # the backbone is attached afterwards on purpose.
    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                    nonlinearity='relu')
        elif isinstance(module, SynchronizedBatchNorm2d):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    self.backbone = build_backbone(cfg.MODEL_BACKBONE,
                                   os=cfg.MODEL_OUTPUT_STRIDE)
    self.backbone_layers = self.backbone.get_layers()